from sympy.core import Add, C, Derivative, Dummy, Expr, S, sympify, Wild
from sympy.concrete.gosper import gosper_sum
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.polys import apart, PolynomialError
from sympy.solvers import solve
def _free_symbols(function, limits):
"""Helper function to return the symbols that appear in a sum-like object
once it is evaluated.
"""
isyms = function.free_symbols
for xab in limits:
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
class Sum(Expr):
"""Represents unevaluated summation."""
def __new__(cls, function, *symbols, **assumptions):
from sympy.integrals.integrals import _process_limits
# Any embedded piecewise functions need to be brought out to the
# top level so that summation can go into piecewise mode at the
# earliest possible moment.
function = piecewise_fold(sympify(function))
if function is S.NaN:
return S.NaN
if not symbols:
raise ValueError("Summation variables must be given")
limits, sign = _process_limits(*symbols)
# Only limits with lower and upper bounds are supported; the indefinite Sum
# is not supported
if any(len(l) != 3 or None in l for l in limits):
raise ValueError('Sum requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [sign*function]
arglist.extend(limits)
obj._args = tuple(arglist)
return obj
@property
def function(self):
return self._args[0]
@property
def limits(self):
return self._args[1:]
@property
def variables(self):
"""Return a list of the summation variables
>>> from sympy import Sum
>>> from sympy.abc import x, i
>>> Sum(x**i, (i, 1, 3)).variables
[i]
"""
return [l[0] for l in self.limits]
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the
summation is evaluated. This is useful if one is trying to
determine whether a sum depends on a certain symbol or not.
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> Sum(x, (x, y, 1)).free_symbols
set([y])
"""
if self.function.is_zero:
return set()
return _free_symbols(self.function, self.limits)
@property
def is_zero(self):
"""A Sum is only zero if its function is zero or if all terms
cancel out. This only answers whether the summand is zero."""
return self.function.is_zero
@property
def is_number(self):
"""
Return True if the Sum will result in a number, else False.
sympy considers anything that will result in a number to have
is_number == True.
>>> from sympy import log
>>> log(2).is_number
True
Sums are a special case since they contain symbols that can
be replaced with numbers. Whether the sum can be done or not is
another issue. But answering whether the final result is a number is
not difficult.
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> Sum(x, (y, 1, x)).is_number
False
>>> Sum(1, (y, 1, x)).is_number
False
>>> Sum(0, (y, 1, x)).is_number
True
>>> Sum(x, (y, 1, 2)).is_number
False
>>> Sum(x, (y, 1, 1)).is_number
False
>>> Sum(x, (x, 1, 2)).is_number
True
>>> Sum(x*y, (x, 1, 2), (y, 1, 3)).is_number
True
"""
return self.function.is_zero or not self.free_symbols
def doit(self, **hints):
#if not hints.get('sums', True):
# return self
f = self.function
for limit in self.limits:
i, a, b = limit
dif = b - a
if dif.is_Integer and dif < 0:
a, b = b, a
f = eval_sum(f, (i, a, b))
if f is None:
return self
if hints.get('deep', True):
return f.doit(**hints)
else:
return f
def _eval_summation(self, f, x):
return
def _eval_derivative(self, x):
"""
Differentiate wrt x as long as x is not in the free symbols of any of
the upper or lower limits.
Sum(a*b*x, (x, 1, a)) can be differentiated wrt x or b but not `a`
since the value of the sum is discontinuous in `a`. In a case
involving a limit variable, the unevaluated derivative is returned.
"""
# diff already confirmed that x is in the free symbols of self, but we
# don't want to differentiate wrt any free symbol in the upper or lower
# limits
# XXX remove this test for free_symbols when the default _eval_derivative is in
if x not in self.free_symbols:
return S.Zero
# get limits and the function
f, limits = self.function, list(self.limits)
limit = limits.pop(-1)
if limits: # f is the argument to a Sum
f = Sum(f, *limits)
if len(limit) == 3:
_, a, b = limit
if x in a.free_symbols or x in b.free_symbols:
return None
df = Derivative(f, x, **{'evaluate': True})
rv = Sum(df, limit)
if limit[0] not in df.free_symbols:
rv = rv.doit()
return rv
else:
raise NotImplementedError('Lower and upper bound expected.')
def euler_maclaurin(self, m=0, n=0, eps=0, eval_integral=True):
"""
Return an Euler-Maclaurin approximation of self, where m is the
number of leading terms to sum directly and n is the number of
terms in the tail.
With m = n = 0, this is simply the corresponding integral
plus a first-order endpoint correction.
Returns (s, e) where s is the Euler-Maclaurin approximation
and e is the estimated error (taken to be the magnitude of
the first omitted term in the tail):
>>> from sympy.abc import k, a, b
>>> from sympy import Sum
>>> Sum(1/k, (k, 2, 5)).doit().evalf()
1.28333333333333
>>> s, e = Sum(1/k, (k, 2, 5)).euler_maclaurin()
>>> s
-log(2) + 7/20 + log(5)
>>> from sympy import sstr
>>> print sstr((s.evalf(), e.evalf()), full_prec=True)
(1.26629073187415, 0.0175000000000000)
The endpoints may be symbolic:
>>> s, e = Sum(1/k, (k, a, b)).euler_maclaurin()
>>> s
-log(a) + log(b) + 1/(2*b) + 1/(2*a)
>>> e
Abs(-1/(12*b**2) + 1/(12*a**2))
If the function is a polynomial of degree at most 2n+1, the
Euler-Maclaurin formula becomes exact (and e = 0 is returned):
>>> Sum(k, (k, 2, b)).euler_maclaurin()
(b**2/2 + b/2 - 1, 0)
>>> Sum(k, (k, 2, b)).doit()
b**2/2 + b/2 - 1
With a nonzero eps specified, the summation is ended
as soon as the remainder term is less than the epsilon.
"""
m = int(m)
n = int(n)
f = self.function
assert len(self.limits) == 1
i, a, b = self.limits[0]
s = S.Zero
if m:
for k in range(m):
term = f.subs(i, a+k)
if (eps and term and abs(term.evalf(3)) < eps):
return s, abs(term)
s += term
a += m
x = Dummy('x')
I = C.Integral(f.subs(i, x), (x, a, b))
if eval_integral:
I = I.doit()
s += I
def fpoint(expr):
if b is S.Infinity:
return expr.subs(i, a), 0
return expr.subs(i, a), expr.subs(i, b)
fa, fb = fpoint(f)
iterm = (fa + fb)/2
g = f.diff(i)
for k in xrange(1, n+2):
ga, gb = fpoint(g)
term = C.bernoulli(2*k)/C.factorial(2*k)*(gb-ga)
if (eps and term and abs(term.evalf(3)) < eps) or (k > n):
break
s += term
g = g.diff(i, 2)
return s + iterm, abs(term)
def _eval_subs(self, old, new): # XXX this should be the same as Integral's
if any(old == v for v in self.variables):
return self
def summation(f, *symbols, **kwargs):
r"""
Compute the summation of f with respect to symbols.
The notation for symbols is similar to the notation used in Integral.
summation(f, (i, a, b)) computes the sum of f with respect to i from a to b,
i.e.,
::
b
____
\ `
summation(f, (i, a, b)) = ) f
/___,
i = a
If it cannot compute the sum, it returns an unevaluated Sum object.
Repeated sums can be computed by introducing additional symbols tuples::
>>> from sympy import summation, oo, symbols, log
>>> i, n, m = symbols('i n m', integer=True)
>>> summation(2*i - 1, (i, 1, n))
n**2
>>> summation(1/2**i, (i, 0, oo))
2
>>> summation(1/log(n)**n, (n, 2, oo))
Sum(log(n)**(-n), (n, 2, oo))
>>> summation(i, (i, 0, n), (n, 0, m))
m**3/6 + m**2/2 + m/3
>>> from sympy.abc import x
>>> from sympy import factorial
>>> summation(x**n/factorial(n), (n, 0, oo))
exp(x)
"""
return Sum(f, *symbols, **kwargs).doit(deep=False)
def telescopic_direct(L, R, n, limits):
"""Returns the direct summation of the terms of a telescopic sum
L is the term with lower index
R is the term with higher index
n is the difference between the indices of L and R
For example:
>>> from sympy.concrete.summations import telescopic_direct
>>> from sympy.abc import k, a, b
>>> telescopic_direct(1/k, -1/(k+2), 2, (k, a, b))
-1/(b + 2) - 1/(b + 1) + 1/(a + 1) + 1/a
"""
(i, a, b) = limits
s = 0
for m in xrange(n):
s += L.subs(i,a+m) + R.subs(i,b-m)
return s
def telescopic(L, R, limits):
'''Tries to perform the summation using the telescopic property;
returns None if it is not possible.
'''
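# For example, with L = 1/k and R = -1/(k + 2) the shift s = 2 is found
# and the sum collapses to boundary terms via telescopic_direct above.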
(i, a, b) = limits
if L.is_Add or R.is_Add:
return None
# We want to solve(L.subs(i, i + m) + R, m)
# First we try a simple match since this does things that
# solve doesn't do, e.g. solve(f(k+m)-f(k), m) fails
k = Wild("k")
sol = (-R).match(L.subs(i, i + k))
s = None
if sol and k in sol:
s = sol[k]
if not (s.is_Integer and L.subs(i,i + s) == -R):
# sometimes match fails, e.g. f(x+2).match(-f(x+k)) -> {k: -2 - 2*x}
s = None
# But there are things that match doesn't do that solve
# can do, e.g. determine that 1/(x + m) = 1/(1 - x) when m = 1
if s is None:
m = Dummy('m')
try:
sol = solve(L.subs(i, i + m) + R, m) or []
except NotImplementedError:
return None
sol = [si for si in sol if si.is_Integer and
(L.subs(i,i + si) + R).expand().is_zero]
if len(sol) != 1:
return None
s = sol[0]
if s < 0:
return telescopic_direct(R, L, abs(s), (i, a, b))
elif s > 0:
return telescopic_direct(L, R, s, (i, a, b))
def eval_sum(f, limits):
(i, a, b) = limits
if f is S.Zero:
return S.Zero
if i not in f.free_symbols:
return f*(b - a + 1)
if a == b:
return f.subs(i, a)
dif = b - a
definite = dif.is_Integer
# Doing it directly may be faster if there are very few terms.
if definite and (dif < 100):
return eval_sum_direct(f, (i, a, b))
# Try to do it symbolically. Even when the number of terms is known,
# this can save time when b-a is big.
# We should try to transform to partial fractions
value = eval_sum_symbolic(f.expand(), (i, a, b))
if value is not None:
return value
# Do it directly
if definite:
return eval_sum_direct(f, (i, a, b))
def eval_sum_direct(expr, limits):
(i, a, b) = limits
dif = b - a
return Add(*[expr.subs(i, a + j) for j in xrange(dif + 1)])
def eval_sum_symbolic(f, limits):
(i, a, b) = limits
if not f.has(i):
return f*(b-a+1)
# Linearity
if f.is_Mul:
L, R = f.as_two_terms()
if not L.has(i):
sR = eval_sum_symbolic(R, (i, a, b))
if sR: return L*sR
if not R.has(i):
sL = eval_sum_symbolic(L, (i, a, b))
if sL: return R*sL
try:
f = apart(f, i) # see if it becomes an Add
except PolynomialError:
pass
if f.is_Add:
L, R = f.as_two_terms()
lrsum = telescopic(L, R, (i, a, b))
if lrsum:
return lrsum
lsum = eval_sum_symbolic(L, (i, a, b))
rsum = eval_sum_symbolic(R, (i, a, b))
if None not in (lsum, rsum):
return lsum + rsum
# Polynomial terms with Faulhaber's formula
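# i.e. for integer n >= 0:
#   Sum(i**n, (i, a, b)) == (bernoulli(n + 1, b + 1) - bernoulli(n + 1, a))/(n + 1)
# where bernoulli(m, x) is the Bernoulli polynomial of degree m.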
n = Wild('n')
result = f.match(i**n)
if result is not None:
n = result[n]
if n.is_Integer:
if n >= 0:
return ((C.bernoulli(n+1, b+1) - C.bernoulli(n+1, a))/(n+1)).expand()
elif a.is_Integer and a >= 1:
if n == -1:
return C.harmonic(b) - C.harmonic(a - 1)
else:
return C.harmonic(b, abs(n)) - C.harmonic(a - 1, abs(n))
# Geometric terms
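# For f = c1**(c2*i + c3) the finite geometric series gives
#   Sum(f, (i, a, b)) == c1**c3*(c1**(a*c2) - c1**((b + 1)*c2))/(1 - c1**c2)
# assuming c1**c2 != 1, which is what the branch below returns.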
c1 = C.Wild('c1', exclude=[i])
c2 = C.Wild('c2', exclude=[i])
c3 = C.Wild('c3', exclude=[i])
e = f.match(c1**(c2*i+c3))
if e is not None:
c1 = c1.subs(e)
c2 = c2.subs(e)
c3 = c3.subs(e)
# TODO: more general limit handling
return c1**c3 * (c1**(a*c2) - c1**(c2+b*c2)) / (1 - c1**c2)
if not (a.has(S.Infinity, S.NegativeInfinity) or \
b.has(S.Infinity, S.NegativeInfinity)):
r = gosper_sum(f, (i, a, b))
if r not in (None, S.NaN):
return r
return eval_sum_hyper(f, (i, a, b))
def _eval_sum_hyper(f, i, a):
""" Returns (res, cond). Sums from a to oo. """
from sympy.functions import hyper
from sympy.simplify import hyperexpand, hypersimp, fraction
from sympy.polys.polytools import Poly, factor
if a != 0:
return _eval_sum_hyper(f.subs(i, i + a), i, 0)
if f.subs(i, 0) == 0:
return _eval_sum_hyper(f.subs(i, i + 1), i, 0)
hs = hypersimp(f, i)
if hs is None:
return None
numer, denom = fraction(factor(hs))
top, topl = numer.as_coeff_mul(i)
bot, botl = denom.as_coeff_mul(i)
ab = [top, bot]
factors = [topl, botl]
params = [[], []]
for k in range(2):
for fac in factors[k]:
mul = 1
if fac.is_Pow:
mul = fac.exp
fac = fac.base
if not mul.is_Integer:
return None
p = Poly(fac, i)
if p.degree() != 1:
return None
m, n = p.all_coeffs()
ab[k] *= m**mul
params[k] += [n/m]*mul
# Add "1" to numerator parameters, to account for implicit n! in
# hypergeometric series.
ap = params[0] + [1]
bq = params[1]
x = ab[0]/ab[1]
h = hyper(ap, bq, x)
return f.subs(i, 0)*hyperexpand(h), h.convergence_statement
def eval_sum_hyper(f, (i, a, b)):
from sympy.functions import Piecewise
from sympy import oo, And
if b != oo:
if a == -oo:
res = _eval_sum_hyper(f.subs(i, -i), i, -b)
if res is not None:
return Piecewise(res, (Sum(f, (i, a, b)), True))
else:
return None
if a == -oo:
res1 = _eval_sum_hyper(f.subs(i, -i), i, 1)
res2 = _eval_sum_hyper(f, i, 0)
if res1 is None or res2 is None:
return None
res1, cond1 = res1
res2, cond2 = res2
cond = And(cond1, cond2)
if cond is False:
return None
return Piecewise((res1 + res2, cond), (Sum(f, (i, a, b)), True))
# Now b == oo, a != -oo
res = _eval_sum_hyper(f, i, a)
if res is not None:
return Piecewise(res, (Sum(f, (i, a, b)), True))
from StringIO import StringIO
from datetime import datetime, timedelta
from mock import MagicMock
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.mock import CaseFactory
from casexml.apps.case.tests.util import check_xml_line_by_line
from casexml.apps.case.xml import V1
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from corehq.apps.app_manager.tests.util import TestXmlMixin
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.receiverwrapper.exceptions import DuplicateFormatException, IgnoreDocument
from corehq.apps.receiverwrapper.models import (
CaseRepeater,
FormRepeater,
RepeatRecord,
RegisterGenerator)
from corehq.apps.receiverwrapper.repeater_generators import BasePayloadGenerator
from couchforms.models import XFormInstance
case_id = "ABC123CASEID"
instance_id = "XKVB636DFYL38FNX3D38WV5EH"
update_instance_id = "ZYXKVB636DFYL38FNX3D38WV5"
case_block = """
<case>
<case_id>%s</case_id>
<date_modified>2011-12-19T00:00:00.000000Z</date_modified>
<create>
<case_type_id>repeater_case</case_type_id>
<user_id>O2XLT0WZW97W1A91E2W1Y0NJG</user_id>
<case_name>ABC 123</case_name>
<external_id>ABC 123</external_id>
</create>
</case>
""" % case_id
update_block = """
<case>
<case_id>%s</case_id>
<date_modified>2011-12-19T00:00:00.000000Z</date_modified>
<update>
<case_name>ABC 234</case_name>
</update>
</case>
""" % case_id
xform_xml_template = """<?xml version='1.0' ?>
<data xmlns:jrm="http://dev.commcarehq.org/jr/xforms" xmlns="https://www.commcarehq.org/test/repeater/">
<woman_name>Alpha</woman_name>
<husband_name>Beta</husband_name>
<meta>
<deviceID>O2XLT0WZW97W1A91E2W1Y0NJG</deviceID>
<timeStart>2011-10-01T15:25:18.404-04</timeStart>
<timeEnd>2011-10-01T15:26:29.551-04</timeEnd>
<username>admin</username>
<userID>O2XLT0WZW97W1A91E2W1Y0NJG</userID>
<instanceID>%s</instanceID>
</meta>
%s
</data>
"""
xform_xml = xform_xml_template % (instance_id, case_block)
update_xform_xml = xform_xml_template % (update_instance_id, update_block)
class BaseRepeaterTest(TestCase):
client = Client()
@classmethod
def post_xml(cls, xml, domain_name):
f = StringIO(xml)
f.name = 'form.xml'
cls.client.post(
reverse('receiver_post', args=[domain_name]), {
'xml_submission_file': f
}
)
@classmethod
def repeat_records(cls, domain_name):
return RepeatRecord.all(domain=domain_name, due_before=datetime.utcnow())
class RepeaterTest(BaseRepeaterTest):
def setUp(self):
self.domain = "test-domain"
create_domain(self.domain)
self.case_repeater = CaseRepeater(
domain=self.domain,
url='case-repeater-url',
version=V1,
)
self.case_repeater.save()
self.form_repeater = FormRepeater(
domain=self.domain,
url='form-repeater-url',
)
self.form_repeater.save()
self.log = []
self.post_xml(xform_xml, self.domain)
def clear_log(self):
for i in range(len(self.log)):
self.log.pop()
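# make_post_fn builds a stand-in for the repeater's HTTP POST: each call
# consumes the next status code, appends (url, status_code, data, headers)
# to self.log, and returns a stub response exposing only `.status`.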
def make_post_fn(self, status_codes):
status_codes = iter(status_codes)
def post_fn(data, url, headers=None):
status_code = status_codes.next()
self.log.append((url, status_code, data, headers))
class resp:
status = status_code
return resp
return post_fn
def tearDown(self):
self.case_repeater.delete()
self.form_repeater.delete()
XFormInstance.get(instance_id).delete()
repeat_records = RepeatRecord.all()
for repeat_record in repeat_records:
repeat_record.delete()
def test_repeater(self):
# this test should probably be divided into more units
CommCareCase.get(case_id)
def now():
return datetime.utcnow()
repeat_records = RepeatRecord.all(domain=self.domain, due_before=now())
self.assertEqual(len(repeat_records), 2)
self.clear_log()
records_by_repeater_id = {}
for repeat_record in repeat_records:
repeat_record.fire(post_fn=self.make_post_fn([404, 404, 404]))
repeat_record.save()
records_by_repeater_id[repeat_record.repeater_id] = repeat_record
for (url, status, data, headers) in self.log:
self.assertEqual(status, 404)
self.clear_log()
next_check_time = now() + timedelta(minutes=60)
repeat_records = RepeatRecord.all(
domain=self.domain,
due_before=now() + timedelta(minutes=15),
)
self.assertEqual(len(repeat_records), 0)
repeat_records = RepeatRecord.all(
domain=self.domain,
due_before=next_check_time + timedelta(seconds=2),
)
self.assertEqual(len(repeat_records), 2)
for repeat_record in repeat_records:
self.assertLess(abs(next_check_time - repeat_record.next_check),
timedelta(seconds=2))
repeat_record.fire(post_fn=self.make_post_fn([404, 200]))
repeat_record.save()
self.assertEqual(len(self.log), 4)
# The following is pretty fickle and depends on which of
# - corehq.apps.receiverwrapper.signals
# - casexml.apps.case.signals
# gets loaded first.
# This is deterministic but easily affected by minor code changes
# check case stuff
rec = records_by_repeater_id[self.case_repeater.get_id]
self.assertEqual(self.log[1][:2], (self.case_repeater.get_url(rec), 200))
self.assertIn('server-modified-on', self.log[1][3])
check_xml_line_by_line(self, self.log[1][2], case_block)
# check form stuff
rec = records_by_repeater_id[self.form_repeater.get_id]
self.assertEqual(self.log[3][:3],
(self.form_repeater.get_url(rec), 200, xform_xml))
self.assertIn('received-on', self.log[3][3])
repeat_records = RepeatRecord.all(
domain=self.domain,
due_before=next_check_time,
)
for repeat_record in repeat_records:
self.assertEqual(repeat_record.succeeded, True)
self.assertEqual(repeat_record.next_check, None)
self.assertEqual(len(self.repeat_records(self.domain)), 0)
self.post_xml(update_xform_xml, self.domain)
self.assertEqual(len(self.repeat_records(self.domain)), 2)
class CaseRepeaterTest(BaseRepeaterTest, TestXmlMixin):
@classmethod
def setUpClass(cls):
cls.domain_name = "test-domain"
cls.domain = create_domain(cls.domain_name)
cls.repeater = CaseRepeater(
domain=cls.domain_name,
url="case-repeater-url",
)
cls.repeater.save()
@classmethod
def tearDownClass(cls):
cls.domain.delete()
cls.repeater.delete()
for repeat_record in cls.repeat_records(cls.domain_name):
repeat_record.delete()
def test_case_close_format(self):
# create a case
self.post_xml(xform_xml, self.domain_name)
payload = self.repeat_records(self.domain_name).all()[0].get_payload()
self.assertXmlHasXpath(payload, '//*[local-name()="case"]')
self.assertXmlHasXpath(payload, '//*[local-name()="create"]')
# close the case
CaseFactory().close_case(case_id)
close_payload = self.repeat_records(self.domain_name).all()[1].get_payload()
self.assertXmlHasXpath(close_payload, '//*[local-name()="case"]')
self.assertXmlHasXpath(close_payload, '//*[local-name()="close"]')
self.assertXmlHasXpath(close_payload, '//*[local-name()="update"]')
class IgnoreDocumentTest(BaseRepeaterTest):
def setUp(self):
self.domain = "test-domain"
create_domain(self.domain)
self.repeater = FormRepeater(
domain=self.domain,
url='form-repeater-url',
version=V1,
format='new_format'
)
self.repeater.save()
def tearDown(self):
self.repeater.delete()
repeat_records = RepeatRecord.all()
for repeat_record in repeat_records:
repeat_record.delete()
def test_ignore_document(self):
"""
When get_payload raises IgnoreDocument, fire should call update_success
"""
@RegisterGenerator(FormRepeater, 'new_format', 'XML')
class NewFormGenerator(BasePayloadGenerator):
def get_payload(self, repeat_record, payload_doc):
raise IgnoreDocument
repeat_records = RepeatRecord.all(
domain=self.domain,
)
for repeat_record_ in repeat_records:
repeat_record_.fire()
self.assertIsNone(repeat_record_.next_check)
self.assertTrue(repeat_record_.succeeded)
class TestRepeaterFormat(BaseRepeaterTest):
def setUp(self):
self.domain = "test-domain"
create_domain(self.domain)
self.post_xml(xform_xml, self.domain)
self.repeater = CaseRepeater(
domain=self.domain,
url='case-repeater-url',
version=V1,
format='new_format'
)
self.repeater.save()
def tearDown(self):
self.repeater.delete()
XFormInstance.get(instance_id).delete()
repeat_records = RepeatRecord.all()
for repeat_record in repeat_records:
repeat_record.delete()
def test_new_format_same_name(self):
with self.assertRaises(DuplicateFormatException):
@RegisterGenerator(CaseRepeater, 'case_xml', 'XML', is_default=False)
class NewCaseGenerator(BasePayloadGenerator):
def get_payload(self, repeat_record, payload_doc):
return "some random case"
def test_new_format_second_default(self):
with self.assertRaises(DuplicateFormatException):
@RegisterGenerator(CaseRepeater, 'rubbish', 'XML', is_default=True)
class NewCaseGenerator(BasePayloadGenerator):
def get_payload(self, repeat_record, payload_doc):
return "some random case"
def test_new_format_payload(self):
payload = "some random case"
@RegisterGenerator(CaseRepeater, 'new_format', 'XML')
class NewCaseGenerator(BasePayloadGenerator):
def get_payload(self, repeat_record, payload_doc):
return payload
repeat_record = self.repeater.register(case_id)
post_fn = MagicMock()
repeat_record.fire(post_fn=post_fn)
headers = self.repeater.get_headers(repeat_record)
post_fn.assert_called_with(payload, self.repeater.url, headers=headers)
class RepeaterLockTest(TestCase):
def testLocks(self):
r = RepeatRecord(domain='test')
r.save()
r2 = RepeatRecord.get(r._id)
self.assertTrue(r.acquire_lock(datetime.utcnow()))
r3 = RepeatRecord.get(r._id)
self.assertFalse(r2.acquire_lock(datetime.utcnow()))
self.assertFalse(r3.acquire_lock(datetime.utcnow()))
r.release_lock()
r4 = RepeatRecord.get(r._id)
self.assertTrue(r4.acquire_lock(datetime.utcnow()))
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import json
import ddt
import falcon
import six
from . import base # noqa
from marconi import tests as testing
@ddt.ddt
class QueueLifecycleBaseTest(base.TestBase):
config_file = None
# NOTE(flaper87): This is temporary. Ideally, each version
# of the API should have its own lifecycle tests. The v1.1
# of the API removes support for queue metadata. Although most of
# the test methods were overridden in the test definition, there
# are some that need to be disabled for other tests to be
# executed. Also, ddt plays dirty and makes it impossible to override
# a test case.
metadata_support = True
def setUp(self):
super(QueueLifecycleBaseTest, self).setUp()
self.queue_path = self.url_prefix + '/queues'
self.gumshoe_queue_path = self.queue_path + '/gumshoe'
self.fizbat_queue_path = self.queue_path + '/fizbat'
self.fizbat_queue_path_metadata = self.fizbat_queue_path + '/metadata'
def test_empty_project_id(self):
self.simulate_get(self.gumshoe_queue_path, '')
self.assertEqual(self.srmock.status, falcon.HTTP_400)
self.simulate_put(self.gumshoe_queue_path, '')
self.assertEqual(self.srmock.status, falcon.HTTP_400)
self.simulate_head(self.gumshoe_queue_path, '')
self.assertEqual(self.srmock.status, falcon.HTTP_400)
self.simulate_delete(self.gumshoe_queue_path, '')
self.assertEqual(self.srmock.status, falcon.HTTP_400)
@ddt.data('480924', 'foo', None)
def test_basics_thoroughly(self, project_id):
gumshoe_queue_path_metadata = self.gumshoe_queue_path + '/metadata'
gumshoe_queue_path_stats = self.gumshoe_queue_path + '/stats'
# Stats not found - queue not created yet
self.simulate_get(gumshoe_queue_path_stats, project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_404)
# Metadata not found - queue not created yet
self.simulate_get(gumshoe_queue_path_metadata, project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_404)
# Create
self.simulate_put(self.gumshoe_queue_path, project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_201)
location = self.srmock.headers_dict['Location']
self.assertEqual(location, self.gumshoe_queue_path)
# Ensure queue existence
self.simulate_head(self.gumshoe_queue_path, project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_204)
# Add metadata
if self.metadata_support:
doc = '{"messages": {"ttl": 600}}'
self.simulate_put(gumshoe_queue_path_metadata,
project_id, body=doc)
self.assertEqual(self.srmock.status, falcon.HTTP_204)
# Fetch metadata
result = self.simulate_get(gumshoe_queue_path_metadata,
project_id)
result_doc = json.loads(result[0])
self.assertEqual(self.srmock.status, falcon.HTTP_200)
self.assertEqual(result_doc, json.loads(doc))
# Stats empty queue
self.simulate_get(gumshoe_queue_path_stats, project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_200)
# Delete
self.simulate_delete(self.gumshoe_queue_path, project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_204)
# Get non-existent queue
self.simulate_get(self.gumshoe_queue_path, project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_404)
# Get non-existent stats
self.simulate_get(gumshoe_queue_path_stats, project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_404)
# Get non-existent metadata
if self.metadata_support:
self.simulate_get(gumshoe_queue_path_metadata, project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_404)
def test_name_restrictions(self):
self.simulate_put(self.queue_path + '/Nice-Boat_2')
self.assertEqual(self.srmock.status, falcon.HTTP_201)
self.simulate_put(self.queue_path + '/Nice-Bo@t')
self.assertEqual(self.srmock.status, falcon.HTTP_400)
self.simulate_put(self.queue_path + '/_' + 'niceboat' * 8)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
def test_project_id_restriction(self):
muvluv_queue_path = self.queue_path + '/Muv-Luv'
self.simulate_put(muvluv_queue_path,
headers={'X-Project-ID': 'JAM Project' * 24})
self.assertEqual(self.srmock.status, falcon.HTTP_400)
# no charset restrictions
self.simulate_put(muvluv_queue_path,
headers={'X-Project-ID': 'JAM Project'})
self.assertEqual(self.srmock.status, falcon.HTTP_201)
def test_non_ascii_name(self):
test_params = ((u'/queues/non-ascii-n\u0153me', 'utf-8'),
(u'/queues/non-ascii-n\xc4me', 'iso8859-1'))
for uri, enc in test_params:
uri = self.url_prefix + uri
if six.PY2:
uri = uri.encode(enc)
self.simulate_put(uri)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
self.simulate_get(uri)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
self.simulate_delete(uri)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
def test_no_metadata(self):
self.simulate_put(self.fizbat_queue_path)
self.assertEqual(self.srmock.status, falcon.HTTP_201)
self.simulate_put(self.fizbat_queue_path_metadata)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
self.simulate_put(self.fizbat_queue_path_metadata, body='')
self.assertEqual(self.srmock.status, falcon.HTTP_400)
@ddt.data('{', '[]', '.', ' ', '')
def test_bad_metadata(self, document):
if not self.metadata_support:
return
self.simulate_put(self.fizbat_queue_path, '7e55e1a7e')
self.assertEqual(self.srmock.status, falcon.HTTP_201)
self.simulate_put(self.fizbat_queue_path_metadata, '7e55e1a7e',
body=document)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
def test_too_much_metadata(self):
self.simulate_put(self.fizbat_queue_path, '7e55e1a7e')
self.assertEqual(self.srmock.status, falcon.HTTP_201)
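# Build a metadata document sized to land just over max_queue_metadata
# once the escaped braces and the {pad} placeholder in the template
# collapse; the server should reject it with a 400.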
doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
max_size = self.transport_cfg.max_queue_metadata
padding_len = max_size - (len(doc) - 10) + 1
doc = doc.format(pad='x' * padding_len)
self.simulate_put(self.fizbat_queue_path_metadata, '7e55e1a7e',
body=doc)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
def test_way_too_much_metadata(self):
self.simulate_put(self.fizbat_queue_path, '7e55e1a7e')
self.assertEqual(self.srmock.status, falcon.HTTP_201)
doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
max_size = self.transport_cfg.max_queue_metadata
padding_len = max_size * 100
doc = doc.format(pad='x' * padding_len)
self.simulate_put(self.fizbat_queue_path_metadata,
'7e55e1a7e', body=doc)
self.assertEqual(self.srmock.status, falcon.HTTP_400)
def test_custom_metadata(self):
self.simulate_put(self.fizbat_queue_path, '480924')
self.assertEqual(self.srmock.status, falcon.HTTP_201)
# Set
doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'
max_size = self.transport_cfg.max_queue_metadata
padding_len = max_size - (len(doc) - 2)
doc = doc.format(pad='x' * padding_len)
self.simulate_put(self.fizbat_queue_path_metadata, '480924', body=doc)
self.assertEqual(self.srmock.status, falcon.HTTP_204)
# Get
result = self.simulate_get(self.fizbat_queue_path_metadata, '480924')
result_doc = json.loads(result[0])
self.assertEqual(result_doc, json.loads(doc))
self.assertEqual(self.srmock.status, falcon.HTTP_200)
def test_update_metadata(self):
xyz_queue_path = self.url_prefix + '/queues/xyz'
xyz_queue_path_metadata = xyz_queue_path + '/metadata'
# Create
project_id = '480924'
self.simulate_put(xyz_queue_path, project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_201)
# Set meta
doc1 = '{"messages": {"ttl": 600}}'
self.simulate_put(xyz_queue_path_metadata, project_id, body=doc1)
self.assertEqual(self.srmock.status, falcon.HTTP_204)
# Update
doc2 = '{"messages": {"ttl": 100}}'
self.simulate_put(xyz_queue_path_metadata, project_id, body=doc2)
self.assertEqual(self.srmock.status, falcon.HTTP_204)
# Get
result = self.simulate_get(xyz_queue_path_metadata, project_id)
result_doc = json.loads(result[0])
self.assertEqual(result_doc, json.loads(doc2))
self.assertEqual(self.srmock.headers_dict['Content-Location'],
xyz_queue_path_metadata)
def test_list(self):
arbitrary_number = 644079696574693
project_id = str(arbitrary_number)
# NOTE(kgriffs): It's important that this one sort after the one
# above. This is in order to prove that bug/1236605 is fixed, and
# stays fixed!
alt_project_id = str(arbitrary_number + 1)
# List empty
self.simulate_get(self.queue_path, project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_204)
# Payload exceeded
self.simulate_get(self.queue_path, project_id, query_string='limit=21')
self.assertEqual(self.srmock.status, falcon.HTTP_400)
# Create some
def create_queue(name, project_id, body):
uri = self.queue_path + '/' + name
self.simulate_put(uri, project_id)
if self.metadata_support:
self.simulate_put(uri + '/metadata', project_id, body=body)
create_queue('g1', None, '{"answer": 42}')
create_queue('g2', None, '{"answer": 42}')
create_queue('q1', project_id, '{"node": 31}')
create_queue('q2', project_id, '{"node": 32}')
create_queue('q3', project_id, '{"node": 33}')
create_queue('q3', alt_project_id, '{"alt": 1}')
# List (global queues)
result = self.simulate_get(self.queue_path, None,
query_string='limit=2&detailed=true')
result_doc = json.loads(result[0])
queues = result_doc['queues']
self.assertEqual(len(queues), 2)
if self.metadata_support:
for queue in queues:
self.assertEqual(queue['metadata'], {'answer': 42})
# List (limit)
result = self.simulate_get(self.queue_path, project_id,
query_string='limit=2')
result_doc = json.loads(result[0])
self.assertEqual(len(result_doc['queues']), 2)
# List (no metadata, get all)
result = self.simulate_get(self.queue_path,
project_id, query_string='limit=5')
result_doc = json.loads(result[0])
[target, params] = result_doc['links'][0]['href'].split('?')
self.assertEqual(self.srmock.status, falcon.HTTP_200)
self.assertEqual(self.srmock.headers_dict['Content-Location'],
self.queue_path + '?limit=5')
# Ensure we didn't pick up the queue from the alt project.
queues = result_doc['queues']
self.assertEqual(len(queues), 3)
for queue in queues:
self.simulate_get(queue['href'] + '/metadata', project_id)
self.assertEqual(self.srmock.status, falcon.HTTP_200)
self.simulate_get(queue['href'] + '/metadata', 'imnothere')
self.assertEqual(self.srmock.status, falcon.HTTP_404)
self.assertNotIn('metadata', queue)
# List with metadata
result = self.simulate_get(self.queue_path, project_id,
query_string='detailed=true')
self.assertEqual(self.srmock.status, falcon.HTTP_200)
result_doc = json.loads(result[0])
[target, params] = result_doc['links'][0]['href'].split('?')
if self.metadata_support:
queue = result_doc['queues'][0]
result = self.simulate_get(queue['href'] + '/metadata', project_id)
result_doc = json.loads(result[0])
self.assertEqual(result_doc, queue['metadata'])
self.assertEqual(result_doc, {'node': 31})
# List tail
self.simulate_get(target, project_id, query_string=params)
self.assertEqual(self.srmock.status, falcon.HTTP_204)
# List manually-constructed tail
self.simulate_get(target, project_id, query_string='marker=zzz')
self.assertEqual(self.srmock.status, falcon.HTTP_204)
class TestQueueLifecycleMongoDB(QueueLifecycleBaseTest):
config_file = 'wsgi_mongodb.conf'
@testing.requires_mongodb
def setUp(self):
super(TestQueueLifecycleMongoDB, self).setUp()
def tearDown(self):
storage = self.boot.storage._storage
connection = storage.connection
connection.drop_database(storage.queues_database)
for db in storage.message_databases:
connection.drop_database(db)
super(TestQueueLifecycleMongoDB, self).tearDown()
class TestQueueLifecycleSqlalchemy(QueueLifecycleBaseTest):
config_file = 'wsgi_sqlalchemy.conf'
class TestQueueLifecycleFaultyDriver(base.TestBaseFaulty):
config_file = 'wsgi_faulty.conf'
def test_simple(self):
gumshoe_queue_path = self.url_prefix + '/queues/gumshoe'
doc = '{"messages": {"ttl": 600}}'
self.simulate_put(gumshoe_queue_path, '480924', body=doc)
self.assertEqual(self.srmock.status, falcon.HTTP_503)
location = ('Location', gumshoe_queue_path)
self.assertNotIn(location, self.srmock.headers)
result = self.simulate_get(gumshoe_queue_path + '/metadata', '480924')
result_doc = json.loads(result[0])
self.assertEqual(self.srmock.status, falcon.HTTP_503)
self.assertNotEqual(result_doc, json.loads(doc))
self.simulate_get(gumshoe_queue_path + '/stats', '480924')
self.assertEqual(self.srmock.status, falcon.HTTP_503)
self.simulate_get(self.url_prefix + '/queues', '480924')
self.assertEqual(self.srmock.status, falcon.HTTP_503)
self.simulate_delete(gumshoe_queue_path, '480924')
self.assertEqual(self.srmock.status, falcon.HTTP_503)
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import webob
from cinder.api import extensions
from cinder.api.v1 import snapshot_metadata
from cinder.api.v1 import snapshots
import cinder.db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder import test
from cinder.tests.api import fakes
CONF = cfg.CONF
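# The return_* and stub_* helpers below replace cinder.db calls via
# self.stubs.Set(...) in SnapshotMetaDataTest, so the snapshot metadata
# controller under test never touches a real database.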
def return_create_snapshot_metadata_max(context,
snapshot_id,
metadata,
delete):
return stub_max_snapshot_metadata()
def return_create_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_snapshot_metadata()
def return_create_snapshot_metadata_insensitive(context, snapshot_id,
metadata, delete):
return stub_snapshot_metadata_insensitive()
def return_new_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_new_snapshot_metadata()
def return_snapshot_metadata(context, snapshot_id):
if not isinstance(snapshot_id, str) or not len(snapshot_id) == 36:
msg = 'id %s must be a uuid in return snapshot metadata' % snapshot_id
raise Exception(msg)
return stub_snapshot_metadata()
def return_empty_snapshot_metadata(context, snapshot_id):
return {}
def return_empty_container_metadata(context, snapshot_id, metadata, delete):
return {}
def delete_snapshot_metadata(context, snapshot_id, key):
pass
def stub_snapshot_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_snapshot_metadata_insensitive():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4",
}
return metadata
def stub_new_snapshot_metadata():
metadata = {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
}
return metadata
def stub_max_snapshot_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_snapshot(context, snapshot_id):
return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'status': 'available',
'metadata': {}}
def return_volume(context, volume_id):
return {'id': 'fake-vol-id',
'size': 100,
'name': 'fake',
'host': 'fake-host',
'status': 'available',
'encryption_key_id': None,
'volume_type_id': None,
'migration_status': None,
'metadata': {}}
def return_snapshot_nonexistent(context, snapshot_id):
raise exception.SnapshotNotFound('bogus test message')
def fake_update_snapshot_metadata(self, context, snapshot, diff):
pass
class SnapshotMetaDataTest(test.TestCase):
def setUp(self):
super(SnapshotMetaDataTest, self).setUp()
self.volume_api = cinder.volume.api.API()
fakes.stub_out_key_pair_funcs(self.stubs)
self.stubs.Set(cinder.db, 'volume_get', return_volume)
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot)
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(self.volume_api, 'update_snapshot_metadata',
fake_update_snapshot_metadata)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr)
self.controller = snapshot_metadata.Controller()
self.req_id = str(uuid.uuid4())
self.url = '/v1/fake/snapshots/%s/metadata' % self.req_id
snap = {"volume_size": 100,
"volume_id": "fake-vol-id",
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"host": "fake-host",
"metadata": {}}
body = {"snapshot": snap}
req = fakes.HTTPRequest.blank('/v1/snapshots')
self.snapshot_controller.create(req, body)
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.req_id, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key6')
def test_delete(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_delete',
delete_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.req_id, 'key2')
self.assertEqual(200, res.status_int)
def test_delete_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key6')
def test_create(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v1/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(body, res_dict)
def test_create_with_keys_in_uppercase_and_lowercase(self):
# if keys are given in both uppercase and lowercase, the response
# should contain only the ones the server actually stored
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata_insensitive)
req = fakes.HTTPRequest.blank('/v1/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"KEY1": "value1",
"key2": "value2",
"KEY2": "value2",
"key3": "value3",
"KEY4": "value4"}}
expected = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_create_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, None)
def test_create_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, body)
def test_create_item_key_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.req_id, body)
def test_create_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v1/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.req_id, body)
def test_update_all(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_create_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_with_keys_in_uppercase_and_lowercase(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_create_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {
'metadata': {
'key10': 'value10',
'KEY10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_empty_container_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_malformed_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.req_id, 'key1', body)
expected = {'meta': {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(
'/v1.1/fake/snapshots/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
None)
def test_update_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, '', body)
def test_update_item_key_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, ("a" * 260), body)
def test_update_item_value_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, "key1", body)
def test_update_item_too_many_keys(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'bad',
body)
def test_invalid_metadata_items_on_create(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
#test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, data)
__author__ = 'rcj1492'
__created__ = '2016.03'
__license__ = 'MIT'
import os
from jsonmodel.validators import jsonModel
from socket import gethostname, gethostbyname
class osClient(object):
''' a class of methods for retrieving local os properties '''
def __init__(self):
''' initialization method for osClient class '''
# construct empty attributes
self.sysname = ''
self.nodename = ''
self.release = ''
self.version = ''
self.machine = ''
self.processor = ''
# populate attributes from the platform uname() system call
from platform import uname
local_os = uname()
if local_os.system:
self.sysname = local_os.system
if local_os.node:
self.nodename = local_os.node
if local_os.release:
self.release = local_os.release
if local_os.version:
self.version = local_os.version
if local_os.machine:
self.machine = local_os.machine
if local_os.processor:
self.processor = local_os.processor
# from os import uname
# local_os = uname()
# if local_os.sysname:
# self.sysname = local_os.sysname
# if local_os.nodename:
# self.nodename = local_os.nodename
# if local_os.release:
# self.release = local_os.release
# if local_os.version:
# self.version = local_os.version
# if local_os.machine:
# self.machine = local_os.machine
class localhostClient(object):
''' a class of methods to interact with the localhost '''
_class_fields = {
'schema': {
'org_name': 'Collective Acuity',
'prod_name': 'labPack',
'walk_root': '../',
'list_root': '../',
'max_results': 1,
'previous_file': '/home/user/.config/collective-acuity-labpack/user-data/test.json',
'file_path': '/home/user/.config/collective-acuity-labpack/user-data/test.json',
'query_root': '../',
'metadata_filters': [ {
'.file_name': {},
'.file_path': {},
'.file_size': {},
'.create_date': {},
'.update_date': {},
'.access_date': {}
} ]
},
'components': {
'.org_name': {
'must_not_contain': ['/']
},
'.prod_name': {
'must_not_contain': ['/']
},
'.max_results': {
'min_value': 1,
'integer_data': True
}
}
}
def __init__(self):
''' a method to initialize a client class to interact with the localhost '''
# construct class field input validation property
self.fields = jsonModel(self._class_fields)
# retrieve operating system from localhost
self.os = osClient()
# TODO: determine file system and parameters
# TODO: request latest info from
# https://en.wikipedia.org/wiki/Comparison_of_file_systems#Limits
# retrieve IP from system
self.os.nodename = gethostname()
self.ip = gethostbyname(self.os.nodename)
# retrieve environment variables from system
self.environ = dict(os.environ.items())
# retrieve path to user home
self.home = ''
if self.os.sysname == 'Windows':
env_username = os.environ.get('USERNAME')
from re import compile
xp_pattern = compile(r'^C:\\Documents and Settings')
app_data = ''
if os.environ.get('APPDATA'):
app_data = os.environ.get('APPDATA')
if xp_pattern.findall(app_data):
self.home = 'C:\\Documents and Settings\\%s' % env_username
else:
self.home = 'C:\\Users\\%s' % env_username
elif self.os.sysname in ('Linux', 'FreeBSD', 'Solaris', 'Darwin'):
self.home = os.path.expanduser('~')
# retrieve path to shell configs
self.bash_config = ''
self.sh_config = ''
if self.os.sysname == 'Windows':
bash_config = '.bash_profile'
sh_config = ''
else:
bash_config = '.bashrc'
sh_config = '.cshrc'
if bash_config:
self.bash_config = os.path.join(self.home, bash_config)
if sh_config:
self.sh_config = os.path.join(self.home, sh_config)
# TODO check different terminal protocols
# construct file record model property
file_model = {
'schema': {
'file_name': 'test.json',
'file_path': '/home/user/.config/collective-acuity-labpack/user-data/test.json',
'file_size': 678,
'create_date': 1474509314.419702,
'update_date': 1474509314.419702,
'access_date': 1474509314.419702
},
'components': {
'.file_size': {
'integer_data': True
}
}
}
self.file_model = jsonModel(file_model)
def app_data(self, org_name, prod_name):
''' a method to retrieve the os appropriate path to user app data
# https://www.chromium.org/user-experience/user-data-directory
:param org_name: string with name of product/service creator
:param prod_name: string with name of product/service
:return: string with path to app data
'''
__name__ = '%s.app_data' % self.__class__.__name__
# validate inputs
org_name = self.fields.validate(org_name, '.org_name')
prod_name = self.fields.validate(prod_name, '.prod_name')
# construct empty fields
data_path = ''
# construct path from os
if self.os.sysname == 'Windows':
from re import compile
xp_pattern = compile(r'^C:\\Documents and Settings')
app_data = ''
if os.environ.get('APPDATA'):
app_data = os.environ.get('APPDATA')
if xp_pattern.findall(app_data):
data_path = '%s\\Local Settings\\Application Data\\%s\\%s' % (self.home, org_name, prod_name)
else:
data_path = '%s\\AppData\\Local\\%s\\%s' % (self.home, org_name, prod_name)
elif self.os.sysname == 'Darwin':
data_path = '%s/Library/Application Support/%s/%s/' % (self.home, org_name, prod_name)
elif self.os.sysname in ('Linux', 'FreeBSD', 'Solaris'):
org_format = org_name.replace(' ','-').lower()
prod_format = prod_name.replace(' ', '-').lower()
data_path = '%s/.config/%s-%s/' % (self.home, org_format, prod_format)
return data_path
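# Example (illustrative paths, assuming a Linux localhost):
#   localhostClient().app_data('Collective Acuity', 'labPack')
#   # -> '/home/<user>/.config/collective-acuity-labpack/'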
def walk(self, walk_root='', reverse_order=False, previous_file=''):
''' a generator method of file paths on localhost from walk of directories
:param walk_root: string with path from which to root walk of localhost directories
:param reverse_order: boolean to determine alphabetical direction of walk
:param previous_file: string with path of file after which to start walk
:return: string with absolute path to file
'''
__name__ = '%s.walk(...)' % self.__class__.__name__
# validate input
input_kwargs = [walk_root, previous_file]
input_names = ['.walk_root', '.previous_file']
for i in range(len(input_kwargs)):
if input_kwargs[i]:
self.fields.validate(input_kwargs[i], input_names[i])
# validate that previous file exists
file_exists = False
previous_found = False
if previous_file:
if os.path.exists(previous_file):
if os.path.isfile(previous_file):
file_exists = True
previous_file = os.path.abspath(previous_file)
if not file_exists:
err_msg = __name__.replace('...', 'previous_file="%s"' % previous_file)
raise ValueError('%s must be a valid file.' % err_msg)
# construct empty result
file_path = ''
# determine root for walk
if walk_root:
if not os.path.isdir(walk_root):
err_msg = __name__.replace('...', 'walk_root="%s"' % walk_root)
raise ValueError('%s must be a valid directory.' % err_msg)
else:
walk_root = './'
# walk directory structure to find files
for current_dir, sub_dirs, dir_files in os.walk(walk_root):
dir_files.sort()
sub_dirs.sort()
if reverse_order:
sub_dirs.reverse()
dir_files.reverse()
if previous_file and not previous_found:
key_path = previous_file.split(os.sep)
current_path = os.path.abspath(current_dir)
for i in range(len(current_path.split(os.sep))):
del key_path[0]
if key_path:
if key_path[0] in sub_dirs:
path_index = sub_dirs.index(key_path[0])
sub_dirs[0:path_index] = []
dir_files = []
elif key_path[0] in dir_files:
file_index = dir_files.index(key_path[0]) + 1
dir_files[0:file_index] = []
previous_found = True
# yield file path
for file in dir_files:
file_path = os.path.join(os.path.abspath(current_dir), file)
yield file_path
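# a hedged usage sketch (illustrative paths, client name assumed as above): walk
# is a generator, so iteration can resume after a previously seen file:
#
#   >>> for file_path in client.walk('./data', previous_file='./data/alpha.json'):
#   ...     print(file_path)   # yields absolute paths alphabetically after alpha.json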
def metadata(self, file_path):
''' a method to retrieve the metadata of a file on the localhost
:param file_path: string with path to file
:return: dictionary with file properties
'''
__name__ = '%s.metadata(...)' % self.__class__.__name__
# validate input
self.fields.validate(file_path, '.file_path')
file_exists = False
if os.path.exists(file_path):
if os.path.isfile(file_path):
file_exists = True
if not file_exists:
err_msg = __name__.replace('...', 'file_path=%s' % file_path)
raise ValueError('%s must be a valid file.' % err_msg)
# construct metadata dictionary
abs_path = os.path.abspath(file_path)
file_stats = os.stat(file_path)
file_metadata = {
'path_segments': abs_path.split(os.sep),
'file_name': os.path.split(abs_path)[1],
'file_path': abs_path,
'file_size': file_stats.st_size,
'create_date': file_stats.st_ctime,
'update_date': file_stats.st_mtime,
'access_date': file_stats.st_atime
}
return file_metadata
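# a hedged usage sketch (values illustrative); the keys mirror self.file_model.schema
# plus the path_segments list:
#
#   >>> client.metadata('test.json')
#   {'path_segments': ['', 'home', 'user', 'test.json'], 'file_name': 'test.json',
#    'file_path': '/home/user/test.json', 'file_size': 678,
#    'create_date': 1474509314.42, 'update_date': 1474509314.42,
#    'access_date': 1474509314.42}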
def conditional_filter(self, metadata_filters):
''' a method to construct a conditional filter function for the list method
:param metadata_filters: list with query criteria dictionaries
:return: filter_function object
NOTE: query criteria architecture
each item in the metadata filters list must be a dictionary
which is composed of one or more key names which represent the
dotpath to a metadata element of the record to be queried with a
key value that is a dictionary of conditional operators used to
test the value in the corresponding metadata field of the record.
eg. path_filters = [ { '.file_name': { 'must_contain': [ '^lab' ] } } ]
this example filter looks in the file tree that is walked for a
file which starts with the characters 'lab'. as a result, it will
match both the following:
log/unittests/test/20160912/lab.json
laboratory20160912.json
NOTE: the filter method uses a query filters list structure to represent
the disjunctive normal form of a logical expression. a record is
added to the results list if any query criteria dictionary in the
list evaluates to true. within each query criteria dictionary, all
declared conditional operators must evaluate to true.
in this way, the metadata_filters represents a boolean OR operator and
each criteria dictionary inside the list represents a boolean AND
operator between all keys in the dictionary.
NOTE: each query_criteria uses the architecture of query declaration in
the jsonModel.query method
query_criteria = {
'.file_name': {},
'.file_path': {},
'.file_size': {},
'.create_date': {},
'.update_date': {},
'.access_date': {}
}
conditional operators for '.file_name' and '.file_path' fields:
"byte_data": false,
"discrete_values": [ "" ],
"excluded_values": [ "" ],
"greater_than": "",
"less_than": "",
"max_length": 0,
"max_value": "",
"min_length": 0,
"min_value": "",
"must_contain": [ "" ],
"must_not_contain": [ "" ],
"contains_either": [ "" ]
conditional operators for '.file_size', '.create_date', '.update_date', '.access_date':
"discrete_values": [ 0.0 ],
"excluded_values": [ 0.0 ],
"greater_than": 0.0,
"integer_data": false,
"less_than": 0.0,
"max_value": 0.0,
"min_value": 0.0
'''
# validate input
self.fields.validate(metadata_filters, '.metadata_filters')
# construct function called by list function
def query_function(**kwargs):
file_metadata = {}
for key, value in kwargs.items():
if key in self.file_model.schema.keys():
file_metadata[key] = value
for query_criteria in metadata_filters:
if self.file_model.query(query_criteria, file_metadata):
return True
return False
return query_function
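# a hedged usage sketch of the disjunctive normal form described above: a file
# matches if EITHER criteria dictionary matches, and within one dictionary ALL
# declared operators must hold (filter values illustrative):
#
#   >>> metadata_filters = [
#   ...     {'.file_name': {'must_contain': ['^lab']}, '.file_size': {'max_value': 1024}},
#   ...     {'.file_path': {'must_contain': ['/logs/']}}
#   ... ]
#   >>> filter_function = client.conditional_filter(metadata_filters)
#   >>> filter_function(file_name='lab.json', file_path='/tmp/lab.json', file_size=678,
#   ...                 create_date=0.0, update_date=0.0, access_date=0.0)
#   True   # the first criteria dict matches under jsonModel's query semantics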
def list(self, filter_function=None, list_root='', max_results=1, reverse_order=False, previous_file=''):
''' a method to list files on localhost from walk of directories
:param filter_function: (keyword arguments) function used to filter results
:param list_root: string with localhost path from which to root list of files
:param max_results: integer with maximum number of results to return
:param reverse_order: boolean to determine alphabetical direction of walk
:param previous_file: string with absolute path of file to begin search after
:return: list of file absolute path strings
NOTE: the filter_function must be able to accept keyword arguments and
return a value that can evaluate to true or false. while walking
the local file structure, the metadata for each file will be
fed to the filter function. if the function evaluates this input
and returns a true value the file will be included in the list
results.
fields produced by the metadata function are listed in the
self.file_model.schema
'''
__name__ = '%s.list(...)' % self.__class__.__name__
# validate input
input_kwargs = [list_root, max_results, previous_file]
input_names = ['.list_root', '.max_results', '.previous_file']
for i in range(len(input_kwargs)):
if input_kwargs[i]:
self.fields.validate(input_kwargs[i], input_names[i])
# validate filter function
if filter_function:
try:
filter_function(**self.file_model.schema)
except Exception:
err_msg = __name__.replace('...', 'filter_function=%s' % filter_function.__class__.__name__)
raise TypeError('%s must accept keyword arguments.' % err_msg)
# validate that previous file exists
file_exists = False
if previous_file:
if os.path.exists(previous_file):
if os.path.isfile(previous_file):
file_exists = True
if not file_exists:
err_msg = __name__.replace('...', 'previous_file="%s"' % previous_file)
raise ValueError('%s must be a valid file.' % err_msg)
# construct empty results object
results_list = []
# determine root for walk
if list_root:
if not os.path.isdir(list_root):
return results_list
else:
list_root = './'
# walk directory structure to find files
for file_path in self.walk(list_root, reverse_order, previous_file):
if filter_function:
file_metadata = self.metadata(file_path)
if filter_function(**file_metadata):
results_list.append(file_path)
else:
results_list.append(file_path)
# stop early once the maximum number of results has been collected
if len(results_list) == max_results:
return results_list
# return results list
return results_list
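# a hedged usage sketch tying list and conditional_filter together (paths and
# output illustrative):
#
#   >>> json_filter = client.conditional_filter([{'.file_name': {'must_contain': ['\\.json$']}}])
#   >>> client.list(filter_function=json_filter, list_root='./data', max_results=10)
#   ['/home/user/data/alpha.json', '/home/user/data/beta.json']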
|
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
from nova import context
from nova import test
class ContextTestCase(test.NoDBTestCase):
def setUp(self):
super(ContextTestCase, self).setUp()
self.useFixture(o_fixture.ClearRequestContext())
def test_request_context_elevated(self):
user_ctxt = context.RequestContext('111',
'222',
admin=False)
self.assertFalse(user_ctxt.is_admin)
admin_ctxt = user_ctxt.elevated()
self.assertTrue(admin_ctxt.is_admin)
self.assertIn('admin', admin_ctxt.roles)
self.assertFalse(user_ctxt.is_admin)
self.assertNotIn('admin', user_ctxt.roles)
def test_request_context_sets_is_admin(self):
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
self.assertEqual(ctxt.is_admin, True)
def test_request_context_sets_is_admin_by_role(self):
ctxt = context.RequestContext('111',
'222',
roles=['administrator'])
self.assertEqual(ctxt.is_admin, True)
def test_request_context_sets_is_admin_upcase(self):
ctxt = context.RequestContext('111',
'222',
roles=['Admin', 'weasel'])
self.assertEqual(ctxt.is_admin, True)
def test_request_context_read_deleted(self):
ctxt = context.RequestContext('111',
'222',
read_deleted='yes')
self.assertEqual(ctxt.read_deleted, 'yes')
ctxt.read_deleted = 'no'
self.assertEqual(ctxt.read_deleted, 'no')
def test_request_context_read_deleted_invalid(self):
self.assertRaises(ValueError,
context.RequestContext,
'111',
'222',
read_deleted=True)
ctxt = context.RequestContext('111', '222')
self.assertRaises(ValueError,
setattr,
ctxt,
'read_deleted',
True)
def test_extra_args_to_context_get_logged(self):
info = {}
def fake_warn(log_msg):
info['log_msg'] = log_msg
self.stubs.Set(context.LOG, 'warning', fake_warn)
c = context.RequestContext('user', 'project',
extra_arg1='meow', extra_arg2='wuff')
self.assertTrue(c)
self.assertIn("'extra_arg1': 'meow'", info['log_msg'])
self.assertIn("'extra_arg2': 'wuff'", info['log_msg'])
def test_service_catalog_default(self):
ctxt = context.RequestContext('111', '222')
self.assertEqual(ctxt.service_catalog, [])
ctxt = context.RequestContext('111', '222',
service_catalog=[])
self.assertEqual(ctxt.service_catalog, [])
ctxt = context.RequestContext('111', '222',
service_catalog=None)
self.assertEqual(ctxt.service_catalog, [])
def test_service_catalog_cinder_only(self):
service_catalog = [
{u'type': u'compute', u'name': u'nova'},
{u'type': u's3', u'name': u's3'},
{u'type': u'image', u'name': u'glance'},
{u'type': u'volume', u'name': u'cinder'},
{u'type': u'ec2', u'name': u'ec2'},
{u'type': u'object-store', u'name': u'swift'},
{u'type': u'identity', u'name': u'keystone'},
{u'type': None, u'name': u'S_withouttype'},
{u'type': u'vo', u'name': u'S_partofvolume'}]
volume_catalog = [{u'type': u'volume', u'name': u'cinder'}]
ctxt = context.RequestContext('111', '222',
service_catalog=service_catalog)
self.assertEqual(ctxt.service_catalog, volume_catalog)
def test_to_dict_from_dict_no_log(self):
warns = []
def stub_warn(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
warns.append(str(msg) % a)
self.stubs.Set(context.LOG, 'warn', stub_warn)
ctxt = context.RequestContext('111',
'222',
roles=['admin', 'weasel'])
context.RequestContext.from_dict(ctxt.to_dict())
self.assertEqual(len(warns), 0, warns)
def test_store_when_no_overwrite(self):
# If no context exists we store one even if overwrite is false
# (since we are not overwriting anything).
ctx = context.RequestContext('111',
'222',
overwrite=False)
self.assertIs(o_context.get_current(), ctx)
def test_no_overwrite(self):
# If there is already a context in the cache a new one will
# not overwrite it if overwrite=False.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.RequestContext('333',
'444',
overwrite=False)
self.assertIs(o_context.get_current(), ctx1)
def test_admin_no_overwrite(self):
# If there is already a context in the cache creating an admin
# context will not overwrite it.
ctx1 = context.RequestContext('111',
'222',
overwrite=True)
context.get_admin_context()
self.assertIs(o_context.get_current(), ctx1)
def test_convert_from_rc_to_dict(self):
ctx = context.RequestContext(
111, 222, request_id='req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
timestamp='2015-03-02T22:31:56.641629')
values2 = ctx.to_dict()
expected_values = {'auth_token': None,
'domain': None,
'instance_lock_checked': False,
'is_admin': False,
'project_id': 222,
'project_domain': None,
'project_name': None,
'quota_class': None,
'read_deleted': 'no',
'read_only': False,
'remote_address': None,
'request_id':
'req-679033b7-1755-4929-bf85-eb3bfaef7e0b',
'resource_uuid': None,
'roles': [],
'service_catalog': [],
'show_deleted': False,
'tenant': 222,
'timestamp': '2015-03-02T22:31:56.641629',
'user': 111,
'user_domain': None,
'user_id': 111,
'user_identity': '111 222 - - -',
'user_name': None}
self.assertEqual(expected_values, values2)
def test_convert_from_dict_then_to_dict(self):
values = {'user': '111',
'user_id': '111',
'tenant': '222',
'project_id': '222',
'domain': None, 'project_domain': None,
'auth_token': None,
'resource_uuid': None, 'read_only': False,
'user_identity': '111 222 - - -',
'instance_lock_checked': False,
'user_name': None, 'project_name': None,
'timestamp': '2015-03-02T20:03:59.416299',
'remote_address': None, 'quota_class': None,
'is_admin': True,
'service_catalog': [],
'read_deleted': 'no', 'show_deleted': False,
'roles': [],
'request_id': 'req-956637ad-354a-4bc5-b969-66fd1cc00f50',
'user_domain': None}
ctx = context.RequestContext.from_dict(values)
self.assertEqual(ctx.user, '111')
self.assertEqual(ctx.tenant, '222')
self.assertEqual(ctx.user_id, '111')
self.assertEqual(ctx.project_id, '222')
values2 = ctx.to_dict()
self.assertEqual(values, values2)
|
|
# Copyright 2019 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import mock
from keystoneauth1 import exceptions as ks_exc
from requests.models import Response
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from nova.accelerator import cyborg
from nova import context
from nova import exception
from nova import objects
from nova.objects import request_spec
from nova import test
from nova.tests.unit import fake_requests
class CyborgTestCase(test.NoDBTestCase):
def setUp(self):
super(CyborgTestCase, self).setUp()
self.context = context.get_admin_context()
self.client = cyborg.get_client(self.context)
def test_get_client(self):
# Set up some ksa conf options
region = 'MyRegion'
endpoint = 'http://example.com:1234'
self.flags(group='cyborg',
region_name=region,
endpoint_override=endpoint)
ctxt = context.get_admin_context()
client = cyborg.get_client(ctxt)
# Dig into the ksa adapter a bit to ensure the conf options got through
# We don't bother with a thorough test of get_ksa_adapter - that's done
# elsewhere - this is just sanity-checking that we spelled things right
# in the conf setup.
self.assertEqual('accelerator', client._client.service_type)
self.assertEqual(region, client._client.region_name)
self.assertEqual(endpoint, client._client.endpoint_override)
@mock.patch('keystoneauth1.adapter.Adapter.get')
def test_call_cyborg(self, mock_ksa_get):
mock_ksa_get.return_value = 1 # dummy value
resp, err_msg = self.client._call_cyborg(
self.client._client.get, self.client.DEVICE_PROFILE_URL)
self.assertEqual(resp, 1)
self.assertIsNone(err_msg)
@mock.patch('keystoneauth1.adapter.Adapter.get')
def test_call_cyborg_keystone_error(self, mock_ksa_get):
mock_ksa_get.side_effect = ks_exc.ClientException
resp, err_msg = self.client._call_cyborg(
self.client._client.get, self.client.DEVICE_PROFILE_URL)
self.assertIsNone(resp)
expected_err = 'Could not communicate with Cyborg.'
self.assertIn(expected_err, err_msg)
@mock.patch('keystoneauth1.adapter.Adapter.get')
def test_call_cyborg_bad_response(self, mock_ksa_get):
mock_ksa_get.return_value = None
resp, err_msg = self.client._call_cyborg(
self.client._client.get, self.client.DEVICE_PROFILE_URL)
self.assertIsNone(resp)
expected_err = 'Invalid response from Cyborg:'
self.assertIn(expected_err, err_msg)
@mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg')
@mock.patch.object(Response, 'json')
def test_get_device_profile_list(self, mock_resp_json, mock_call_cyborg):
mock_call_cyborg.return_value = Response(), None
mock_resp_json.return_value = {'device_profiles': 1} # dummy value
ret = self.client._get_device_profile_list(dp_name='mydp')
self.assertEqual(ret, 1)
@mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg')
def test_get_device_profile_list_bad_response(self, mock_call_cyborg):
"If Cyborg cannot be reached or returns bad response, raise exception."
mock_call_cyborg.return_value = (None, 'Some error')
self.assertRaises(exception.DeviceProfileError,
self.client._get_device_profile_list,
dp_name='mydp')
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
'_get_device_profile_list')
def _test_get_device_profile_groups(self, mock_get_dp_list, owner):
mock_get_dp_list.return_value = [{
"groups": [{
"resources:FPGA": "1",
"trait:CUSTOM_FPGA_CARD": "required"
}],
"name": "mydp",
"uuid": "307076c2-5aed-4f72-81e8-1b42f9aa2ec6"
}]
request_id = cyborg.get_device_profile_group_requester_id(
dp_group_id=0, owner=owner)
rg = request_spec.RequestGroup(requester_id=request_id)
rg.add_resource(rclass='FPGA', amount='1')
rg.add_trait(trait_name='CUSTOM_FPGA_CARD', trait_type='required')
expected_groups = [rg]
dp_groups = self.client.get_device_profile_groups('mydp')
actual_groups = self.client.get_device_request_groups(dp_groups,
owner=owner)
self.assertEqual(len(expected_groups), len(actual_groups))
self.assertEqual(expected_groups[0].__dict__,
actual_groups[0].__dict__)
def test_get_device_profile_groups_no_owner(self):
self._test_get_device_profile_groups(owner=None)
def test_get_device_profile_groups_port_owner(self):
self._test_get_device_profile_groups(owner=uuids.port)
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
'_get_device_profile_list')
def test_get_device_profile_groups_no_dp(self, mock_get_dp_list):
# If the return value has no device profiles, raise exception
mock_get_dp_list.return_value = None
self.assertRaises(exception.DeviceProfileError,
self.client.get_device_profile_groups,
dp_name='mydp')
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
'_get_device_profile_list')
def test_get_device_profile_groups_many_dp(self, mock_get_dp_list):
# If the returned list has more than one dp, raise exception
mock_get_dp_list.return_value = [1, 2]
self.assertRaises(exception.DeviceProfileError,
self.client.get_device_profile_groups,
dp_name='mydp')
def _get_arqs_and_request_groups(self):
arq_common = {
# All ARQs for an instance have the same device profile name.
"device_profile_name": "noprog-dp",
"device_rp_uuid": "",
"hostname": "",
"instance_uuid": "",
"state": "Initial",
}
arq_variants = [
{"device_profile_group_id": 0,
"uuid": "edbba496-3cc8-4256-94ca-dfe3413348eb"},
{"device_profile_group_id": 1,
"uuid": "20125bcb-9f55-4e13-8e8c-3fee30e54cca"},
]
arqs = [dict(arq_common, **variant) for variant in arq_variants]
rg_rp_map = {
'device_profile_0': ['c532cf11-02ed-4b03-9dd8-3e9a454131dc'],
'device_profile_1': ['2c332d7b-daaf-4726-a80d-ecf5212da4b8'],
}
return arqs, rg_rp_map
def _get_bound_arqs(self):
arqs, rg_rp_map = self._get_arqs_and_request_groups()
common = {
'host_name': 'myhost',
'instance_uuid': '15d3acf8-df76-400b-bfc9-484a5208daa1',
}
bindings = {
arqs[0]['uuid']: dict(
common, device_rp_uuid=rg_rp_map['device_profile_0'][0]),
arqs[1]['uuid']: dict(
common, device_rp_uuid=rg_rp_map['device_profile_1'][0]),
}
bound_arq_common = {
"attach_handle_info": {
"bus": "01",
"device": "00",
"domain": "0000",
"function": "0" # will vary function ID later
},
"attach_handle_type": "PCI",
"state": "Bound",
# Device profile name is common to all bound ARQs
"device_profile_name": arqs[0]["device_profile_name"],
**common
}
bound_arqs = [
{'uuid': arq['uuid'],
'device_profile_group_id': arq['device_profile_group_id'],
'device_rp_uuid': bindings[arq['uuid']]['device_rp_uuid'],
**bound_arq_common} for arq in arqs]
for index, bound_arq in enumerate(bound_arqs):
bound_arq['attach_handle_info']['function'] = index # fix func ID
return bindings, bound_arqs
@mock.patch('keystoneauth1.adapter.Adapter.post')
def test_create_arqs_failure(self, mock_cyborg_post):
# If Cyborg returns invalid response, raise exception.
mock_cyborg_post.return_value = None
self.assertRaises(exception.AcceleratorRequestOpFailed,
self.client._create_arqs,
dp_name='mydp')
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
'_create_arqs')
def test_create_arq_and_match_rps(self, mock_create_arqs):
# Happy path
arqs, rg_rp_map = self._get_arqs_and_request_groups()
dp_name = arqs[0]["device_profile_name"]
mock_create_arqs.return_value = arqs
ret_arqs = self.client.create_arqs_and_match_resource_providers(
dp_name, rg_rp_map)
# Each value in rg_rp_map is a list. We merge them into a single list.
expected_rp_uuids = sorted(list(
itertools.chain.from_iterable(rg_rp_map.values())))
ret_rp_uuids = sorted([arq['device_rp_uuid'] for arq in ret_arqs])
self.assertEqual(expected_rp_uuids, ret_rp_uuids)
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
'_create_arqs')
def test_create_arqs(self, mock_create_arqs):
# Happy path
arqs, rg_rp_map = self._get_arqs_and_request_groups()
dp_name = arqs[0]["device_profile_name"]
mock_create_arqs.return_value = arqs
ret_arqs = self.client.create_arqs(dp_name)
self.assertEqual(arqs, ret_arqs)
def test_get_arq_device_rp_uuid(self):
arqs, rg_rp_map = self._get_arqs_and_request_groups()
rp_uuid = self.client.get_arq_device_rp_uuid(
arqs[0], rg_rp_map, owner=None)
self.assertEqual(rg_rp_map['device_profile_0'][0], rp_uuid)
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
'_create_arqs')
def test_create_arq_and_match_rps_exception(self, mock_create_arqs):
# If Cyborg response does not contain ARQs, raise
arqs, rg_rp_map = self._get_arqs_and_request_groups()
dp_name = arqs[0]["device_profile_name"]
mock_create_arqs.return_value = None
self.assertRaises(
exception.AcceleratorRequestOpFailed,
self.client.create_arqs_and_match_resource_providers,
dp_name, rg_rp_map)
@mock.patch('keystoneauth1.adapter.Adapter.patch')
def test_bind_arqs(self, mock_cyborg_patch):
bindings, bound_arqs = self._get_bound_arqs()
arq_uuid = bound_arqs[0]['uuid']
patch_list = {}
for arq_uuid, binding in bindings.items():
patch = [{"path": "/" + field,
"op": "add",
"value": value
} for field, value in binding.items()]
patch_list[arq_uuid] = patch
self.client.bind_arqs(bindings)
mock_cyborg_patch.assert_called_once_with(
self.client.ARQ_URL, json=mock.ANY)
called_params = mock_cyborg_patch.call_args.kwargs['json']
self.assertEqual(sorted(called_params), sorted(patch_list))
@mock.patch('nova.accelerator.cyborg._CyborgClient.delete_arqs_by_uuid')
@mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg')
def test_bind_arqs_exception(self, mock_call_cyborg, mock_del_arqs):
# If Cyborg returns invalid response, raise exception.
bindings, _ = self._get_bound_arqs()
mock_call_cyborg.return_value = None, 'Some error'
self.assertRaises(exception.AcceleratorRequestBindingFailed,
self.client.bind_arqs, bindings=bindings)
mock_del_arqs.assert_not_called()
@mock.patch('keystoneauth1.adapter.Adapter.get')
def test_get_arqs_for_instance(self, mock_cyborg_get):
# Happy path, without only_resolved=True
_, bound_arqs = self._get_bound_arqs()
instance_uuid = bound_arqs[0]['instance_uuid']
query = {"instance": instance_uuid}
content = jsonutils.dumps({'arqs': bound_arqs})
resp = fake_requests.FakeResponse(200, content)
mock_cyborg_get.return_value = resp
ret_arqs = self.client.get_arqs_for_instance(instance_uuid)
mock_cyborg_get.assert_called_once_with(
self.client.ARQ_URL, params=query)
bound_arqs.sort(key=lambda x: x['uuid'])
ret_arqs.sort(key=lambda x: x['uuid'])
for ret_arq, bound_arq in zip(ret_arqs, bound_arqs):
self.assertDictEqual(ret_arq, bound_arq)
@mock.patch('keystoneauth1.adapter.Adapter.get')
def test_get_arqs_for_instance_exception(self, mock_cyborg_get):
# If Cyborg returns an error code, raise exception
_, bound_arqs = self._get_bound_arqs()
instance_uuid = bound_arqs[0]['instance_uuid']
resp = fake_requests.FakeResponse(404, content='')
mock_cyborg_get.return_value = resp
self.assertRaises(
exception.AcceleratorRequestOpFailed,
self.client.get_arqs_for_instance, instance_uuid)
@mock.patch('keystoneauth1.adapter.Adapter.get')
def test_get_arqs_for_instance_exception_no_resp(self, mock_cyborg_get):
# If Cyborg returns an error code, raise exception
_, bound_arqs = self._get_bound_arqs()
instance_uuid = bound_arqs[0]['instance_uuid']
content = jsonutils.dumps({'noarqs': 'oops'})
resp = fake_requests.FakeResponse(200, content)
mock_cyborg_get.return_value = resp
self.assertRaisesRegex(
exception.AcceleratorRequestOpFailed,
'Cyborg returned no accelerator requests for ',
self.client.get_arqs_for_instance, instance_uuid)
@mock.patch('keystoneauth1.adapter.Adapter.get')
def test_get_arqs_for_instance_all_resolved(self, mock_cyborg_get):
# If all ARQs are resolved, return full list
_, bound_arqs = self._get_bound_arqs()
instance_uuid = bound_arqs[0]['instance_uuid']
query = {"instance": instance_uuid}
content = jsonutils.dumps({'arqs': bound_arqs})
resp = fake_requests.FakeResponse(200, content)
mock_cyborg_get.return_value = resp
ret_arqs = self.client.get_arqs_for_instance(
instance_uuid, only_resolved=True)
mock_cyborg_get.assert_called_once_with(
self.client.ARQ_URL, params=query)
bound_arqs.sort(key=lambda x: x['uuid'])
ret_arqs.sort(key=lambda x: x['uuid'])
for ret_arq, bound_arq in zip(ret_arqs, bound_arqs):
self.assertDictEqual(ret_arq, bound_arq)
@mock.patch('keystoneauth1.adapter.Adapter.get')
def test_get_arqs_for_instance_some_resolved(self, mock_cyborg_get):
# If only some ARQs are resolved, return just the resolved ones
unbound_arqs, _ = self._get_arqs_and_request_groups()
_, bound_arqs = self._get_bound_arqs()
# Create a mixture of unbound and bound ARQs
arqs = [unbound_arqs[0], bound_arqs[0]]
instance_uuid = bound_arqs[0]['instance_uuid']
query = {"instance": instance_uuid}
content = jsonutils.dumps({'arqs': arqs})
resp = fake_requests.FakeResponse(200, content)
mock_cyborg_get.return_value = resp
ret_arqs = self.client.get_arqs_for_instance(
instance_uuid, only_resolved=True)
mock_cyborg_get.assert_called_once_with(
self.client.ARQ_URL, params=query)
self.assertEqual(ret_arqs, [bound_arqs[0]])
@mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg')
def test_delete_arqs_for_instance(self, mock_call_cyborg):
# Happy path
mock_call_cyborg.return_value = ('Some Value', None)
instance_uuid = 'edbba496-3cc8-4256-94ca-dfe3413348eb'
self.client.delete_arqs_for_instance(instance_uuid)
mock_call_cyborg.assert_called_once_with(mock.ANY,
self.client.ARQ_URL, params={'instance': instance_uuid})
@mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg')
def test_delete_arqs_for_instance_exception(self, mock_call_cyborg):
# If Cyborg returns invalid response, raise exception.
err_msg = 'Some error'
mock_call_cyborg.return_value = (None, err_msg)
instance_uuid = 'edbba496-3cc8-4256-94ca-dfe3413348eb'
exc = self.assertRaises(exception.AcceleratorRequestOpFailed,
self.client.delete_arqs_for_instance, instance_uuid)
expected_msg = ('Failed to delete accelerator requests: ' +
err_msg + ' Instance ' + instance_uuid)
self.assertEqual(expected_msg, exc.format_message())
@mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg')
def test_delete_arqs_by_uuid(self, mock_call_cyborg):
# Happy path
mock_call_cyborg.return_value = ('Some Value', None)
_, bound_arqs = self._get_bound_arqs()
arq_uuids = [arq['uuid'] for arq in bound_arqs]
arq_uuid_str = ','.join(arq_uuids)
self.client.delete_arqs_by_uuid(arq_uuids)
mock_call_cyborg.assert_called_once_with(mock.ANY,
self.client.ARQ_URL, params={'arqs': arq_uuid_str})
@mock.patch('nova.accelerator.cyborg.LOG.error')
@mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg')
def test_delete_arqs_by_uuid_exception(self, mock_call_cyborg, mock_log):
mock_call_cyborg.return_value = (None, 'Some error')
_, bound_arqs = self._get_bound_arqs()
arq_uuids = [arq['uuid'] for arq in bound_arqs]
arq_uuid_str = ','.join(arq_uuids)
self.client.delete_arqs_by_uuid(arq_uuids)
mock_call_cyborg.assert_called_once_with(mock.ANY,
self.client.ARQ_URL, params={'arqs': arq_uuid_str})
mock_log.assert_called_once_with('Failed to delete ARQs %s',
arq_uuid_str)
@mock.patch('keystoneauth1.adapter.Adapter.get')
def test_get_arq_by_uuid(self, mock_cyborg_get):
_, bound_arqs = self._get_bound_arqs()
arq_uuids = [arq['uuid'] for arq in bound_arqs]
content = jsonutils.dumps({'arqs': bound_arqs[0]})
resp = fake_requests.FakeResponse(200, content)
mock_cyborg_get.return_value = resp
ret_arqs = self.client.get_arq_by_uuid(arq_uuids[0])
mock_cyborg_get.assert_called_once_with(
"%s/%s" % (self.client.ARQ_URL, arq_uuids[0]))
self.assertEqual(bound_arqs[0], ret_arqs['arqs'])
@mock.patch('nova.accelerator.cyborg._CyborgClient._call_cyborg')
def test_get_arq_by_uuid_exception(self, mock_call_cyborg):
mock_call_cyborg.return_value = (None, 'Some error')
_, bound_arqs = self._get_bound_arqs()
arq_uuids = [arq['uuid'] for arq in bound_arqs]
self.assertRaises(exception.AcceleratorRequestOpFailed,
self.client.get_arq_by_uuid,
arq_uuids[0])
@mock.patch('keystoneauth1.adapter.Adapter.get')
def test_get_arq_by_uuid_not_found(self, mock_cyborg_get):
_, bound_arqs = self._get_bound_arqs()
arq_uuids = [arq['uuid'] for arq in bound_arqs]
content = jsonutils.dumps({})
resp = fake_requests.FakeResponse(404, content)
mock_cyborg_get.return_value = resp
self.assertRaises(exception.AcceleratorRequestOpFailed,
self.client.get_arq_by_uuid,
arq_uuids[0])
@mock.patch('keystoneauth1.adapter.Adapter.get')
def test_get_arq_uuids_for_instance(self, mock_cyborg_get):
# Happy path, without only_resolved=True
_, bound_arqs = self._get_bound_arqs()
instance_uuid = bound_arqs[0]['instance_uuid']
flavor = objects.Flavor(extra_specs={'accel:device_profile': 'dp1'})
instance = objects.Instance(flavor=flavor,
uuid=instance_uuid)
query = {"instance": instance_uuid}
content = jsonutils.dumps({'arqs': bound_arqs})
resp = fake_requests.FakeResponse(200, content)
mock_cyborg_get.return_value = resp
ret_arqs = self.client.get_arq_uuids_for_instance(instance)
mock_cyborg_get.assert_called_once_with(
self.client.ARQ_URL, params=query)
bound_arqs = [bound_arq['uuid'] for bound_arq in bound_arqs]
bound_arqs.sort()
ret_arqs.sort()
self.assertEqual(bound_arqs, ret_arqs)
def test_get_arq_pci_device_profile(self):
"""Test extractin arq pci device info"""
arq = {'uuid': uuids.arq_uuid,
'device_profile_name': "smart_nic",
'device_profile_group_id': '5',
'state': 'Bound',
'device_rp_uuid': uuids.resource_provider_uuid,
'hostname': "host_nodename",
'instance_uuid': uuids.instance_uuid,
'attach_handle_info': {
'bus': '0c', 'device': '0',
'domain': '0000', 'function': '0',
'physical_network': 'physicalnet1'
},
'attach_handle_type': 'PCI'
}
expect_info = {
'physical_network': "physicalnet1",
'pci_slot': "0000:0c:0.0",
'arq_uuid': arq['uuid']
}
bind_info = cyborg.get_arq_pci_device_profile(arq)
self.assertEqual(expect_info, bind_info)
def test_get_device_amount_of_dp_groups(self):
group1 = {
"resources:FPGA": "1",
"trait:CUSTOM_FPGA_CARD": "required"
}
group2 = {
"resources:FPGA": "2",
"trait:CUSTOM_FPGA_CARD": "required"
}
num = cyborg.get_device_amount_of_dp_groups([group1])
self.assertEqual(1, num)
num = cyborg.get_device_amount_of_dp_groups([group2])
self.assertEqual(2, num)
num = cyborg.get_device_amount_of_dp_groups([group1, group2])
self.assertEqual(3, num)
|
|
#!/usr/bin/env python
import pygame
import sys
import math
SCALE = 0.5
sprite_size = [int(85*SCALE), int(112*SCALE)]
# Initialize the screen
pygame.init()
SCREEN_SIZE = (640, 480)
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption('Get Off My Head')
#pygame.mouse.set_visible(0)
# Create the background
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((0, 0, 0))
# Scrolling offsets for X and Y (Y scrolling to be implemented later...)
SCROLL_OFFSET = [0, 0]
def LoadImage(filename):
image = pygame.image.load(filename)
image = pygame.transform.scale(image, (int(image.get_width()*SCALE), int(image.get_height()*SCALE)))
image = image.convert_alpha()
return image
# Load the SF character sprites
sf_sprites = LoadImage('sf_sprites.png')
# Load the scene and its collision mask
scene = pygame.image.load('sf_back.png')
scene_mask = pygame.image.load('sf_back_mask.png')
# Create actor animation sets (simple version: only left/right facing)
animations = {}
# for row in range(0, SF_SPRITE_MATRIX[1]):
# for col in range(0, SF_SPRITE_MATRIX[0]):
for row in range(0, 4):
for col in range(0, 4):
key = (col, row)
face_right = pygame.Surface(sprite_size)
face_right.convert_alpha()
face_right.blit(sf_sprites, (0,0), [sprite_size[0] * col, sprite_size[1] * row, sprite_size[0], sprite_size[1]])
face_left = pygame.transform.flip(face_right, True, False)
animations[key] = [face_right, face_left]
class Actor:
def __init__(self, id, name, start_pos, image_size, image_right, image_left):
print 'Creating Actor: %s: %s: %s' % (id, name, start_pos)
# Specified information
self.id = id
self.name = name
self.pos = start_pos
self.image_size = image_size
self.image_right = image_right
self.image_left = image_left
# Internal information
self.jump = 0
self.fall = 1
self.move_left = False
def __repr__(self):
output = '%s: %s: %s' % (self.id, self.name, self.pos)
return output
def GetSurface(self):
"""Return the current surface for this game.
TODO(g): Animations have not yet been introduced.
"""
if self.move_left:
return self.image_left
else:
return self.image_right
def FindClosestActor(self):
global ACTORS
closest_actor = None
closest_dist = None
for actor in ACTORS:
# Skip yourself
if actor.id == self.id:
continue
dist = self.GetDistanceToActor(actor)
if closest_dist == None or dist < closest_dist:
closest_actor = actor
closest_dist = dist
return closest_actor
def GetDistanceToActor(self, actor):
dist = math.sqrt((actor.pos[0] - self.pos[0])**2 + (actor.pos[1] - self.pos[1])**2 )
return dist
def Update(self):
"""Process all physics and junk"""
#TODO(g): Replace actor. with self., this is a short-cut
actor = self
# Fall, if you can
if actor.jump == 0:
[fall_pos, collision_actor] = MovePosCollide(actor, [0, actor.fall], ACTORS, scene_mask)
if fall_pos != actor.pos:
actor.pos = fall_pos
if actor.fall < 10:
actor.fall += 1
else:
actor.fall = 1
if actor.jump > 0:
hit_the_roof = False
for count in range(0, actor.jump):
[jump_pos, collision_actor] = MovePosCollide(actor, [0, -1], ACTORS, scene_mask)
# If we hit a ceiling, don't immediately cancel the jump, but reduce it quickly (gives a sense of upward inertia)
if jump_pos == actor.pos:
hit_the_roof = True
break
# Update the new position, because we didn't hit the roof
else:
actor.pos = jump_pos
# Reduce the jump each frame
if not hit_the_roof:
actor.jump -= 1
else:
actor.jump = actor.jump / 2
if actor.jump <= 2:
actor.jump = 0
def Jump(self):
global ACTORS
global scene_mask
[ground_test_pos, collision_actor] = MovePosCollide(self, [0, 1], ACTORS, scene_mask)
# If we are free to jump
if ground_test_pos == self.pos and self.jump == 0:
# Test if there is an actor (or obstacle) directly above us
[actor_on_head_test_pos, collision_actor] = MovePosCollide(self, [0, -1], ACTORS, scene_mask)
if actor_on_head_test_pos != self.pos:
self.jump = 17
# Else, if there was an actor standing on our head
elif collision_actor != None:
collision_actor.jump += 17
# Create our actors
ACTORS = []
# Automatically load all the character
for row in range(0, 4):
for col in range(0, 4):
key = (col, row)
id = 4*row + col
# Only create this character if it's not off the screen. That's a lot of characters anyway
start_x = id * 150
if len(ACTORS) < 6:
actor = Actor(id, 'Name: %s' % id, [start_x, 130], sprite_size, animations[key][0], animations[key][1])
ACTORS.append(actor)
# Specify the player, so that we don't use NPC AI for it
PLAYER_ACTOR_ID = 1
# Find player actor
PLAYER_ACTOR = None
for actor in ACTORS:
if actor.id == PLAYER_ACTOR_ID:
PLAYER_ACTOR = actor
break
if PLAYER_ACTOR == None:
raise Exception('Could not find the player actor: either PLAYER_ACTOR_ID is wrong or the player actor was not added to ACTORS')
def TestCollisionByPixelStep(start_pos, end_pos, step, scene, scene_obstacle_color=(255,255,255), log=False):
"""Test for a collision against the scene, starting at start_pos, ending at end_pos, using step to increment.
NOTE: This function assumes that the bounding box has already been tested against the scene; it may call scene.get_at() with coordinates that are negative or beyond the scene size, and crash
"""
# Create deltas (differences) for the step in X and Y depending on the step and start-end positions
# Delta X
if start_pos[0] < end_pos[0]:
dx = 1
elif start_pos[0] > end_pos[0]:
dx = -1
else:
dx = 0
# Delta Y
if start_pos[1] < end_pos[1]:
dy = 1
elif start_pos[1] > end_pos[1]:
dy = -1
else:
dy = 0
# Ensure we can actually move across the line, or fail
if dx == 0 and dy == 0:
raise Exception('The start and end positions are identical; handle this case later.')
# Determine the distance required to travel in X and Y directions based on the start/end positions
distance_x = abs(start_pos[0] - end_pos[0])
distance_y = abs(start_pos[1] - end_pos[1])
# Start the current position at the starting position
current_pos = [start_pos[0], start_pos[1]]
# Loop until we reach the end position, or find a collision
end_pos_reached = False
has_collision = False
distance_travelled = 0
while not end_pos_reached and not has_collision:
# Get the pixel value at the current position
scene_value = scene.get_at(current_pos)[:3]
if log:
print 'Col: dx: %s dy: %s Start: %s End: %s Cur: %s distX: %s distY: %s Pix: %s' % (dx, dy, start_pos, end_pos, current_pos, distance_x, distance_y, scene_value)
# If the pixel matches the scene_obstacle_color, there is a collision
if scene_value == scene_obstacle_color:
has_collision = True
# Else, increment the current_pos by the dx and dy, multiplied by the step
else:
# Increment the current_pos
current_pos = [current_pos[0] + (dx * step), current_pos[1] + (dy * step)]
distance_travelled += step
# If the current_pos is past the end_pos, then test the end_pos position, and set end_pos_reached (a final test is required)
if distance_x != 0 and distance_travelled >= distance_x:
# We reached the end, but make the last pixel test anyway, just to be sure we have checked them all
end_pos_reached = True
# Get the pixel value at the current position
scene_value = scene.get_at(end_pos)[:3]
# If the pixel matches the scene_obstacle_color, there is a collision
if scene_value == scene_obstacle_color:
has_collision = True
elif distance_y != 0 and distance_travelled >= distance_y:
# We reached the end, but make the last pixel test anyway, just to be sure we have checked them all
end_pos_reached = True
# Get the pixel value at the current position
scene_value = scene.get_at(end_pos)[:3]
# If the pixel matches the scene_obstacle_color, there is a collision
if scene_value == scene_obstacle_color:
has_collision = True
return has_collision
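# A hedged, self-contained sketch (not part of the original game) of how the
# pixel-step test above behaves on a tiny synthetic mask surface; the surface
# size, obstacle position and coordinates are illustrative only.
def _demo_pixel_step_collision():
    demo_mask = pygame.Surface((10, 10))
    demo_mask.fill((0, 0, 0))                    # black = walkable
    demo_mask.set_at((5, 2), (255, 255, 255))    # a single white obstacle pixel
    # Walk the top edge of a bounding box from (0, 2) to (9, 2) in 1-pixel steps;
    # the obstacle at x=5 is reported as a collision (returns True).
    return TestCollisionByPixelStep([0, 2], [9, 2], 1, demo_mask)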
def MovePosCollide(actor, move, all_actors, scene_image, scene_obstacle_color=(255,255,255), log=False):
"""Collision with actors and scene"""
# Collision with scene
scene_pos = MovePosCollideWithScene(actor.pos, move, actor.image_size, scene_image, scene_obstacle_color=scene_obstacle_color, log=log)
if scene_pos == actor.pos:
scene_collision = True
else:
scene_collision = False
# Test against actors
actor_collision = False
collision_with_actor = None
target_pos = [actor.pos[0] + move[0], actor.pos[1] + move[1]]
target_rect = pygame.Rect(target_pos, actor.image_size)
for test_actor in all_actors:
# Don't count yourself
if actor.id != test_actor.id:
test_actor_rect = pygame.Rect(test_actor.pos, test_actor.image_size)
has_collision = test_actor_rect.colliderect(target_rect)
if has_collision:
#print 'Collision: %s with %s' % (target_pos, test_actor)
actor_collision = True
collision_with_actor = test_actor
break
else:
#print 'Collision: Skip self: %s' % test_actor
pass
# If we didn't have collisions with the scene or actors, return the moved position
if not scene_collision and not actor_collision:
return (target_pos, collision_with_actor)
# Else, had collision so return current position
else:
result = [list(actor.pos), collision_with_actor]
#print 'Collision with actor: %s' % result
return result
def MovePosCollideWithScene(pos, move, bounding_box_size, scene_image, scene_obstacle_color=(255,255,255), log=False):
"""Returns a new position [x, y] from pos, moved by move [dx, dy], with
respect to colliding against non-moveable area in scene_image
(non [0,0,0] colors)
Args:
pos: list, [x, y]
move: list, [dx, dy]
bounding_box_size: list, [width, height]
scene_image, Surface object
Returns: list [new_x, new_y], if move is OK, otherwise [old_x, old_y]
"""
has_collision = False
# Create target position, where we want to move to
target_pos = [pos[0] + move[0], pos[1] + move[1]]
# Test for out of scene positions, and block
if target_pos[0] < 0:
has_collision = True
elif target_pos[0] + bounding_box_size[0] >= scene.get_width() - 1:
has_collision = True
elif target_pos[1] < 0:
has_collision = True
elif target_pos[1] + bounding_box_size[1] >= scene.get_height() - 1:
has_collision = True
# Test scene, if we havent already found a collision with the scene border
if not has_collision:
# Test every N pixels, to not miss collisions that are smaller than the bounding box
step_test = 1
#TODO(g): Collision detection with scene_image
# Make all 4 corners of the bounding box
corner_top_left = [target_pos[0], target_pos[1]]
corner_top_right = [target_pos[0] + bounding_box_size[0], target_pos[1]]
corner_bottom_left = [target_pos[0], target_pos[1] + bounding_box_size[1]]
corner_bottom_right = [target_pos[0] + bounding_box_size[0], target_pos[1] + bounding_box_size[1]]
if log:
print ''
# Test the bounding box, using step (N pixels) to get better resolution on obstacle collision
if TestCollisionByPixelStep(corner_top_left, corner_top_right, step_test, scene_image, log=log):
has_collision = True
elif TestCollisionByPixelStep(corner_top_left, corner_bottom_left, step_test, scene_image, log=log):
has_collision = True
elif TestCollisionByPixelStep(corner_top_right, corner_bottom_right, step_test, scene_image, log=log):
has_collision = True
elif TestCollisionByPixelStep(corner_bottom_left, corner_bottom_right, step_test, scene_image, log=log):
has_collision = True
# If there was a collision, don't move; create a new list from the old list
if has_collision:
final_pos = [pos[0], pos[1]]
# Else, there was not a collision, move the position
else:
final_pos = target_pos
return final_pos
def GetPosScrolled(pos):
global SCROLL_OFFSET
scrolled_pos = [pos[0] - SCROLL_OFFSET[0], pos[1] - SCROLL_OFFSET[1]]
return scrolled_pos
def Draw(surface, target_surface, pos):
target_surface.blit(surface, GetPosScrolled(pos))
while True:
#print 'Actors: %s' % ACTORS
# Enemy AI
for actor in ACTORS:
# Skip the player, process everyone else
if actor.id == PLAYER_ACTOR_ID:
continue
# Find target actor (the closest)
target_actor = actor.FindClosestActor()
if target_actor == None:
raise Exception('No target actor found; is there only one actor?')
# Player is to the Right
if actor.pos[0] < target_actor.pos[0]:
actor.move_left = False
[move_pos, collision_actor] = MovePosCollide(actor, [5, 0], ACTORS, scene_mask)
if move_pos != actor.pos:
actor.pos = move_pos
# Player is to the Left
elif actor.pos[0] > target_actor.pos[0]:
actor.move_left = True
[move_pos, collision_actor] = MovePosCollide(actor, [-5, 0], ACTORS, scene_mask)
if move_pos != actor.pos:
actor.pos = move_pos
# Try to jump, all the time
actor.Jump()
# Event pump
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
# Player input handling
keys = pygame.key.get_pressed() #checking pressed keys
# Left
if keys[pygame.K_LEFT]:
PLAYER_ACTOR.move_left = True
[PLAYER_ACTOR.pos, collision_actor] = MovePosCollide(PLAYER_ACTOR, [-5, 0], ACTORS, scene_mask)
# Right
if keys[pygame.K_RIGHT]:
PLAYER_ACTOR.move_left = False
[PLAYER_ACTOR.pos, collision_actor] = MovePosCollide(PLAYER_ACTOR, [5, 0], ACTORS, scene_mask)
# Up
if keys[pygame.K_UP]:
PLAYER_ACTOR.Jump()
# [ground_test_pos, collision_actor] = MovePosCollide(PLAYER_ACTOR, [0, 1], ACTORS, scene_mask)
# # If we are free to jump
# if ground_test_pos == PLAYER_ACTOR.pos and PLAYER_ACTOR.jump == 0:
# # Test if there is an actor (or obstacle) directly above us
# [actor_on_head_test_pos, collision_actor] = MovePosCollide(PLAYER_ACTOR, [0, -1], ACTORS, scene_mask)
# if actor_on_head_test_pos != PLAYER_ACTOR.pos:
# PLAYER_ACTOR.jump = 17
# # Else, if there was an actor standing on our head
# elif collision_actor != None:
# collision_actor.jump += 17
# Update all our actors
for actor in ACTORS:
actor.Update()
# If ESC is hit, quit
if keys[pygame.K_ESCAPE]:
sys.exit(0)
# Handle scrolling the world
scrolled_screen_x = [SCROLL_OFFSET[0], SCROLL_OFFSET[0] + SCREEN_SIZE[0]]
boundary_x = int(SCREEN_SIZE[0] / 2.5)
scroll_by_pixels = 3
# Left screen boundary
if PLAYER_ACTOR.pos[0] < scrolled_screen_x[0] + boundary_x:
SCROLL_OFFSET[0] -= scroll_by_pixels
if SCROLL_OFFSET[0] < 0:
SCROLL_OFFSET[0] = 0
# Right screen boundary
elif PLAYER_ACTOR.pos[0] > scrolled_screen_x[1] - boundary_x:
SCROLL_OFFSET[0] += scroll_by_pixels
max_scroll_x = scene.get_width() - SCREEN_SIZE[0]
if SCROLL_OFFSET[0] >= max_scroll_x:
SCROLL_OFFSET[0] = max_scroll_x
# Render background
Draw(scene, background, (0,0))
# Draw all the actors
for actor in ACTORS:
Draw(actor.GetSurface(), background, actor.pos)
# Render to screen
screen.blit(background, (0,0))
pygame.display.flip()
|
|
#!/usr/bin/env python
from __future__ import division
import os
import re
import argparse
from itertools import izip, compress
from collections import defaultdict
import numpy as np
import pandas as pd
from Bio import SeqIO
import matplotlib.pyplot as plt
def get_parser():
"""
Create a parser and add arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--depth', type=int, default=4, help='minimum read depth, default: 4')
parser.add_argument('-p', '--pmtsize', type=int, default=1000, help='promoter size, default: 1000')
parser.add_argument('-w', '--winsize', type=int, default=200000, help='window size, default: 200000')
parser.add_argument('gtf', help='GTF file')
parser.add_argument('fasta', help='reference genome FASTA file')
parser.add_argument('cgmap', help='CGmap file')
return parser
def const_gtftree(gtffile):
"""
Read a GTF file and convert it to a nested dictionary
"""
gtftree = defaultdict(lambda: defaultdict(list))
with open(gtffile) as infile:
for line in infile:
if not line.startswith('#'):
gene_id = None
transcript_id = None
line = line.strip().split('\t')
chr = line[0]
feature = line[2]
start = int(line[3]) - 1
end = int(line[4])
strand = line[6]
attributes = line[8].split(';')
if feature == 'exon':
for atb in attributes:
if 'gene_id' in atb:
gene_id = atb.strip().split()[1][1:-1]
elif 'transcript_id' in atb:
transcript_id = atb.strip().split()[1][1:-1]
if gene_id and transcript_id:
gtftree[chr][(gene_id, strand)].append((start, end))
return gtftree
def const_ctxstr(reffile):
"""
Construct methylation context strings from a reference genome FASTA file
"""
with open(reffile) as infile:
fasta = SeqIO.to_dict(SeqIO.parse(infile, 'fasta'))
for chr in fasta:
fasta[chr] = str(fasta[chr].seq).upper()
ctxstr = {}
for chr in fasta:
ctxstr[chr] = ['-']*len(fasta[chr])
cg = [match.start() for match in re.finditer(r'(?=(CG))', fasta[chr])]
for pos in cg:
ctxstr[chr][pos] = 'X'
chg = [match.start() for match in re.finditer(r'(?=(C[ACT]G))', fasta[chr])]
for pos in chg:
ctxstr[chr][pos] = 'Y'
chh = [match.start() for match in re.finditer(r'(?=(C[ACT][ACT]))', fasta[chr])]
for pos in chh:
ctxstr[chr][pos] = 'Z'
rcg = [match.start()-1 for match in re.finditer(r'(?<=(CG))', fasta[chr])]
for pos in rcg:
ctxstr[chr][pos] = 'x'
rchg = [match.start()-1 for match in re.finditer(r'(?<=(C[AGT]G))', fasta[chr])]
for pos in rchg:
ctxstr[chr][pos] = 'y'
rchh = [match.start()-1 for match in re.finditer(r'(?<=([AGT][AGT]G))', fasta[chr])]
for pos in rchh:
ctxstr[chr][pos] = 'z'
for chr in ctxstr:
ctxstr[chr] = ''.join(ctxstr[chr])
return ctxstr
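# A hedged sketch (not part of the original script) of what const_ctxstr encodes:
# upper-case X/Y/Z mark CG/CHG/CHH cytosines on the forward strand and lower-case
# x/y/z mark them on the reverse strand; the six-base sequence is illustrative.
def _demo_context_string():
    tags = ['-'] * 6
    seq = 'ACGTCA'                       # forward CG at index 1, reverse CG at index 2
    for match in re.finditer(r'(?=(CG))', seq):
        tags[match.start()] = 'X'
    for match in re.finditer(r'(?<=(CG))', seq):
        tags[match.start() - 1] = 'x'
    return ''.join(tags)                 # '-Xx---'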
def const_cgmap(ctxstr, cgmapfile, readdepth=4):
"""
Construct lists of methylation levels from a CGmap file for rapid access
"""
cgmap = {}
with open(cgmapfile) as infile:
for chr in ctxstr.keys():
cgmap[chr] = ['-' for _ in xrange(len(ctxstr[chr]))]
for line in infile:
line = line.strip().split()
chr = line[0]
pos = int(line[2]) - 1 # convert to 0-based coordinates
context = line[3]
level = float(line[5])
depth = int(line[7])
if context in ['CG', 'CHG', 'CHH'] and depth >= readdepth:
cgmap[chr][pos] = level
return cgmap
def calc_bulk(ctxstr, cgmap):
"""
Compute the global methylation level in CG/CHG/CHH
"""
inv_ctxs = {'X': 'CG', 'Y': 'CHG', 'Z': 'CHH'}
bulk = defaultdict(list)
for chr in set(ctxstr) & set(cgmap):
for tag, mlevel in izip(ctxstr[chr], cgmap[chr]):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
bulk[inv_ctxs[tag]].append(mlevel)
return bulk
def calc_mlevel(ctxstr, cgmap, gtftree, pmtsize=1000):
"""
Compute the mean methylation level of promoter/gene/exon/intron/IGN in each gene
"""
inv_ctxs = {'X': 'CG', 'Y': 'CHG', 'Z': 'CHH'}
ign = defaultdict(list)
mtable = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
counter = defaultdict(lambda: defaultdict(int))
for chr in set(ctxstr) & set(cgmap) & set(gtftree):
mask = [1]*len(cgmap[chr])
for (gene_id, strand) in gtftree[chr]:
feature_mlevels = defaultdict(lambda: defaultdict(list))
gstart = min(gtftree[chr][(gene_id, strand)])[0]
gend = max(gtftree[chr][(gene_id, strand)])[1]
mask[gstart:gend] = [0]*(gend - gstart)
if strand == '+':
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gstart-pmtsize:gstart], cgmap[chr][gstart-pmtsize:gstart])):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['pmt'].append(mlevel)
elif strand == '-':
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gend:gend+pmtsize], cgmap[chr][gend:gend+pmtsize])):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['pmt'].append(mlevel)
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gstart:gend], cgmap[chr][gstart:gend])):
tag = tag.upper()
inexon = False
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['gene'].append(mlevel)
for exon in gtftree[chr][(gene_id, strand)]:
if exon[0] <= pos+gstart < exon[1]:
feature_mlevels[inv_ctxs[tag]]['exon'].append(mlevel)
inexon = True
break
if not inexon:
feature_mlevels[inv_ctxs[tag]]['intron'].append(mlevel)
for ctx in ['CG', 'CHG', 'CHH']:
for feature in ['pmt', 'gene', 'exon', 'intron']:
if feature in feature_mlevels[ctx]:
counter[ctx][feature] += len(feature_mlevels[ctx][feature])
mtable[ctx][gene_id][feature] = np.mean(feature_mlevels[ctx][feature])
else:
counter[ctx][feature] += 0
mtable[ctx][gene_id][feature] = 0.0
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr], cgmap[chr])):
tag = tag.upper()
if (tag in inv_ctxs) and (mask[pos] == 1) and (mlevel != '-'):
ign[inv_ctxs[tag]].append(mlevel)
for ctx in ['CG', 'CHG', 'CHH']:
if len(ign[ctx]) > 0:
ign[ctx] = np.mean(ign[ctx])
else:
ign[ctx] = 0.0
cg_table = pd.DataFrame(mtable['CG']).T
cg_table = cg_table[['pmt', 'gene', 'exon', 'intron']]
chg_table = pd.DataFrame(mtable['CHG']).T
chg_table = chg_table[['pmt', 'gene', 'exon', 'intron']]
chh_table = pd.DataFrame(mtable['CHH']).T
chh_table = chh_table[['pmt', 'gene', 'exon', 'intron']]
return ign, cg_table, chg_table, chh_table
def plot_bar(dataframe, bulk, ctx):
colors = { 'CG': ( 38/255, 173/255, 84/255),
'CHG': ( 44/255, 180/255, 234/255),
'CHH': (249/255, 42/255, 54/255)}
dataframe = dataframe*100
plt.switch_backend('Agg')
fig = plt.figure()
ax = fig.add_subplot(111)
ax = dataframe.plot(ax=ax, kind='bar', grid=False, rot=0, color=colors[ctx], ylim=(0, 100))
ax.set_ylabel('Methylation Level (%)', fontsize='xx-large', fontweight='bold')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
#ax.spines['bottom'].set_position(('outward', 5))
#ax.spines['left'].set_position(('outward', 5))
ax.tick_params(direction='out', length=6, width=2, labelsize='xx-large', top='off', right='off')
for label in ax.xaxis.get_ticklabels():
label.set_fontweight('bold')
for label in ax.yaxis.get_ticklabels():
label.set_fontweight('bold')
ax.set_title(ctx, fontsize='xx-large', weight='bold')
#ax.axhline(y=np.mean(bulk[ctx])*100, linewidth=2, linestyle='--', color='k')
fig.tight_layout()
return ax
def plot_feature_mlevel(bulk, ign, cg_table, chg_table, chh_table):
cg = cg_table.mean()
cg = cg.set_value('genome', np.mean(bulk['CG']))
cg = cg.set_value('IGN', ign['CG'])
cg = cg[['genome', 'pmt', 'gene', 'exon', 'intron', 'IGN']]
cg.to_csv("CG.txt", sep="\t")
cg_ax = plot_bar(cg, bulk, 'CG')
chg = chg_table.mean()
chg = chg.set_value('genome', np.mean(bulk['CHG']))
chg = chg.set_value('IGN', ign['CHG'])
chg = chg[['genome', 'pmt', 'gene', 'exon', 'intron', 'IGN']]
chg_ax = plot_bar(chg, bulk, 'CHG')
chh = chh_table.mean()
chh = chh.set_value('genome', np.mean(bulk['CHH']))
chh = chh.set_value('IGN', ign['CHH'])
chh = chh[['genome', 'pmt', 'gene', 'exon', 'intron', 'IGN']]
chh_ax = plot_bar(chh, bulk, 'CHH')
return cg_ax, chg_ax, chh_ax
def plot_bulkmean(bulk):
bulk_mean = {}
for ctx in ['CG', 'CHG', 'CHH']:
bulk_mean[ctx] = np.mean(bulk[ctx])
bulk_mean = pd.Series(bulk_mean)*100
colors = { 'CG': ( 38/255, 173/255, 84/255),
'CHG': ( 44/255, 180/255, 234/255),
'CHH': (249/255, 42/255, 54/255)}
plt.switch_backend('Agg')
fig = plt.figure()
ax = fig.add_subplot(111)
ax = bulk_mean.plot(ax=ax, kind='bar', grid=False, rot=0, color=[colors[ctx] for ctx in ['CG', 'CHG', 'CHH']], ylim=(0, 100))
ax.set_ylabel('Methylation Level (%)', fontsize='xx-large', fontweight='bold')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
ax.tick_params(direction='out', length=6, width=2, labelsize='xx-large', top='off', right='off')
for label in ax.xaxis.get_ticklabels():
label.set_fontweight('bold')
for label in ax.yaxis.get_ticklabels():
label.set_fontweight('bold')
fig.tight_layout()
return ax
def plot_bulkhist(bulk):
colors = { 'CG': ( 38/255, 173/255, 84/255),
'CHG': ( 44/255, 180/255, 234/255),
'CHH': (249/255, 42/255, 54/255)}
plt.switch_backend('Agg')
fig = plt.figure(figsize=(8, 3))
axes = {}
for i, ctx in enumerate(['CG', 'CHG', 'CHH']):
if i == 0:
axes[ctx] = fig.add_axes((0.15, 0.25, 0.25, 0.65))
#axes[ctx] = fig.add_subplot(131)
axes[ctx].hist(bulk[ctx], weights=np.repeat(1.0/len(bulk[ctx]), len(bulk[ctx])), color=colors[ctx])
axes[ctx].spines['top'].set_visible(False)
axes[ctx].spines['right'].set_visible(False)
axes[ctx].spines['bottom'].set_linewidth(2)
axes[ctx].spines['left'].set_linewidth(2)
axes[ctx].spines['left'].set_position(('outward', 10))
plt.setp(axes[ctx].get_xticklabels(), visible=False)
axes[ctx].tick_params(axis='y', direction='out', right='off', length=6, width=2, labelsize='xx-large')
axes[ctx].tick_params(axis='x', top='off', bottom='off')
for label in axes[ctx].yaxis.get_ticklabels():
label.set_fontweight('bold')
axes[ctx].set_ylabel('Fraction', fontsize='xx-large', fontweight='bold')
else:
axes[ctx] = fig.add_axes((0.15 + (0.25 + 0.025) * i, 0.25, 0.25, 0.65))
#axes[ctx] = fig.add_subplot(1, 3, i+1)
axes[ctx].hist(bulk[ctx], weights=np.repeat(1.0/len(bulk[ctx]), len(bulk[ctx])), color=colors[ctx])
axes[ctx].spines['top'].set_visible(False)
axes[ctx].spines['left'].set_visible(False)
axes[ctx].spines['right'].set_visible(False)
axes[ctx].spines['bottom'].set_linewidth(2)
axes[ctx].spines['left'].set_linewidth(2)
plt.setp(axes[ctx].get_xticklabels(), visible=False)
plt.setp(axes[ctx].get_yticklabels(), visible=False)
axes[ctx].tick_params(top='off', bottom='off', left='off', right='off')
axes[ctx].set_ylim(0, 1)
axes[ctx].set_yticks(np.arange(0, 1.2, 0.2))
axes[ctx].set_xlim(-0.025, 1.025)
axes[ctx].set_xlabel(ctx, fontsize='xx-large', fontweight='bold')
fig.suptitle('Methylation Level (0 -> 100%)', x=0.55, y=0.1, fontsize='xx-large', fontweight='bold')
return fig
# The alphanum algorithm is from http://www.davekoelle.com/alphanum.html
re_chunk = re.compile(r"([\D]+|[\d]+)")
re_letters = re.compile(r"\D+")
re_numbers = re.compile(r"\d+")
def getchunk(item):
itemchunk = re_chunk.match(item)
# Subtract the matched portion from the original string
# if there was a match, otherwise set it to ""
item = (item[itemchunk.end():] if itemchunk else "")
# Don't return the match object, just the text
itemchunk = (itemchunk.group() if itemchunk else "")
return (itemchunk, item)
def alphanum(a, b):
n = 0
while (n == 0):
# Get a chunk and the original string with the chunk subtracted
(ac, a) = getchunk(a)
(bc, b) = getchunk(b)
# Both items contain only letters
if (re_letters.match(ac) and re_letters.match(bc)):
n = cmp(ac, bc)
else:
# Both items contain only numbers
if (re_numbers.match(ac) and re_numbers.match(bc)):
n = cmp(int(ac), int(bc))
# One item has letters and one item has numbers, or one item is empty
else:
n = cmp(ac, bc)
# Prevent deadlocks
if (n == 0):
n = 1
return n
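# Illustrative example (comment only, not executed): this comparator gives a
# natural sort of chromosome names, e.g.
#   sorted(['chr10', 'chr2', 'chr1'], cmp=alphanum) -> ['chr1', 'chr2', 'chr10']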
def calc_genomewide(ctxstr, cgmap, winsize=200000):
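    """Average methylation level per non-overlapping window of `winsize` bases.

    Chromosomes are walked in natural (alphanum) order. Returns
    (win_x, win_mlevel): win_x holds the running midpoint coordinate of each
    window across chromosomes, and win_mlevel maps each context
    ('CG', 'CHG', 'CHH') to the per-window mean methylation levels in percent.
    """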
inv_ctxs = {'X': 'CG', 'Y': 'CHG', 'Z': 'CHH'}
win_mlevel = defaultdict(list)
win_x = []
pos = 0
chrs = ctxstr.keys()
chrs.sort(cmp=alphanum)
"""
if 'chr' in ctxstr.keys()[0].lower():
chrs = sorted(ctxstr.keys(), key=lambda s: s[3:])
else:
chrs = sorted(ctxstr.keys())
"""
for chr in chrs:
start = 0
while (start + winsize) <= len(ctxstr[chr]):
win_x.append(pos+(winsize/2))
tmp = defaultdict(list)
for tag, mlevel in izip(ctxstr[chr][start:start+winsize], cgmap[chr][start:start+winsize]):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
tmp[inv_ctxs[tag]].append(mlevel)
for ctx in ['CG', 'CHG', 'CHH']:
win_mlevel[ctx].append(np.mean(tmp[ctx])*100)
start += winsize
pos += winsize
return win_x, win_mlevel
def plot_genomewide(ctxstr, gpos, gmlevel):
colors = { 'CG': ( 38/255, 173/255, 84/255),
'CHG': ( 44/255, 180/255, 234/255),
'CHH': (249/255, 42/255, 54/255)}
chrs = ctxstr.keys()
chrs.sort(cmp=alphanum)
"""
if 'chr' in ctxstr.keys()[0].lower():
chrs = sorted(ctxstr.keys(), key=lambda s: s[3:])
else:
chrs = sorted(ctxstr.keys())
"""
#chrs = map(str, range(1, 23)) + ['X', 'Y']
vlines = [0]
for i, chr in enumerate(chrs):
vlines.append(vlines[i] + len(ctxstr[chr]))
plt.switch_backend('Agg')
fig = plt.figure(figsize=(16, 4.5))
ax = fig.add_subplot(111)
ax.plot(gpos, gmlevel['CG'], color=colors['CG'], linewidth=1.5, label='CG')
ax.plot(gpos, gmlevel['CHG'], color=colors['CHG'], linewidth=1.5, label='CHG')
ax.plot(gpos, gmlevel['CHH'], color=colors['CHH'], linewidth=1.5, label='CHH')
ax.set_ylim(0, 100)
ax.set_xlim(0, vlines[-1])
for pos in vlines[1:-1]:
ax.axvline(x=pos, linestyle='--', linewidth=1.5, color='gray')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
ax.spines['left'].set_position(('outward', 10))
for label in ax.xaxis.get_ticklabels():
label.set_fontweight('bold')
for label in ax.yaxis.get_ticklabels():
label.set_fontweight('bold')
ax.tick_params(direction='out', length=6, width=2, labelsize='large', top='off', right='off', bottom='off')
ax.set_xticks([(vlines[i] + vlines[i+1])/2 for i in xrange(len(vlines) - 1)])
ax.set_xticklabels(chrs)
ax.set_xlabel('Chromosome', fontsize='xx-large', fontweight='bold')
ax.set_ylabel('Methylation Level (%)', fontsize='xx-large', fontweight='bold')
ax.legend(loc='upper right', fontsize='large', frameon=False)
fig.tight_layout()
return ax
def main():
parser = get_parser()
args = parser.parse_args()
root = os.path.splitext(os.path.basename(args.cgmap))[0]
ctxstr = const_ctxstr(args.fasta)
cgmap = const_cgmap(ctxstr, args.cgmap, args.depth)
gtftree = const_gtftree(args.gtf)
bulk = calc_bulk(ctxstr, cgmap)
plt.switch_backend('Agg')
bulk_ax = plot_bulkmean(bulk)
fig = bulk_ax.get_figure()
fig.savefig('{}.bulk.mean.png'.format(root), dpi=300)
plt.close(fig)
bulk_fig = plot_bulkhist(bulk)
bulk_fig.savefig('{}.bulk.hist.png'.format(root), dpi=300)
    plt.close(bulk_fig)
ign, cg_table, chg_table, chh_table = calc_mlevel(ctxstr, cgmap, gtftree, args.pmtsize)
cg_table.to_csv('{}.feature.CG.txt'.format(root), sep='\t', float_format='%.3f')
chg_table.to_csv('{}.feature.CHG.txt'.format(root), sep='\t', float_format='%.3f')
chh_table.to_csv('{}.feature.CHH.txt'.format(root), sep='\t', float_format='%.3f')
cg_ax, chg_ax, chh_ax = plot_feature_mlevel(bulk, ign, cg_table, chg_table, chh_table)
fig = cg_ax.get_figure()
fig.savefig('{}.feature.CG.png'.format(root), dpi=300)
plt.close(fig)
fig = chg_ax.get_figure()
fig.savefig('{}.feature.CHG.png'.format(root), dpi=300)
plt.close(fig)
fig = chh_ax.get_figure()
fig.savefig('{}.feature.CHH.png'.format(root), dpi=300)
plt.close(fig)
gpos, gmlevel = calc_genomewide(ctxstr, cgmap, winsize=args.winsize)
gax = plot_genomewide(ctxstr, gpos, gmlevel)
fig = gax.get_figure()
fig.savefig('{}.genomewide.png'.format(root), dpi=300)
plt.close(fig)
if __name__ == '__main__':
main()
|
|
from datetime import datetime
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.backends import RemoteUserBackend
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase, override_settings
from django.utils import timezone
@skipIfCustomUser
@override_settings(ROOT_URLCONF='django.contrib.auth.tests.urls')
class RemoteUserTest(TestCase):
middleware = 'django.contrib.auth.middleware.RemoteUserMiddleware'
backend = 'django.contrib.auth.backends.RemoteUserBackend'
header = 'REMOTE_USER'
# Usernames to be passed in REMOTE_USER for the test_known_user test case.
known_user = 'knownuser'
known_user2 = 'knownuser2'
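    # These tests inject the remote-user header by passing it as an extra
    # keyword argument to self.client.get(); the test client copies such extra
    # kwargs into the WSGI environ, which is where RemoteUserMiddleware reads
    # the configured header (REMOTE_USER by default).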
def setUp(self):
self.curr_middleware = settings.MIDDLEWARE_CLASSES
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.MIDDLEWARE_CLASSES += (self.middleware,)
settings.AUTHENTICATION_BACKENDS += (self.backend,)
def test_no_remote_user(self):
"""
        Tests requests where no remote user is specified and ensures that no
users get created.
"""
num_users = User.objects.count()
response = self.client.get('/remote_user/')
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: None})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', **{self.header: ''})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
def test_unknown_user(self):
"""
Tests the case where the username passed in the header does not exist
as a User.
"""
num_users = User.objects.count()
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertEqual(response.context['user'].username, 'newuser')
self.assertEqual(User.objects.count(), num_users + 1)
User.objects.get(username='newuser')
# Another request with same user should not create any new users.
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertEqual(User.objects.count(), num_users + 1)
def test_known_user(self):
"""
Tests the case where the username passed in the header is a valid User.
"""
User.objects.create(username='knownuser')
User.objects.create(username='knownuser2')
num_users = User.objects.count()
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
self.assertEqual(User.objects.count(), num_users)
# Test that a different user passed in the headers causes the new user
# to be logged in.
response = self.client.get('/remote_user/',
**{self.header: self.known_user2})
self.assertEqual(response.context['user'].username, 'knownuser2')
self.assertEqual(User.objects.count(), num_users)
def test_last_login(self):
"""
Tests that a user's last_login is set the first time they make a
request but not updated in subsequent requests with the same session.
"""
user = User.objects.create(username='knownuser')
# Set last_login to something so we can determine if it changes.
default_login = datetime(2000, 1, 1)
if settings.USE_TZ:
default_login = default_login.replace(tzinfo=timezone.utc)
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertNotEqual(default_login, response.context['user'].last_login)
user = User.objects.get(username='knownuser')
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(default_login, response.context['user'].last_login)
def test_header_disappears(self):
"""
Tests that a logged in user is logged out automatically when
the REMOTE_USER header disappears during the same browser session.
"""
User.objects.create(username='knownuser')
# Known user authenticates
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
# During the session, the REMOTE_USER header disappears. Should trigger logout.
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].is_anonymous(), True)
# verify the remoteuser middleware will not remove a user
# authenticated via another backend
User.objects.create_user(username='modeluser', password='foo')
self.client.login(username='modeluser', password='foo')
authenticate(username='modeluser', password='foo')
response = self.client.get('/remote_user/')
self.assertEqual(response.context['user'].username, 'modeluser')
def test_user_switch_forces_new_login(self):
"""
        Tests that if the username in the header changes between requests,
        the original user is logged out.
"""
User.objects.create(username='knownuser')
# Known user authenticates
response = self.client.get('/remote_user/',
**{self.header: self.known_user})
self.assertEqual(response.context['user'].username, 'knownuser')
# During the session, the REMOTE_USER changes to a different user.
response = self.client.get('/remote_user/',
**{self.header: "newnewuser"})
# Ensure that the current user is not the prior remote_user
# In backends that create a new user, username is "newnewuser"
# In backends that do not create new users, it is '' (anonymous user)
self.assertNotEqual(response.context['user'].username, 'knownuser')
def tearDown(self):
"""Restores settings to avoid breaking other tests."""
settings.MIDDLEWARE_CLASSES = self.curr_middleware
settings.AUTHENTICATION_BACKENDS = self.curr_auth
class RemoteUserNoCreateBackend(RemoteUserBackend):
"""Backend that doesn't create unknown users."""
create_unknown_user = False
@skipIfCustomUser
class RemoteUserNoCreateTest(RemoteUserTest):
"""
Contains the same tests as RemoteUserTest, but using a custom auth backend
class that doesn't create unknown users.
"""
backend = 'django.contrib.auth.tests.test_remote_user.RemoteUserNoCreateBackend'
def test_unknown_user(self):
num_users = User.objects.count()
response = self.client.get('/remote_user/', **{self.header: 'newuser'})
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
class CustomRemoteUserBackend(RemoteUserBackend):
"""
Backend that overrides RemoteUserBackend methods.
"""
def clean_username(self, username):
"""
Grabs username before the @ character.
"""
return username.split('@')[0]
def configure_user(self, user):
"""
Sets user's email address.
"""
user.email = 'user@example.com'
user.save()
return user
@skipIfCustomUser
class RemoteUserCustomTest(RemoteUserTest):
"""
Tests a custom RemoteUserBackend subclass that overrides the clean_username
and configure_user methods.
"""
backend = 'django.contrib.auth.tests.test_remote_user.CustomRemoteUserBackend'
# REMOTE_USER strings with email addresses for the custom backend to
# clean.
known_user = 'knownuser@example.com'
known_user2 = 'knownuser2@example.com'
def test_known_user(self):
"""
The strings passed in REMOTE_USER should be cleaned and the known users
should not have been configured with an email address.
"""
super(RemoteUserCustomTest, self).test_known_user()
self.assertEqual(User.objects.get(username='knownuser').email, '')
self.assertEqual(User.objects.get(username='knownuser2').email, '')
def test_unknown_user(self):
"""
The unknown user created should be configured with an email address.
"""
super(RemoteUserCustomTest, self).test_unknown_user()
newuser = User.objects.get(username='newuser')
self.assertEqual(newuser.email, 'user@example.com')
class CustomHeaderMiddleware(RemoteUserMiddleware):
"""
Middleware that overrides custom HTTP auth user header.
"""
header = 'HTTP_AUTHUSER'
@skipIfCustomUser
class CustomHeaderRemoteUserTest(RemoteUserTest):
"""
Tests a custom RemoteUserMiddleware subclass with custom HTTP auth user
header.
"""
middleware = (
'django.contrib.auth.tests.test_remote_user.CustomHeaderMiddleware'
)
header = 'HTTP_AUTHUSER'
|
|
"""Base classes to manage a Client's interaction with a running kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import
import atexit
import errno
from threading import Thread
import time
import zmq
# import ZMQError in top-level namespace, to avoid ugly attribute-error messages
# during garbage collection of threads at exit:
from zmq import ZMQError
from zmq.eventloop import ioloop, zmqstream
from IPython.core.release import kernel_protocol_version_info
from .channelsabc import (
ShellChannelABC, IOPubChannelABC,
HBChannelABC, StdInChannelABC,
)
from IPython.utils.py3compat import string_types, iteritems
#-----------------------------------------------------------------------------
# Constants and exceptions
#-----------------------------------------------------------------------------
major_protocol_version = kernel_protocol_version_info[0]
class InvalidPortNumber(Exception):
pass
#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------
# some utilities to validate message structure, these might get moved elsewhere
# if they prove to have more generic utility
def validate_string_list(lst):
"""Validate that the input is a list of strings.
Raises ValueError if not."""
if not isinstance(lst, list):
raise ValueError('input %r must be a list' % lst)
for x in lst:
if not isinstance(x, string_types):
raise ValueError('element %r in list must be a string' % x)
def validate_string_dict(dct):
"""Validate that the input is a dict with string keys and values.
Raises ValueError if not."""
for k, v in iteritems(dct):
if not isinstance(k, string_types):
raise ValueError('key %r in dict must be a string' % k)
if not isinstance(v, string_types):
raise ValueError('value %r in dict must be a string' % v)
#-----------------------------------------------------------------------------
# ZMQ Socket Channel classes
#-----------------------------------------------------------------------------
class ZMQSocketChannel(Thread):
"""The base class for the channels that use ZMQ sockets."""
context = None
session = None
socket = None
ioloop = None
stream = None
_address = None
_exiting = False
proxy_methods = []
def __init__(self, context, session, address):
"""Create a channel.
Parameters
----------
context : :class:`zmq.Context`
The ZMQ context to use.
session : :class:`session.Session`
The session to use.
address : zmq url
Standard (ip, port) tuple that the kernel is listening on.
"""
super(ZMQSocketChannel, self).__init__()
self.daemon = True
self.context = context
self.session = session
if isinstance(address, tuple):
if address[1] == 0:
message = 'The port number for a channel cannot be 0.'
raise InvalidPortNumber(message)
address = "tcp://%s:%i" % address
self._address = address
atexit.register(self._notice_exit)
def _notice_exit(self):
self._exiting = True
def _run_loop(self):
"""Run my loop, ignoring EINTR events in the poller"""
while True:
try:
self.ioloop.start()
except ZMQError as e:
if e.errno == errno.EINTR:
continue
else:
raise
except Exception:
if self._exiting:
break
else:
raise
else:
break
def stop(self):
"""Stop the channel's event loop and join its thread.
This calls :meth:`~threading.Thread.join` and returns when the thread
terminates. :class:`RuntimeError` will be raised if
:meth:`~threading.Thread.start` is called again.
"""
if self.ioloop is not None:
self.ioloop.stop()
self.join()
self.close()
def close(self):
if self.ioloop is not None:
try:
self.ioloop.close(all_fds=True)
except Exception:
pass
if self.socket is not None:
try:
self.socket.close(linger=0)
except Exception:
pass
self.socket = None
@property
def address(self):
"""Get the channel's address as a zmq url string.
These URLS have the form: 'tcp://127.0.0.1:5555'.
"""
return self._address
def _queue_send(self, msg):
"""Queue a message to be sent from the IOLoop's thread.
Parameters
----------
msg : message to send
This is threadsafe, as it uses IOLoop.add_callback to give the loop's
thread control of the action.
"""
def thread_send():
self.session.send(self.stream, msg)
self.ioloop.add_callback(thread_send)
def _handle_recv(self, msg):
"""Callback for stream.on_recv.
Unpacks message, and calls handlers with it.
"""
ident, smsg = self.session.feed_identities(msg)
msg = self.session.deserialize(smsg)
self.call_handlers(msg)
class ShellChannel(ZMQSocketChannel):
"""The shell channel for issuing request/replies to the kernel."""
command_queue = None
# flag for whether execute requests should be allowed to call raw_input:
allow_stdin = True
proxy_methods = [
'execute',
'complete',
'inspect',
'history',
'kernel_info',
'shutdown',
'is_complete',
]
def __init__(self, context, session, address):
super(ShellChannel, self).__init__(context, session, address)
self.ioloop = ioloop.IOLoop()
def run(self):
"""The thread's main activity. Call start() instead."""
self.socket = self.context.socket(zmq.DEALER)
self.socket.linger = 1000
self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
self.socket.connect(self.address)
self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
self.stream.on_recv(self._handle_recv)
self._run_loop()
def call_handlers(self, msg):
"""This method is called in the ioloop thread when a message arrives.
Subclasses should override this method to handle incoming messages.
It is important to remember that this method is called in the thread
so that some logic must be done to ensure that the application level
handlers are called in the application thread.
"""
raise NotImplementedError(
'call_handlers must be defined in a subclass.')
def execute(self, code, silent=False, store_history=True,
user_expressions=None, allow_stdin=None):
"""Execute code in the kernel.
Parameters
----------
code : str
A string of Python code.
silent : bool, optional (default False)
            If set, the kernel will execute the code as quietly as possible, and
will force store_history to be False.
store_history : bool, optional (default True)
If set, the kernel will store command history. This is forced
to be False if silent is True.
user_expressions : dict, optional
A dict mapping names to expressions to be evaluated in the user's
dict. The expression values are returned as strings formatted using
:func:`repr`.
allow_stdin : bool, optional (default self.allow_stdin)
Flag for whether the kernel can send stdin requests to frontends.
Some frontends (e.g. the Notebook) do not support stdin requests.
If raw_input is called from code executed from such a frontend, a
StdinNotImplementedError will be raised.
Returns
-------
The msg_id of the message sent.
"""
if user_expressions is None:
user_expressions = {}
if allow_stdin is None:
allow_stdin = self.allow_stdin
# Don't waste network traffic if inputs are invalid
if not isinstance(code, string_types):
raise ValueError('code %r must be a string' % code)
validate_string_dict(user_expressions)
# Create class for content/msg creation. Related to, but possibly
# not in Session.
content = dict(code=code, silent=silent, store_history=store_history,
user_expressions=user_expressions,
allow_stdin=allow_stdin,
)
msg = self.session.msg('execute_request', content)
self._queue_send(msg)
return msg['header']['msg_id']
def complete(self, code, cursor_pos=None):
"""Tab complete text in the kernel's namespace.
Parameters
----------
code : str
The context in which completion is requested.
Can be anything between a variable name and an entire cell.
cursor_pos : int, optional
The position of the cursor in the block of code where the completion was requested.
Default: ``len(code)``
Returns
-------
The msg_id of the message sent.
"""
if cursor_pos is None:
cursor_pos = len(code)
content = dict(code=code, cursor_pos=cursor_pos)
msg = self.session.msg('complete_request', content)
self._queue_send(msg)
return msg['header']['msg_id']
def inspect(self, code, cursor_pos=None, detail_level=0):
"""Get metadata information about an object in the kernel's namespace.
It is up to the kernel to determine the appropriate object to inspect.
Parameters
----------
code : str
The context in which info is requested.
Can be anything between a variable name and an entire cell.
cursor_pos : int, optional
The position of the cursor in the block of code where the info was requested.
Default: ``len(code)``
detail_level : int, optional
The level of detail for the introspection (0-2)
Returns
-------
The msg_id of the message sent.
"""
if cursor_pos is None:
cursor_pos = len(code)
content = dict(code=code, cursor_pos=cursor_pos,
detail_level=detail_level,
)
msg = self.session.msg('inspect_request', content)
self._queue_send(msg)
return msg['header']['msg_id']
def history(self, raw=True, output=False, hist_access_type='range', **kwargs):
"""Get entries from the kernel's history list.
Parameters
----------
raw : bool
If True, return the raw input.
output : bool
If True, then return the output as well.
hist_access_type : str
'range' (fill in session, start and stop params), 'tail' (fill in n)
or 'search' (fill in pattern param).
session : int
For a range request, the session from which to get lines. Session
numbers are positive integers; negative ones count back from the
current session.
start : int
The first line number of a history range.
stop : int
The final (excluded) line number of a history range.
n : int
The number of lines of history to get for a tail request.
pattern : str
The glob-syntax pattern for a search request.
Returns
-------
The msg_id of the message sent.
"""
content = dict(raw=raw, output=output, hist_access_type=hist_access_type,
**kwargs)
msg = self.session.msg('history_request', content)
self._queue_send(msg)
return msg['header']['msg_id']
def kernel_info(self):
"""Request kernel info."""
msg = self.session.msg('kernel_info_request')
self._queue_send(msg)
return msg['header']['msg_id']
def _handle_kernel_info_reply(self, msg):
"""handle kernel info reply
sets protocol adaptation version
"""
adapt_version = int(msg['content']['protocol_version'].split('.')[0])
if adapt_version != major_protocol_version:
self.session.adapt_version = adapt_version
def shutdown(self, restart=False):
"""Request an immediate kernel shutdown.
Upon receipt of the (empty) reply, client code can safely assume that
the kernel has shut down and it's safe to forcefully terminate it if
it's still alive.
The kernel will send the reply via a function registered with Python's
atexit module, ensuring it's truly done as the kernel is done with all
normal operation.
"""
# Send quit message to kernel. Once we implement kernel-side setattr,
# this should probably be done that way, but for now this will do.
msg = self.session.msg('shutdown_request', {'restart': restart})
self._queue_send(msg)
return msg['header']['msg_id']
def is_complete(self, code):
msg = self.session.msg('is_complete_request', {'code': code})
self._queue_send(msg)
return msg['header']['msg_id']
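# Illustrative usage sketch for ShellChannel (comments only, not executed).
# A concrete subclass must override call_handlers() to consume replies; the
# zmq context, Session instance and kernel address below are assumed to exist:
#
#   shell = MyShellChannel(zmq_context, session, 'tcp://127.0.0.1:5555')
#   shell.start()                      # runs the channel's IOLoop in a thread
#   msg_id = shell.execute("a = 1")    # queues an execute_request
#   ...
#   shell.stop()                       # stops the IOLoop and joins the thread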
class IOPubChannel(ZMQSocketChannel):
"""The iopub channel which listens for messages that the kernel publishes.
This channel is where all output is published to frontends.
"""
def __init__(self, context, session, address):
super(IOPubChannel, self).__init__(context, session, address)
self.ioloop = ioloop.IOLoop()
def run(self):
"""The thread's main activity. Call start() instead."""
self.socket = self.context.socket(zmq.SUB)
self.socket.linger = 1000
self.socket.setsockopt(zmq.SUBSCRIBE, b'')
self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
self.socket.connect(self.address)
self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
self.stream.on_recv(self._handle_recv)
self._run_loop()
def call_handlers(self, msg):
"""This method is called in the ioloop thread when a message arrives.
Subclasses should override this method to handle incoming messages.
It is important to remember that this method is called in the thread
        so that some logic must be done to ensure that the application level
handlers are called in the application thread.
"""
raise NotImplementedError(
'call_handlers must be defined in a subclass.')
def flush(self, timeout=1.0):
"""Immediately processes all pending messages on the iopub channel.
Callers should use this method to ensure that :meth:`call_handlers`
has been called for all messages that have been received on the
0MQ SUB socket of this channel.
This method is thread safe.
Parameters
----------
timeout : float, optional
The maximum amount of time to spend flushing, in seconds. The
default is one second.
"""
# We do the IOLoop callback process twice to ensure that the IOLoop
# gets to perform at least one full poll.
stop_time = time.time() + timeout
for i in range(2):
self._flushed = False
self.ioloop.add_callback(self._flush)
while not self._flushed and time.time() < stop_time:
time.sleep(0.01)
def _flush(self):
"""Callback for :method:`self.flush`."""
self.stream.flush()
self._flushed = True
class StdInChannel(ZMQSocketChannel):
"""The stdin channel to handle raw_input requests that the kernel makes."""
msg_queue = None
proxy_methods = ['input']
def __init__(self, context, session, address):
super(StdInChannel, self).__init__(context, session, address)
self.ioloop = ioloop.IOLoop()
def run(self):
"""The thread's main activity. Call start() instead."""
self.socket = self.context.socket(zmq.DEALER)
self.socket.linger = 1000
self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
self.socket.connect(self.address)
self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
self.stream.on_recv(self._handle_recv)
self._run_loop()
def call_handlers(self, msg):
"""This method is called in the ioloop thread when a message arrives.
Subclasses should override this method to handle incoming messages.
It is important to remember that this method is called in the thread
        so that some logic must be done to ensure that the application level
handlers are called in the application thread.
"""
raise NotImplementedError(
'call_handlers must be defined in a subclass.')
def input(self, string):
"""Send a string of raw input to the kernel."""
content = dict(value=string)
msg = self.session.msg('input_reply', content)
self._queue_send(msg)
class HBChannel(ZMQSocketChannel):
"""The heartbeat channel which monitors the kernel heartbeat.
Note that the heartbeat channel is paused by default. As long as you start
this channel, the kernel manager will ensure that it is paused and un-paused
as appropriate.
"""
time_to_dead = 3.0
socket = None
poller = None
_running = None
_pause = None
_beating = None
def __init__(self, context, session, address):
super(HBChannel, self).__init__(context, session, address)
self._running = False
self._pause = True
self.poller = zmq.Poller()
def _create_socket(self):
if self.socket is not None:
# close previous socket, before opening a new one
self.poller.unregister(self.socket)
self.socket.close()
self.socket = self.context.socket(zmq.REQ)
self.socket.linger = 1000
self.socket.connect(self.address)
self.poller.register(self.socket, zmq.POLLIN)
def _poll(self, start_time):
"""poll for heartbeat replies until we reach self.time_to_dead.
Ignores interrupts, and returns the result of poll(), which
will be an empty list if no messages arrived before the timeout,
or the event tuple if there is a message to receive.
"""
until_dead = self.time_to_dead - (time.time() - start_time)
# ensure poll at least once
until_dead = max(until_dead, 1e-3)
events = []
while True:
try:
events = self.poller.poll(1000 * until_dead)
except ZMQError as e:
if e.errno == errno.EINTR:
# ignore interrupts during heartbeat
# this may never actually happen
until_dead = self.time_to_dead - (time.time() - start_time)
until_dead = max(until_dead, 1e-3)
pass
else:
raise
except Exception:
if self._exiting:
break
else:
raise
else:
break
return events
def run(self):
"""The thread's main activity. Call start() instead."""
self._create_socket()
self._running = True
self._beating = True
while self._running:
if self._pause:
# just sleep, and skip the rest of the loop
time.sleep(self.time_to_dead)
continue
since_last_heartbeat = 0.0
# io.rprint('Ping from HB channel') # dbg
# no need to catch EFSM here, because the previous event was
# either a recv or connect, which cannot be followed by EFSM
self.socket.send(b'ping')
request_time = time.time()
ready = self._poll(request_time)
if ready:
self._beating = True
# the poll above guarantees we have something to recv
self.socket.recv()
# sleep the remainder of the cycle
remainder = self.time_to_dead - (time.time() - request_time)
if remainder > 0:
time.sleep(remainder)
continue
else:
# nothing was received within the time limit, signal heart
# failure
self._beating = False
since_last_heartbeat = time.time() - request_time
self.call_handlers(since_last_heartbeat)
# and close/reopen the socket, because the REQ/REP cycle has
# been broken
self._create_socket()
continue
def pause(self):
"""Pause the heartbeat."""
self._pause = True
def unpause(self):
"""Unpause the heartbeat."""
self._pause = False
def is_beating(self):
"""Is the heartbeat running and responsive (and not paused)."""
if self.is_alive() and not self._pause and self._beating:
return True
else:
return False
def stop(self):
"""Stop the channel's event loop and join its thread."""
self._running = False
super(HBChannel, self).stop()
def call_handlers(self, since_last_heartbeat):
"""This method is called in the ioloop thread when a message arrives.
Subclasses should override this method to handle incoming messages.
It is important to remember that this method is called in the thread
so that some logic must be done to ensure that the application level
handlers are called in the application thread.
"""
raise NotImplementedError(
'call_handlers must be defined in a subclass.')
#-----------------------------------------------------------------------------
# ABC Registration
#-----------------------------------------------------------------------------
ShellChannelABC.register(ShellChannel)
IOPubChannelABC.register(IOPubChannel)
HBChannelABC.register(HBChannel)
StdInChannelABC.register(StdInChannel)
|
|
# Copyright 2016 Adobe. All rights reserved.
# Methods:
# Parse args. If glyphlist is from file, read in entire file as single string,
# and remove all white space, then parse out glyph-names and GID's.
# For each font name:
# Use fontTools library to open font and extract CFF table.
# If error, skip font and report error.
# Filter specified glyph list, if any, with list of glyphs in the font.
# Open font plist file, if any. If not, create empty font plist.
# Build alignment zone string
# For identifier in glyph-list:
# Get T2 charstring for glyph from parent font CFF table. If not present,
# report and skip.
# Get new alignment zone string if FDarray index (which font dict is used)
# has changed.
# Convert to bez
# Build autohint point list string; this is used to tell if glyph has been
# changed since the last time it was hinted.
# If requested, check against plist dict, and skip if glyph is already
# hinted or is manually hinted.
# Call autohint library on bez string.
# If changes to the point list are permitted and happened, rebuild the
# autohint point list string.
# Convert bez string to T2 charstring, and update parent font CFF.
# Add glyph hint entry to plist file
# Save font plist file.
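#
# Illustrative usage sketch (comments only, not executed); the font path is a
# hypothetical example. With no outputPaths set, fonts are hinted in place:
#
#   options = ACOptions()
#   options.inputPaths = ['MyFont.otf']
#   hintFiles(options)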
import ast
import logging
import os
import sys
import time
from collections import defaultdict, namedtuple
from .otfFont import CFFFontData
from .ufoFont import UFOFontData
from ._psautohint import error as PsAutoHintCError
from . import (get_font_format, hint_bez_glyph, hint_compatible_bez_glyphs,
FontParseError)
log = logging.getLogger(__name__)
class ACOptions(object):
def __init__(self):
self.inputPaths = []
self.outputPaths = []
self.reference_font = None
self.glyphList = []
self.nameAliases = {}
self.excludeGlyphList = False
self.hintAll = False
self.read_hints = False
self.allowChanges = False
self.noFlex = False
self.noHintSub = False
self.allow_no_blues = False
self.hCounterGlyphs = []
self.vCounterGlyphs = []
self.logOnly = False
self.printDefaultFDDict = False
self.printFDDictList = False
self.round_coords = True
self.writeToDefaultLayer = False
self.baseMaster = {}
self.font_format = None
self.report_zones = False
self.report_stems = False
self.report_all_stems = False
def __str__(self):
# used only when debugging.
import inspect
data = []
methodList = inspect.getmembers(self)
for fname, fvalue in methodList:
if fname[0] == "_":
continue
data.append(str((fname, fvalue)))
data.append("")
return os.linesep.join(data)
class ACHintError(Exception):
pass
class GlyphReports:
def __init__(self):
self.glyphs = {}
def addGlyphReport(self, glyphName, reportString):
hstems = {}
vstems = {}
hstems_pos = {}
vstems_pos = {}
char_zones = {}
stem_zone_stems = {}
self.glyphs[glyphName] = [hstems, vstems, char_zones, stem_zone_stems]
lines = reportString.splitlines()
for line in lines:
tokens = line.split()
key = tokens[0]
x = ast.literal_eval(tokens[3])
y = ast.literal_eval(tokens[5])
hintpos = "%s %s" % (x, y)
if key == "charZone":
char_zones[hintpos] = (x, y)
elif key == "stemZone":
stem_zone_stems[hintpos] = (x, y)
elif key == "HStem":
width = x - y
# avoid counting duplicates
if hintpos not in hstems_pos:
count = hstems.get(width, 0)
hstems[width] = count + 1
hstems_pos[hintpos] = width
elif key == "VStem":
width = x - y
# avoid counting duplicates
if hintpos not in vstems_pos:
count = vstems.get(width, 0)
vstems[width] = count + 1
vstems_pos[hintpos] = width
else:
raise FontParseError("Found unknown keyword %s in report file "
"for glyph %s." % (key, glyphName))
@staticmethod
def round_value(val):
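        # Round half away from zero, e.g. round_value(45.5) -> 46 and
        # round_value(-45.5) -> -46.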
if val >= 0:
return int(val + 0.5)
else:
return int(val - 0.5)
def parse_stem_dict(self, stem_dict):
"""
stem_dict: {45.5: 1, 47.0: 2}
"""
# key: stem width
# value: stem count
width_dict = defaultdict(int)
for width, count in stem_dict.items():
width = self.round_value(width)
width_dict[width] += count
return width_dict
def parse_zone_dicts(self, char_dict, stem_dict):
all_zones_dict = char_dict.copy()
all_zones_dict.update(stem_dict)
# key: zone height
# value: zone count
top_dict = defaultdict(int)
bot_dict = defaultdict(int)
for top, bot in all_zones_dict.values():
top = self.round_value(top)
top_dict[top] += 1
bot = self.round_value(bot)
bot_dict[bot] += 1
return top_dict, bot_dict
def assemble_rep_list(self, items_dict, count_dict):
# item 0: stem/zone count
# item 1: stem width/zone height
# item 2: list of glyph names
gorder = list(self.glyphs.keys())
rep_list = []
for item in items_dict:
gnames = list(items_dict[item])
# sort the names by the font's glyph order
if len(gnames) > 1:
gindexes = [gorder.index(gname) for gname in gnames]
gnames = [x for _, x in sorted(zip(gindexes, gnames))]
rep_list.append((count_dict[item], item, gnames))
return rep_list
def _get_lists(self):
"""
self.glyphs is a dictionary:
key: glyph name
value: list of 4 dictionaries
hstems
vstems
char_zones
stem_zone_stems
{
'A': [{45.5: 1, 47.0: 2}, {229.0: 1}, {}, {}],
'B': [{46.0: 2, 46.5: 2, 47.0: 1}, {94.0: 1, 100.0: 1}, {}, {}],
'C': [{50.0: 2}, {109.0: 1}, {}, {}],
'D': [{46.0: 1, 46.5: 2, 47.0: 1}, {95.0: 1, 109.0: 1}, {}, {}],
'E': [{46.5: 2, 47.0: 1, 50.0: 2, 177.0: 1, 178.0: 1},
{46.0: 1, 75.5: 2, 95.0: 1}, {}, {}],
'F': [{46.5: 2, 47.0: 1, 50.0: 1, 177.0: 1},
{46.0: 1, 60.0: 1, 75.5: 1, 95.0: 1}, {}, {}],
'G': [{43.0: 1, 44.5: 1, 50.0: 1}, {94.0: 1, 109.0: 1}, {}, {}]
}
"""
h_stem_items_dict = defaultdict(set)
h_stem_count_dict = defaultdict(int)
v_stem_items_dict = defaultdict(set)
v_stem_count_dict = defaultdict(int)
top_zone_items_dict = defaultdict(set)
top_zone_count_dict = defaultdict(int)
bot_zone_items_dict = defaultdict(set)
bot_zone_count_dict = defaultdict(int)
for gName, dicts in self.glyphs.items():
hStemDict, vStemDict, charZoneDict, stemZoneStemDict = dicts
glyph_h_stem_dict = self.parse_stem_dict(hStemDict)
glyph_v_stem_dict = self.parse_stem_dict(vStemDict)
for stem_width, stem_count in glyph_h_stem_dict.items():
h_stem_items_dict[stem_width].add(gName)
h_stem_count_dict[stem_width] += stem_count
for stem_width, stem_count in glyph_v_stem_dict.items():
v_stem_items_dict[stem_width].add(gName)
v_stem_count_dict[stem_width] += stem_count
glyph_top_zone_dict, glyph_bot_zone_dict = self.parse_zone_dicts(
charZoneDict, stemZoneStemDict)
for zone_height, zone_count in glyph_top_zone_dict.items():
top_zone_items_dict[zone_height].add(gName)
top_zone_count_dict[zone_height] += zone_count
for zone_height, zone_count in glyph_bot_zone_dict.items():
bot_zone_items_dict[zone_height].add(gName)
bot_zone_count_dict[zone_height] += zone_count
# item 0: stem count
# item 1: stem width
# item 2: list of glyph names
h_stem_list = self.assemble_rep_list(
h_stem_items_dict, h_stem_count_dict)
v_stem_list = self.assemble_rep_list(
v_stem_items_dict, v_stem_count_dict)
# item 0: zone count
# item 1: zone height
# item 2: list of glyph names
top_zone_list = self.assemble_rep_list(
top_zone_items_dict, top_zone_count_dict)
bot_zone_list = self.assemble_rep_list(
bot_zone_items_dict, bot_zone_count_dict)
return h_stem_list, v_stem_list, top_zone_list, bot_zone_list
@staticmethod
def _sort_count(t):
"""
sort by: count (1st item), value (2nd item), list of glyph names (3rd
item)
"""
return (-t[0], -t[1], t[2])
@staticmethod
def _sort_val(t):
"""
sort by: value (2nd item), count (1st item), list of glyph names (3rd
item)
"""
return (t[1], -t[0], t[2])
@staticmethod
def _sort_val_reversed(t):
"""
sort by: value (2nd item), count (1st item), list of glyph names (3rd
item)
"""
return (-t[1], -t[0], t[2])
def save(self, path):
h_stems, v_stems, top_zones, bot_zones = self._get_lists()
items = ([h_stems, self._sort_count],
[v_stems, self._sort_count],
[top_zones, self._sort_val_reversed],
[bot_zones, self._sort_val])
atime = time.asctime()
suffixes = (".hstm.txt", ".vstm.txt", ".top.txt", ".bot.txt")
titles = ("Horizontal Stem List for %s on %s\n" % (path, atime),
"Vertical Stem List for %s on %s\n" % (path, atime),
"Top Zone List for %s on %s\n" % (path, atime),
"Bottom Zone List for %s on %s\n" % (path, atime),
)
headers = (["count width glyphs\n"] * 2 +
["count height glyphs\n"] * 2)
for i, item in enumerate(items):
reps, sortFunc = item
if not reps:
continue
fName = f'{path}{suffixes[i]}'
title = titles[i]
header = headers[i]
with open(fName, "w") as fp:
fp.write(title)
fp.write(header)
reps.sort(key=sortFunc)
for rep in reps:
gnames = ' '.join(rep[2])
fp.write(f"{rep[0]:5} {rep[1]:5} [{gnames}]\n")
log.info("Wrote %s" % fName)
def getGlyphID(glyphTag, fontGlyphList):
if glyphTag in fontGlyphList:
return fontGlyphList.index(glyphTag)
return None
def getGlyphNames(glyphTag, fontGlyphList, fontFileName):
glyphNameList = []
rangeList = glyphTag.split("-")
prevGID = getGlyphID(rangeList[0], fontGlyphList)
if prevGID is None:
if len(rangeList) > 1:
log.warning("glyph ID <%s> in range %s from glyph selection "
"list option is not in font. <%s>.",
rangeList[0], glyphTag, fontFileName)
else:
log.warning("glyph ID <%s> from glyph selection list option "
"is not in font. <%s>.", rangeList[0], fontFileName)
return None
glyphNameList.append(fontGlyphList[prevGID])
for glyphTag2 in rangeList[1:]:
gid = getGlyphID(glyphTag2, fontGlyphList)
if gid is None:
log.warning("glyph ID <%s> in range %s from glyph selection "
"list option is not in font. <%s>.",
glyphTag2, glyphTag, fontFileName)
return None
for i in range(prevGID + 1, gid + 1):
glyphNameList.append(fontGlyphList[i])
prevGID = gid
return glyphNameList
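# Example (comment only): a glyph tag such as "A-C" is expanded by glyph ID to
# every glyph name from 'A' through 'C' (inclusive) in the font's glyph order;
# if any name in the tag is missing from the font, a warning is logged and
# None is returned for the whole tag.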
def filterGlyphList(options, fontGlyphList, fontFileName):
# Return the list of glyphs which are in the intersection of the argument
# list and the glyphs in the font.
# Complain about glyphs in the argument list which are not in the font.
if not options.glyphList:
glyphList = fontGlyphList
else:
# expand ranges:
glyphList = []
for glyphTag in options.glyphList:
glyphNames = getGlyphNames(glyphTag, fontGlyphList, fontFileName)
if glyphNames is not None:
glyphList.extend(glyphNames)
if options.excludeGlyphList:
glyphList = [n for n in fontGlyphList if n not in glyphList]
return glyphList
fontInfoKeywordList = [
'FontName', # string
'OrigEmSqUnits',
'LanguageGroup',
'DominantV', # array
'DominantH', # array
'FlexOK', # string
'BlueFuzz',
'VCounterChars', # counter
'HCounterChars', # counter
'BaselineYCoord',
'BaselineOvershoot',
'CapHeight',
'CapOvershoot',
'LcHeight',
'LcOvershoot',
'AscenderHeight',
'AscenderOvershoot',
'FigHeight',
'FigOvershoot',
'Height5',
'Height5Overshoot',
'Height6',
'Height6Overshoot',
'DescenderOvershoot',
'DescenderHeight',
'SuperiorOvershoot',
'SuperiorBaseline',
'OrdinalOvershoot',
'OrdinalBaseline',
'Baseline5Overshoot',
'Baseline5',
'Baseline6Overshoot',
'Baseline6',
]
def openFile(path, options):
font_format = get_font_format(path)
if font_format is None:
raise FontParseError(f"{path} is not a supported font format")
if font_format == "UFO":
font = UFOFontData(path, options.logOnly, options.writeToDefaultLayer)
else:
font = CFFFontData(path, font_format)
return font
def get_glyph_list(options, font, path):
filename = os.path.basename(path)
# filter specified list, if any, with font list.
glyph_list = filterGlyphList(options, font.getGlyphList(), filename)
if not glyph_list:
raise FontParseError("Selected glyph list is empty for font <%s>." %
filename)
return glyph_list
def get_bez_glyphs(options, font, glyph_list):
glyphs = {}
for name in glyph_list:
# Convert to bez format
try:
bez_glyph = font.convertToBez(name, options.read_hints,
options.round_coords,
options.hintAll)
if bez_glyph is None or "mt" not in bez_glyph:
# skip empty glyphs.
continue
except KeyError:
# Source fonts may be sparse, e.g. be a subset of the
# reference font.
bez_glyph = None
glyphs[name] = GlyphEntry(bez_glyph, font)
total = len(glyph_list)
processed = len(glyphs)
if processed != total:
log.info("Skipped %s of %s glyphs.", total - processed, total)
return glyphs
def get_fontinfo_list(options, font, glyph_list, is_var=False):
# Check counter glyphs, if any.
counter_glyphs = options.hCounterGlyphs + options.vCounterGlyphs
if counter_glyphs:
missing = [n for n in counter_glyphs if n not in font.getGlyphList()]
if missing:
log.error("H/VCounterChars glyph named in fontinfo is "
"not in font: %s", missing)
# For Type1 name keyed fonts, psautohint supports defining
# different alignment zones for different glyphs by FontDict
# entries in the fontinfo file. This is NOT supported for CID
    # or CFF2 fonts, as these have FDArrays, and can truly support
# different Font.Dict.Private Dicts for different groups of glyphs.
if font.hasFDArray():
return get_fontinfo_list_withFDArray(options, font, glyph_list, is_var)
else:
return get_fontinfo_list_withFontInfo(options, font, glyph_list)
def get_fontinfo_list_withFDArray(options, font, glyph_list, is_var=False):
lastFDIndex = None
fontinfo_list = {}
for name in glyph_list:
# get new fontinfo string if FDarray index has changed,
# as each FontDict has different alignment zones.
fdIndex = font.getfdIndex(name)
        if fdIndex != lastFDIndex:
lastFDIndex = fdIndex
fddict = font.getFontInfo(options.allow_no_blues,
options.noFlex,
options.vCounterGlyphs,
options.hCounterGlyphs,
fdIndex)
fontinfo = fddict.getFontInfo()
fontinfo_list[name] = (fontinfo, None, None)
return fontinfo_list
def get_fontinfo_list_withFontInfo(options, font, glyph_list):
# Build alignment zone string
if options.printDefaultFDDict:
print("Showing default FDDict Values:")
fddict = font.getFontInfo(options.allow_no_blues,
options.noFlex,
options.vCounterGlyphs,
options.hCounterGlyphs)
# Exit by printing default FDDict with all lines indented by one tab
sys.exit("\t" + "\n\t".join(fddict.getFontInfo().split("\n")))
fdglyphdict, fontDictList = font.getfdInfo(options.allow_no_blues,
options.noFlex,
options.vCounterGlyphs,
options.hCounterGlyphs,
glyph_list)
if options.printFDDictList:
# Print the user defined FontDicts, and exit.
if fdglyphdict:
print("Showing user-defined FontDict Values:\n")
for fi, fontDict in enumerate(fontDictList):
print(fontDict.DictName)
print(fontDict.getFontInfo())
gnameList = []
# item = [glyphName, [fdIndex, glyphListIndex]]
itemList = sorted(fdglyphdict.items(), key=lambda x: x[1][1])
for gName, entry in itemList:
if entry[0] == fi:
gnameList.append(gName)
print("%d glyphs:" % len(gnameList))
if len(gnameList) > 0:
gTxt = " ".join(gnameList)
else:
gTxt = "None"
print(gTxt + "\n")
else:
print("There are no user-defined FontDict Values.")
return
if fdglyphdict is None:
fddict = fontDictList[0]
fontinfo = fddict.getFontInfo()
else:
log.info("Using alternate FDDict global values from fontinfo "
"file for some glyphs.")
lastFDIndex = None
fontinfo_list = {}
for name in glyph_list:
if fdglyphdict is not None:
fdIndex = fdglyphdict[name][0]
if lastFDIndex != fdIndex:
lastFDIndex = fdIndex
fddict = fontDictList[fdIndex]
fontinfo = fddict.getFontInfo()
fontinfo_list[name] = (fontinfo, fddict, fdglyphdict)
return fontinfo_list
class MMHintInfo:
def __init__(self, glyph_name=None):
self.defined = False
self.h_order = None
self.v_order = None
self.hint_masks = []
self.glyph_name = glyph_name
# bad_hint_idxs contains the hint pair indices for all the bad
# hint pairs in any of the fonts for the current glyph.
self.bad_hint_idxs = set()
self.cntr_masks = []
self.new_cntr_masks = []
self.glyph_programs = None
@property
def needs_fix(self):
return len(self.bad_hint_idxs) > 0
def hint_glyph(options, name, bez_glyph, fontinfo):
try:
hinted = hint_bez_glyph(fontinfo, bez_glyph, options.allowChanges,
not options.noHintSub, options.round_coords,
options.report_zones, options.report_stems,
options.report_all_stems)
except PsAutoHintCError:
raise ACHintError("%s: Failure in processing outline data." %
options.nameAliases.get(name, name))
return hinted
def hint_compatible_glyphs(options, name, bez_glyphs, masters, fontinfo):
# This function is used by both
# hint_with_reference_font->hint_compatible_fonts
# and hint_vf_font.
try:
ref_master = masters[0]
# *************************************************************
# *********** DO NOT DELETE THIS COMMENTED-OUT CODE ***********
# If you're tempted to "clean up", work on solving
# https://github.com/adobe-type-tools/psautohint/issues/202
# first, then you can uncomment the "hint_compatible_bez_glyphs"
# line and remove this and other related comments, as well as
# the workaround block following "# else:", below. Thanks.
# *************************************************************
#
# if False:
# # This is disabled because it causes crashes on the CI servers
# # which are not reproducible locally. The branch below is a hack
# # to avoid the crash and should be dropped once the crash is
# # fixed, https://github.com/adobe-type-tools/psautohint/pull/131
# hinted = hint_compatible_bez_glyphs(
# fontinfo, bez_glyphs, masters)
# *** see https://github.com/adobe-type-tools/psautohint/issues/202 ***
# else:
hinted = []
hinted_ref_bez = hint_glyph(options, name, bez_glyphs[0], fontinfo)
for i, bez in enumerate(bez_glyphs[1:]):
if bez is None:
out = [hinted_ref_bez, None]
else:
in_bez = [hinted_ref_bez, bez]
in_masters = [ref_master, masters[i + 1]]
out = hint_compatible_bez_glyphs(fontinfo,
in_bez,
in_masters)
# out is [hinted_ref_bez, new_hinted_region_bez]
if i == 0:
hinted = out
else:
hinted.append(out[1])
except PsAutoHintCError:
raise ACHintError("%s: Failure in processing outline data." %
options.nameAliases.get(name, name))
return hinted
def get_glyph_reports(options, font, glyph_list, fontinfo_list):
reports = GlyphReports()
glyphs = get_bez_glyphs(options, font, glyph_list)
for name in glyphs:
if name == ".notdef":
continue
bez_glyph = glyphs[name][0]
fontinfo = fontinfo_list[name][0]
report = hint_glyph(options, name, bez_glyph, fontinfo)
reports.addGlyphReport(name, report.strip())
return reports
GlyphEntry = namedtuple("GlyphEntry", "bez_data,font")
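# GlyphEntry pairs a glyph's bez-format outline data with the font object it
# was read from (used by get_bez_glyphs, hint_font and the MM-hinting paths).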
def hint_font(options, font, glyph_list, fontinfo_list):
aliases = options.nameAliases
hinted = {}
glyphs = get_bez_glyphs(options, font, glyph_list)
for name in glyphs:
g_entry = glyphs[name]
fontinfo, fddict, fdglyphdict = fontinfo_list[name]
if fdglyphdict:
log.info("%s: Begin hinting (using fdDict %s).",
aliases.get(name, name), fddict.DictName)
else:
log.info("%s: Begin hinting.", aliases.get(name, name))
# Call auto-hint library on bez string.
new_bez_glyph = hint_glyph(options, name, g_entry.bez_data, fontinfo)
options.baseMaster[name] = new_bez_glyph
if not ("ry" in new_bez_glyph or "rb" in new_bez_glyph or
"rm" in new_bez_glyph or "rv" in new_bez_glyph):
log.info("%s: No hints added!", aliases.get(name, name))
continue
if options.logOnly:
continue
hinted[name] = GlyphEntry(new_bez_glyph, font)
return hinted
def hint_compatible_fonts(options, paths, glyphs,
fontinfo_list):
# glyphs is a list of dicts, one per font. Each dict is keyed by glyph name
# and references a tuple of (src bez file, font)
aliases = options.nameAliases
hinted_glyphs = set()
reference_font = None
for name in glyphs[0]:
fontinfo, _, _ = fontinfo_list[name]
log.info("%s: Begin hinting.", aliases.get(name, name))
masters = [os.path.basename(path) for path in paths]
bez_glyphs = [g[name].bez_data for g in glyphs]
new_bez_glyphs = hint_compatible_glyphs(options, name, bez_glyphs,
masters, fontinfo)
if options.logOnly:
continue
if reference_font is None:
fonts = [g[name].font for g in glyphs]
reference_font = fonts[0]
mm_hint_info = MMHintInfo()
for i, new_bez_glyph in enumerate(new_bez_glyphs):
if new_bez_glyph is not None:
g_entry = glyphs[i][name]
g_entry.font.updateFromBez(new_bez_glyph, name, mm_hint_info)
hinted_glyphs.add(name)
# Now check if we need to fix any hint lists.
if mm_hint_info.needs_fix:
reference_font.fix_glyph_hints(name, mm_hint_info,
is_reference_font=True)
for font in fonts[1:]:
font.fix_glyph_hints(name,
mm_hint_info,
is_reference_font=False)
return len(hinted_glyphs) > 0
def hint_vf_font(options, font_path, out_path):
font = openFile(font_path, options)
    options.noFlex = True  # workaround for incompatible flex args.
aliases = options.nameAliases
glyph_names = get_glyph_list(options, font, font_path)
log.info("Hinting font %s. Start time: %s.", font_path, time.asctime())
fontinfo_list = get_fontinfo_list(options, font, glyph_names, True)
hinted_glyphs = set()
for name in glyph_names:
fontinfo, _, _ = fontinfo_list[name]
log.info("%s: Begin hinting.", aliases.get(name, name))
bez_glyphs = font.get_vf_bez_glyphs(name)
num_masters = len(bez_glyphs)
masters = [f"Master-{i}" for i in range(num_masters)]
new_bez_glyphs = hint_compatible_glyphs(options, name, bez_glyphs,
masters, fontinfo)
if None in new_bez_glyphs:
log.info(f"Error while hinting glyph {name}.")
continue
if options.logOnly:
continue
hinted_glyphs.add(name)
# First, convert bez to fontTools T2 programs,
# and check if any hints conflict.
mm_hint_info = MMHintInfo()
for i, new_bez_glyph in enumerate(new_bez_glyphs):
if new_bez_glyph is not None:
font.updateFromBez(new_bez_glyph, name, mm_hint_info)
# Now check if we need to fix any hint lists.
if mm_hint_info.needs_fix:
font.fix_glyph_hints(name, mm_hint_info)
        # Now merge the programs into a single CFF2 charstring program
font.merge_hinted_glyphs(name)
if hinted_glyphs:
log.info(f"Saving font file {out_path} with new hints...")
font.save(out_path)
else:
log.info("No glyphs were hinted.")
font.close()
log.info("Done with font %s. End time: %s.", font_path, time.asctime())
def hint_with_reference_font(options, fonts, paths, outpaths):
# We are doing compatible, AKA multiple master, hinting.
log.info("Start time: %s.", time.asctime())
options.noFlex = True # work-around for mm-hinting
# Get the glyphs and font info of the reference font. We assume the
# fonts have the same glyph set, glyph dict and in general are
# compatible. If not bad things will happen.
glyph_names = get_glyph_list(options, fonts[0], paths[0])
fontinfo_list = get_fontinfo_list(options, fonts[0], glyph_names)
glyphs = []
for i, font in enumerate(fonts):
glyphs.append(get_bez_glyphs(options, font, glyph_names))
have_hinted_glyphs = hint_compatible_fonts(options, paths,
glyphs, fontinfo_list)
if have_hinted_glyphs:
log.info("Saving font files with new hints...")
for i, font in enumerate(fonts):
font.save(outpaths[i])
else:
log.info("No glyphs were hinted.")
font.close()
log.info("End time: %s.", time.asctime())
def hint_regular_fonts(options, fonts, paths, outpaths):
# Regular fonts, just iterate over the list and hint each one.
for i, font in enumerate(fonts):
path = paths[i]
outpath = outpaths[i]
glyph_names = get_glyph_list(options, font, path)
fontinfo_list = get_fontinfo_list(options, font, glyph_names)
log.info("Hinting font %s. Start time: %s.", path, time.asctime())
if options.report_zones or options.report_stems:
reports = get_glyph_reports(options, font, glyph_names,
fontinfo_list)
reports.save(outpath)
else:
hinted = hint_font(options, font, glyph_names, fontinfo_list)
if hinted:
log.info("Saving font file with new hints...")
for name in hinted:
g_entry = hinted[name]
font.updateFromBez(g_entry.bez_data, name)
font.save(outpath)
else:
log.info("No glyphs were hinted.")
font.close()
log.info("Done with font %s. End time: %s.", path, time.asctime())
def get_outpath(options, font_path, i):
if options.outputPaths is not None and i < len(options.outputPaths):
outpath = options.outputPaths[i]
else:
outpath = font_path
return outpath
def hintFiles(options):
fonts = []
paths = []
outpaths = []
# If there is a reference font, prepend it to font paths.
# It must be the first font in the list, code below assumes that.
if options.reference_font:
font = openFile(options.reference_font, options)
fonts.append(font)
paths.append(options.reference_font)
outpaths.append(options.reference_font)
if hasattr(font, 'ttFont'):
assert 'fvar' not in font.ttFont, ("Can't use a CFF2 VF font as a "
"default font in a set of MM "
"fonts.")
# Open the rest of the fonts and handle output paths.
for i, path in enumerate(options.inputPaths):
font = openFile(path, options)
out_path = get_outpath(options, path, i)
if hasattr(font, 'ttFont') and 'fvar' in font.ttFont:
            assert not (options.report_zones or options.report_stems)
# Certainly not supported now, also I think it only makes sense
# to ask for zone reports for the source fonts for the VF font.
# You can't easily change blue values in a VF font.
hint_vf_font(options, path, out_path)
else:
fonts.append(font)
paths.append(path)
outpaths.append(out_path)
if fonts:
if fonts[0].isCID():
            options.noFlex = True  # Flex hinting in CJK fonts does bad things.
# For CFF fonts, being a CID font is a good indicator of being CJK.
if options.reference_font:
hint_with_reference_font(options, fonts, paths, outpaths)
else:
hint_regular_fonts(options, fonts, paths, outpaths)
|
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from oslo_serialization import jsonutils
import requests
from glance.tests import functional
TENANT1 = str(uuid.uuid4())
class TestMetadefObjects(functional.FunctionalTest):
def setUp(self):
super(TestMetadefObjects, self).setUp()
self.cleanup()
self.api_server.deployment_flavor = 'noauth'
self.start_servers(**self.__dict__.copy())
def _url(self, path):
return 'http://127.0.0.1:%d%s' % (self.api_port, path)
def _headers(self, custom_headers=None):
base_headers = {
'X-Identity-Status': 'Confirmed',
'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
'X-Tenant-Id': TENANT1,
'X-Roles': 'admin',
}
base_headers.update(custom_headers or {})
return base_headers
def test_metadata_objects_lifecycle(self):
# Namespace should not exist
path = self._url('/v2/metadefs/namespaces/MyNamespace')
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
# Create a namespace
path = self._url('/v2/metadefs/namespaces')
headers = self._headers({'content-type': 'application/json'})
namespace_name = 'MyNamespace'
data = jsonutils.dumps({
"namespace": namespace_name,
"display_name": "My User Friendly Namespace",
"description": "My description",
"visibility": "public",
"protected": False,
"owner": "The Test Owner"
}
)
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Metadata objects should not exist
path = self._url('/v2/metadefs/namespaces/MyNamespace/objects/object1')
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
# Create an object
path = self._url('/v2/metadefs/namespaces/MyNamespace/objects')
headers = self._headers({'content-type': 'application/json'})
metadata_object_name = "object1"
data = jsonutils.dumps(
{
"name": metadata_object_name,
"description": "object1 description.",
"required": [
"property1"
],
"properties": {
"property1": {
"type": "integer",
"title": "property1",
"description": "property1 description",
"operators": ["<all-in>"],
"default": 100,
"minimum": 100,
"maximum": 30000369
},
"property2": {
"type": "string",
"title": "property2",
"description": "property2 description ",
"default": "value2",
"minLength": 2,
"maxLength": 50
}
}
}
)
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
# Get the metadata object created above
path = self._url('/v2/metadefs/namespaces/%s/objects/%s' %
(namespace_name, metadata_object_name))
response = requests.get(path,
headers=self._headers())
self.assertEqual(200, response.status_code)
metadata_object = jsonutils.loads(response.text)
self.assertEqual("object1", metadata_object['name'])
# Returned object should match the created object
metadata_object = jsonutils.loads(response.text)
checked_keys = set([
u'name',
u'description',
u'properties',
u'required',
u'self',
u'schema',
u'created_at',
u'updated_at'
])
self.assertEqual(set(metadata_object.keys()), checked_keys)
expected_metadata_object = {
"name": metadata_object_name,
"description": "object1 description.",
"required": [
"property1"
],
"properties": {
'property1': {
'type': 'integer',
"title": "property1",
'description': 'property1 description',
'operators': ['<all-in>'],
'default': 100,
'minimum': 100,
'maximum': 30000369
},
"property2": {
"type": "string",
"title": "property2",
"description": "property2 description ",
"default": "value2",
"minLength": 2,
"maxLength": 50
}
},
"self": "/v2/metadefs/namespaces/%("
"namespace)s/objects/%(object)s" %
{'namespace': namespace_name,
'object': metadata_object_name},
"schema": "v2/schemas/metadefs/object"
}
# Simple key values
checked_values = set([
u'name',
u'description',
])
for key, value in expected_metadata_object.items():
if(key in checked_values):
self.assertEqual(metadata_object[key], value, key)
# Complex key values - properties
for key, value in \
expected_metadata_object["properties"]['property2'].items():
self.assertEqual(
metadata_object["properties"]["property2"][key],
value, key
)
# The metadata_object should be mutable
path = self._url('/v2/metadefs/namespaces/%s/objects/%s' %
(namespace_name, metadata_object_name))
media_type = 'application/json'
headers = self._headers({'content-type': media_type})
metadata_object_name = "object1-UPDATED"
data = jsonutils.dumps(
{
"name": metadata_object_name,
"description": "desc-UPDATED",
"required": [
"property2"
],
"properties": {
'property1': {
'type': 'integer',
"title": "property1",
'description': 'p1 desc-UPDATED',
'default': 500,
'minimum': 500,
'maximum': 1369
},
"property2": {
"type": "string",
"title": "property2",
"description": "p2 desc-UPDATED",
'operators': ['<or>'],
"default": "value2-UPDATED",
"minLength": 5,
"maxLength": 150
}
}
}
)
response = requests.put(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Returned metadata_object should reflect the changes
metadata_object = jsonutils.loads(response.text)
self.assertEqual('object1-UPDATED', metadata_object['name'])
self.assertEqual('desc-UPDATED', metadata_object['description'])
self.assertEqual('property2', metadata_object['required'][0])
updated_property1 = metadata_object['properties']['property1']
updated_property2 = metadata_object['properties']['property2']
self.assertEqual('integer', updated_property1['type'])
self.assertEqual('p1 desc-UPDATED', updated_property1['description'])
self.assertEqual('500', updated_property1['default'])
self.assertEqual(500, updated_property1['minimum'])
self.assertEqual(1369, updated_property1['maximum'])
self.assertEqual(['<or>'], updated_property2['operators'])
self.assertEqual('string', updated_property2['type'])
self.assertEqual('p2 desc-UPDATED', updated_property2['description'])
self.assertEqual('value2-UPDATED', updated_property2['default'])
self.assertEqual(5, updated_property2['minLength'])
self.assertEqual(150, updated_property2['maxLength'])
# Updates should persist across requests
path = self._url('/v2/metadefs/namespaces/%s/objects/%s' %
(namespace_name, metadata_object_name))
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
metadata_object = jsonutils.loads(response.text)
self.assertEqual('object1-UPDATED', metadata_object['name'])
self.assertEqual('desc-UPDATED', metadata_object['description'])
self.assertEqual('property2', metadata_object['required'][0])
updated_property1 = metadata_object['properties']['property1']
updated_property2 = metadata_object['properties']['property2']
self.assertEqual('integer', updated_property1['type'])
self.assertEqual('p1 desc-UPDATED', updated_property1['description'])
self.assertEqual('500', updated_property1['default'])
self.assertEqual(500, updated_property1['minimum'])
self.assertEqual(1369, updated_property1['maximum'])
self.assertEqual(['<or>'], updated_property2['operators'])
self.assertEqual('string', updated_property2['type'])
self.assertEqual('p2 desc-UPDATED', updated_property2['description'])
self.assertEqual('value2-UPDATED', updated_property2['default'])
self.assertEqual(5, updated_property2['minLength'])
self.assertEqual(150, updated_property2['maxLength'])
# Deletion of metadata_object object1
path = self._url('/v2/metadefs/namespaces/%s/objects/%s' %
(namespace_name, metadata_object_name))
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
# metadata_object object1 should not exist
path = self._url('/v2/metadefs/namespaces/%s/objects/%s' %
(namespace_name, metadata_object_name))
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
|
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.object_storage import base
from tempest import config
from tempest import test
CONF = config.CONF
class ObjectACLsNegativeTest(base.BaseObjectTest):
credentials = [['operator', CONF.object_storage.operator_role],
['operator_alt', CONF.object_storage.operator_role]]
@classmethod
def setup_credentials(cls):
super(ObjectACLsNegativeTest, cls).setup_credentials()
cls.os = cls.os_roles_operator
cls.os_operator = cls.os_roles_operator_alt
@classmethod
def resource_setup(cls):
super(ObjectACLsNegativeTest, cls).resource_setup()
cls.test_auth_data = cls.os_operator.auth_provider.auth_data
def setUp(self):
super(ObjectACLsNegativeTest, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
self.container_client.create_container(self.container_name)
def tearDown(self):
self.delete_containers([self.container_name])
super(ObjectACLsNegativeTest, self).tearDown()
@test.attr(type=['negative'])
@test.idempotent_id('af587587-0c24-4e15-9822-8352ce711013')
def test_write_object_without_using_creds(self):
# trying to create object with empty headers
# X-Auth-Token is not provided
object_name = data_utils.rand_name(name='Object')
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=None
)
self.assertRaises(lib_exc.Unauthorized,
self.object_client.create_object,
self.container_name, object_name, 'data', headers={})
@test.attr(type=['negative'])
@test.idempotent_id('af85af0b-a025-4e72-a90e-121babf55720')
def test_delete_object_without_using_creds(self):
# create object
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(self.container_name,
object_name, 'data')
# trying to delete object with empty headers
# X-Auth-Token is not provided
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=None
)
self.assertRaises(lib_exc.Unauthorized,
self.object_client.delete_object,
self.container_name, object_name)
@test.attr(type=['negative'])
@test.idempotent_id('63d84e37-55a6-42e2-9e5f-276e60e26a00')
def test_write_object_with_non_authorized_user(self):
# attempt to upload another file using non-authorized user
# User-provided token is forbidden. ACLs are not set
object_name = data_utils.rand_name(name='Object')
# trying to create object with non-authorized user
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(lib_exc.Forbidden,
self.object_client.create_object,
self.container_name, object_name, 'data', headers={})
@test.attr(type=['negative'])
@test.idempotent_id('abf63359-be52-4feb-87dd-447689fc77fd')
def test_read_object_with_non_authorized_user(self):
# attempt to read object using non-authorized user
# User-provided token is forbidden. ACLs are not set
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(
self.container_name, object_name, 'data')
self.assertHeaders(resp, 'Object', 'PUT')
# trying to get object with non-authorized user token
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(lib_exc.Forbidden,
self.object_client.get_object,
self.container_name, object_name)
@test.attr(type=['negative'])
@test.idempotent_id('7343ac3d-cfed-4198-9bb0-00149741a492')
def test_delete_object_with_non_authorized_user(self):
# attempt to delete object using non-authorized user
# User-provided token is forbidden. ACLs are not set
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(
self.container_name, object_name, 'data')
self.assertHeaders(resp, 'Object', 'PUT')
# trying to delete object with non-authorized user token
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(lib_exc.Forbidden,
self.object_client.delete_object,
self.container_name, object_name)
@test.attr(type=['negative'])
@test.idempotent_id('9ed01334-01e9-41ea-87ea-e6f465582823')
def test_read_object_without_rights(self):
# attempt to read object using non-authorized user
# update X-Container-Read metadata ACL
cont_headers = {'X-Container-Read': 'badtenant:baduser'}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(self.container_name,
object_name, 'data')
self.assertHeaders(resp, 'Object', 'PUT')
# Trying to read the object without rights
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(lib_exc.Forbidden,
self.object_client.get_object,
self.container_name, object_name)
@test.attr(type=['negative'])
@test.idempotent_id('a3a585a7-d8cf-4b65-a1a0-edc2b1204f85')
def test_write_object_without_rights(self):
# attempt to write object using non-authorized user
# update X-Container-Write metadata ACL
cont_headers = {'X-Container-Write': 'badtenant:baduser'}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# Trying to write the object without rights
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
object_name = data_utils.rand_name(name='Object')
self.assertRaises(lib_exc.Forbidden,
self.object_client.create_object,
self.container_name,
object_name, 'data', headers={})
@test.attr(type=['negative'])
@test.idempotent_id('8ba512ad-aa6e-444e-b882-2906a0ea2052')
def test_write_object_without_write_rights(self):
# attempt to write object using non-authorized user
# update X-Container-Read and X-Container-Write metadata ACL
tenant_name = self.os_operator.credentials.tenant_name
username = self.os_operator.credentials.username
cont_headers = {'X-Container-Read':
tenant_name + ':' + username,
'X-Container-Write': ''}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# Trying to write the object without write rights
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
object_name = data_utils.rand_name(name='Object')
self.assertRaises(lib_exc.Forbidden,
self.object_client.create_object,
self.container_name,
object_name, 'data', headers={})
@test.attr(type=['negative'])
@test.idempotent_id('b4e366f8-f185-47ab-b789-df4416f9ecdb')
def test_delete_object_without_write_rights(self):
# attempt to delete object using non-authorized user
# update X-Container-Read and X-Container-Write metadata ACL
tenant_name = self.os_operator.credentials.tenant_name
username = self.os_operator.credentials.username
cont_headers = {'X-Container-Read':
tenant_name + ':' + username,
'X-Container-Write': ''}
resp_meta, body = self.container_client.update_container_metadata(
self.container_name, metadata=cont_headers,
metadata_prefix='')
self.assertHeaders(resp_meta, 'Container', 'POST')
# create object
object_name = data_utils.rand_name(name='Object')
resp, _ = self.object_client.create_object(self.container_name,
object_name, 'data')
self.assertHeaders(resp, 'Object', 'PUT')
# Trying to delete the object without write rights
self.object_client.auth_provider.set_alt_auth_data(
request_part='headers',
auth_data=self.test_auth_data
)
self.assertRaises(lib_exc.Forbidden,
self.object_client.delete_object,
self.container_name,
object_name)
|
|
import numpy as np
from copy import copy
from bokeh.core.properties import field
from bokeh.io import curdoc
from bokeh.layouts import layout,column,row
from bokeh.models import (
ColumnDataSource, HoverTool, SingleIntervalTicker, Slider, Button, Label,
CategoricalColorMapper,
)
from bokeh.models.widgets import Panel, Tabs
from bokeh.models import ColumnDataSource, CustomJS, Rect,Spacer
from bokeh.models import HoverTool,TapTool,FixedTicker,Circle
from bokeh.models import BoxSelectTool, LassoSelectTool
from bokeh.models.mappers import LinearColorMapper
from bokeh.plotting import figure
from bokeh.palettes import Spectral6,Inferno256,Viridis256,Greys256,Magma256,Plasma256
from bokeh.models import LogColorMapper, LogTicker, ColorBar,BasicTicker,LinearColorMapper
def create_plot(data,xcol,ycol,ccol,plt_name):
xval=copy(data[:,xcol])
n=len(xval)
yval=copy(data[:,ycol])
cval=copy(data[:,ccol])
colors,colorbar=set_colors(cval,plt_name)
datasrc = ColumnDataSource(
data=dict(
x=xval,
y=yval,
z=cval,
colors=colors
)
)
# color_mapper = LinearColorMapper(palette="Viridis256", low=min(cval), high=max(cval))
# colorbar = ColorBar(color_mapper=color_mapper, ticker=BasicTicker(),label_standoff=12, border_line_color=None, location=(0,0))
s2 = ColumnDataSource(data=dict(xs=[], ys=[]))
source = ColumnDataSource({'xs': [], 'ys': [], 'wd': [], 'ht': []})
jscode="""
var data = source.get('data');
var start = range.get('start');
var end = range.get('end');
data['%s'] = [start + (end - start) / 2];
data['%s'] = [end - start];
source.trigger('change');
"""
initial_circle = Circle(x='x', y='y')
selected_circle = Circle(fill_alpha=1, fill_color="firebrick", size=20)
nonselected_circle = Circle(fill_alpha=0.4,fill_color='colors',line_color=None)
title=" "
TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,box_select,lasso_select,tap,save"
#TOOLS="pan,wheel_zoom,box_select,lasso_select,reset"
# The main Plot of tab 1
p1 = figure(title=title,tools=TOOLS,active_scroll="wheel_zoom",height=500,width=500,toolbar_location="above")
# p1.toolbar.active_scroll=None
p1.circle('x','y',source=datasrc,size=5,fill_color='colors', fill_alpha=0.6, line_color=None,name="mycircle")
p1.add_layout(colorbar, 'left')
p1.x_range.callback = CustomJS(
args=dict(source=source, range=p1.x_range), code=jscode % ('xs', 'wd'))
p1.y_range.callback = CustomJS(
args=dict(source=source, range=p1.y_range), code=jscode % ('ys', 'ht'))
renderer = p1.select(name="mycircle")
renderer.selection_glyph = selected_circle
renderer.nonselection_glyph = nonselected_circle
p1.xgrid.grid_line_color = None
p1.ygrid.grid_line_color = None
p1.xaxis[0].ticker=FixedTicker(ticks=[])
p1.yaxis[0].ticker=FixedTicker(ticks=[])
p1.outline_line_width = 0
p1.outline_line_color = "white"
p1.xaxis.axis_line_width = 0
p1.xaxis.axis_line_color = "white"
p1.yaxis.axis_line_width = 0
p1.yaxis.axis_line_color = "white"
# The overview plot
p2 = figure(tools='',height=250,width=250)
p2.xgrid.grid_line_color = None
p2.ygrid.grid_line_color = None
p2.xaxis[0].ticker=FixedTicker(ticks=[])
p2.yaxis[0].ticker=FixedTicker(ticks=[])
p2.circle('x','y',source=datasrc,size=5,fill_color=colors, fill_alpha=0.8, line_color=None,name='mycircle')
renderer = p2.select(name="mycircle")
renderer.selection_glyph = selected_circle
renderer.nonselection_glyph = nonselected_circle
p2.outline_line_width = 0
p2.outline_line_alpha = 0.3
p2.outline_line_color = "white"
p2.xaxis.axis_line_width = 0
p2.xaxis.axis_line_color = "white"
p2.yaxis.axis_line_width = 0
p2.yaxis.axis_line_color = "white"
rect = Rect(x='xs', y='ys', width='wd', height='ht', fill_alpha=0.1,
line_color='black', fill_color='black')
p2.add_glyph(source, rect)
callback=CustomJS(code="""
var inds = cb_obj.get('selected')['1d'].indices[0];
var str = "" + inds;
var pad = "0000";
var indx = pad.substring(0, pad.length - str.length) + str;
var settings= "connect 1.0 1.2 (carbon) (hydrogen) SINGLE CREATE ; connect 1.0 1.2 (nitrogen) (hydrogen) SINGLE CREATE ; connect 1.0 4.2 (carbon) (nitrogen) SINGLE CREATE ; connect 3.0 5 (phosphorus) (iodine) SINGLE CREATE ; set perspectiveDepth OFF "
var file= "javascript:Jmol.script(jmolApplet0," + "'load plot-server/static/xyz/set."+ indx+ ".xyz ;" + settings + "')" ;
location.href=file;
""")
# def slider_callback2(src=datasrc,source=s2, window=None):
# data = source.data
# xval=src.data['x']
# yval=src.data['y']
# ind = cb_obj.value # NOQA
# data['xs']=[xval[ind]]
# data['ys']=[yval[ind]]
# source.trigger('change');
taptool = p1.select(type=TapTool)
taptool.callback = callback
# taptool.js_on_change('value', callback=CustomJS.from_py_func(slider_callback2))
slider_callback=CustomJS( code="""
var inds = cb_obj.value;
var str = "" + inds;
var pad = "0000";
var indx = pad.substring(0, pad.length - str.length) + str;
var settings= "connect 1.0 1.2 (carbon) (hydrogen) SINGLE CREATE ; connect 1.0 1.2 (nitrogen) (hydrogen) SINGLE CREATE ; connect 1.0 4.2 (carbon) (nitrogen) SINGLE CREATE ; connect 3.0 5 (phosphorus) (iodine) SINGLE CREATE ; set perspectiveDepth OFF "
var file= "javascript:Jmol.script(jmolApplet1," + "'load plot-server/static/xyz/set."+ indx+ ".xyz ;" + settings + "')" ;
location.href=file;
""")
# slider = Slider(start=0, end=2600, value=0, step=1, title="selected", callback=CustomJS.from_py_func(slider_callback2))
slider = Slider(start=0, end=n-1, value=0, step=1, title="Frame:", width=800)
# slider.js_on_change('value', CustomJS.from_py_func(slider_callback2))
slider.js_on_change('value', slider_callback)
# draw selected point on slider change
# p1.circle('xs', 'ys', source=s2, fill_alpha=1, fill_color="firebrick", size=10,name="mycircle")
return p1,p2,slider
# return column(row(p1,p2),row(slider,Spacer(width=20, height=30)))
def cosmo_colors(cval):
color_palatte=cosmo_palatte()
colormap=RGBAColorMapper(min(cval),max(cval),color_palatte)
rgb=colormap.color(cval)
colors = ["#%02x%02x%02x" % (c[0],c[1],c[2]) for c in rgb]
return colors
def set_colors(cval,plt_name='cosmo'):
if (plt_name == 'cosmo'):
plt=cosmo_palatte()
# colormap1=RGBAColorMapper(min(cval),max(cval),plt1)
# rgb1=colormap1.color(cval)
# plt = ["#%02x%02x%02x" % (c[0],c[1],c[2]) for c in rgb1]
if (plt_name == 'Spectral6'): plt=Spectral6
if (plt_name == 'Inferno256'): plt=Inferno256
if (plt_name == 'Viridis256'): plt=Viridis256
if (plt_name == 'Greys256'): plt=Greys256
if (plt_name == 'Magma256'): plt=Magma256
if (plt_name == 'Plasma256'): plt=Plasma256
colormap=RGBAColorMapper(min(cval),max(cval),plt)
rgb=colormap.color(cval)
colors = ["#%02x%02x%02x" % (c[0],c[1],c[2]) for c in rgb]
color_mapper=LinearColorMapper(palette=plt, low=min(cval), high=max(cval))
colorbar = ColorBar(color_mapper=color_mapper, ticker=BasicTicker(),label_standoff=12, border_line_color=None, location=(0,0))
return colors,colorbar
def hex_to_rgb(value):
"""Given a color in hex format, return it in RGB."""
values = value.lstrip('#')
lv = len(values)
rgb = list(int(values[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
return rgb
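# Illustrative sketch (not part of the original script): hex_to_rgb splits a
# six-digit hex color into its red, green and blue channel values.
_example_rgb = hex_to_rgb('#ddf0fe')  # -> [221, 240, 254]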
class RGBAColorMapper(object):
"""Maps floating point values to rgb values over a palette"""
def __init__(self, low, high, palette):
self.range = np.linspace(low, high, len(palette))
# self.r, self.g, self.b = np.array(zip(*[hex_to_rgb(i) for i in palette])) #python 2.7
self.r, self.g, self.b = np.array(list(zip(*[hex_to_rgb(i) for i in palette])))
def color(self, data):
"""Maps your data values to the pallette with linear interpolation"""
red = np.interp(data, self.range, self.r)
blue = np.interp(data, self.range, self.b)
green = np.interp(data, self.range, self.g)
# Style plot to return a grey color when value is 'nan'
red[np.isnan(red)] = 240
blue[np.isnan(blue)] = 240
green[np.isnan(green)] = 240
colors = np.dstack([red.astype(np.uint8),
green.astype(np.uint8),
blue.astype(np.uint8),
np.full_like(data, 255, dtype=np.uint8)])
#return colors.view(dtype=np.uint32).reshape(data.shape)
c=[]
for i in range(len(data)):
c.append([red[i],green[i],blue[i]])
return c
def cosmo_palatte(self):
color_palatte=["#ddf0fe", "#dcf0fe", "#dcf0fe", "#dbf0fe", "#dbf0fe", "#dbeffe", \
"#daeffe", "#daeffe", "#d9effe", "#d9effe", "#d8eefe", "#d8eefe", \
"#d7eefe", "#d7eefe", "#d6edfe", "#d6edfe", "#d5edfe", "#d5edfe", \
"#d4edfe", "#d4ecfe", "#d3ecfe", "#d3ecfe", "#d2ecfe", "#d2ebfe", \
"#d1ebfe", "#d1ebfe", "#d0ebfe", "#d0eafe", "#d0eafe", "#cfeafe", \
"#cfeafe", "#cee9fe", "#cee9fd", "#cde9fd", "#cce9fd", "#cce9fd", \
"#cbe8fd", "#cbe8fd", "#cae8fd", "#cae8fd", "#c9e7fd", "#c9e7fd", \
"#c8e7fd", "#c8e7fd", "#c7e6fd", "#c7e6fd", "#c6e6fd", "#c6e6fd", \
"#c5e5fd", "#c5e5fd", "#c4e5fd", "#c4e5fd", "#c3e4fd", "#c3e4fd", \
"#c2e4fd", "#c2e3fd", "#c1e3fd", "#c0e3fd", "#c0e3fd", "#bfe2fd", \
"#bfe2fd", "#bee2fd", "#bee2fd", "#bde1fd", "#bde1fd", "#bce1fd", \
"#bce1fd", "#bbe0fd", "#bae0fd", "#bae0fd", "#b9dffd", "#b9dffd", \
"#b8dffd", "#b8dffd", "#b7defd", "#b7defd", "#b6defd", "#b5defd", \
"#b5ddfc", "#b4ddfc", "#b4ddfc", "#b3dcfc", "#b3dcfc", "#b2dcfc", \
"#b2dcfc", "#b1dbfc", "#b0dbfc", "#b0dbfc", "#afdbfc", "#afdafc", \
"#aedafc", "#aedafc", "#add9fc", "#acd9fc", "#acd9fc", "#abd9fc", \
"#abd8fc", "#aad8fc", "#a9d8fc", "#a9d7fc", "#a8d7fc", "#a8d7fc", \
"#a7d7fc", "#a7d6fc", "#a6d6fc", "#a5d6fc", "#a5d5fc", "#a4d5fc", \
"#a4d5fc", "#a3d4fc", "#a2d4fc", "#a2d4fc", "#a1d4fc", "#a1d3fc", \
"#a0d3fc", "#9fd3fc", "#9fd2fc", "#9ed2fc", "#9ed2fb", "#9dd2fb", \
"#9cd1fb", "#9cd1fb", "#9bd1fb", "#9bd0fb", "#9ad0fb", "#99d0fb", \
"#99cffb", "#98cffb", "#98cffb", "#97cffb", "#96cefb", "#96cefb", \
"#95cefb", "#95cdfb", "#94cdfb", "#93cdfb", "#93ccfb", "#92ccfb", \
"#92ccfb", "#91cbfb", "#90cbfb", "#90cbfb", "#8fcbfb", "#8fcafb", \
"#8ecafb", "#8dcafb", "#8dc9fb", "#8cc9fb", "#8bc9fb", "#8bc8fb", \
"#8ac8fb", "#8ac8fb", "#89c7fb", "#88c7fb", "#88c7fa", "#88c6fa", \
"#87c6fa", "#87c6fa", "#87c5fa", "#86c5fa", "#86c5fa", "#86c4fa", \
"#85c4fa", "#85c4fa", "#85c3f9", "#84c3f9", "#84c2f9", "#84c2f9", \
"#83c2f9", "#83c1f9", "#83c1f9", "#82c1f9", "#82c0f9", "#82c0f8", \
"#82bff8", "#81bff8", "#81bff8", "#81bef8", "#80bef8", "#80bef8", \
"#80bdf8", "#7fbdf8", "#7fbdf7", "#7fbcf7", "#7ebcf7", "#7ebbf7", \
"#7ebbf7", "#7dbbf7", "#7dbaf7", "#7dbaf7", "#7dbaf7", "#7cb9f6", \
"#7cb9f6", "#7cb8f6", "#7bb8f6", "#7bb8f6", "#7bb7f6", "#7bb7f6", \
"#7ab6f6", "#7ab6f5", "#7ab6f5", "#79b5f5", "#79b5f5", "#79b5f5", \
"#78b4f5", "#78b4f5", "#78b3f5", "#78b3f5", "#77b3f4", "#77b2f4", \
"#77b2f4", "#76b2f4", "#76b1f4", "#76b1f4", "#76b0f4", "#75b0f4", \
"#75b0f3", "#75aff3", "#74aff3", "#74aef3", "#74aef3", "#74aef3", \
"#73adf3", "#73adf3", "#73adf2", "#73acf2", "#72acf2", "#72abf2", \
"#72abf2", "#71abf2", "#71aaf2", "#71aaf2", "#71a9f1", "#70a9f1", \
"#70a9f1", "#70a8f1", "#70a8f1", "#6fa7f1", "#6fa7f1", "#6fa7f1", \
"#6fa6f0", "#6ea6f0", "#6ea5f0", "#6ea5f0", "#6ea5f0", "#6da4f0", \
"#6da4f0", "#6da4f0", "#6da3ef", "#6ca3ef", "#6ca2ef", "#6ca2ef", \
"#6ca2ef", "#6ba1ef", "#6ba1ef", "#6ba0ee", "#6ba0ee", "#6aa0ee", \
"#6a9fee", "#6a9fee", "#6a9eee", "#699eee", "#699eee", "#699ded", \
"#699ded", "#689ced", "#689ced", "#689ced", "#689bed", "#679bed", \
"#679aec", "#679aec", "#679aec", "#6699ec", "#6699ec", "#6698ec", \
"#6698ec", "#6598ec", "#6597eb", "#6597eb", "#6596eb", "#6596eb", \
"#6496eb", "#6495eb", "#6495eb", "#6494ea", "#6394ea", "#6394ea", \
"#6393ea", "#6393ea", "#6392ea", "#6292ea", "#6292e9", "#6291e9", \
"#6291e9", "#6191e9", "#6190e9", "#6190e9", "#618fe9", "#618fe8", \
"#608fe8", "#608ee8", "#608ee8", "#608de8", "#5f8de8", "#5f8de8", \
"#5f8ce8", "#5f8ce7", "#5f8be7", "#5e8be7", "#5e8be7", "#5e8ae7", \
"#5e8ae7", "#5e89e7", "#5d89e6", "#5d89e6", "#5d88e6", "#5d88e6", \
"#5d87e6", "#5c87e6", "#5c87e6", "#5c86e5", "#5c86e5", "#5c85e5", \
"#5b85e5", "#5b85e5", "#5b84e5", "#5b84e5", "#5b83e4", "#5a83e4", \
"#5a83e4", "#5a82e4", "#5a82e4", "#5a81e4", "#5981e4", "#5981e3", \
"#5980e3", "#5980e3", "#597fe3", "#587fe3", "#587fe3", "#587ee3", \
"#587ee2", "#587de2", "#577de2", "#577de2", "#577ce2", "#577ce2", \
"#577be2", "#577be1", "#567be1", "#567ae1", "#567ae1", "#5679e1", \
"#5679e1", "#5579e1", "#5578e0", "#5578e0", "#5577e0", "#5577e0", \
"#5577e0", "#5476e0", "#5476df", "#5475df", "#5475df", "#5475df", \
"#5474df", "#5374df", "#5373df", "#5373de", "#5373de", "#5372de", \
"#5272de", "#5271de", "#5271de", "#5271de", "#5270dd", "#5270dd", \
"#5170dd", "#516fdd", "#516fdd", "#516edd", "#516edd", "#516edc", \
"#506ddc", "#506ddc", "#506cdc", "#506cdc", "#506cdc", "#506bdc", \
"#506bdb", "#4f6adb", "#4f6adb", "#4f6adb", "#4f69db", "#4f69db", \
"#4f68da", "#4e68da", "#4e68da", "#4e67da", "#4e67da", "#4e67da", \
"#4e66da", "#4d66d9", "#4d65d9", "#4d65d9", "#4d65d9", "#4d64d9", \
"#4d64d9", "#4d63d9", "#4c63d8", "#4c63d8", "#4c62d8", "#4c62d8", \
"#4c61d8", "#4c61d8", "#4c61d7", "#4c60d7", "#4c60d7", "#4c60d7", \
"#4c5fd6", "#4c5fd6", "#4b5fd6", "#4b5ed6", "#4b5ed6", "#4b5ed5", \
"#4b5ed5", "#4b5dd5", "#4b5dd5", "#4b5dd4", "#4b5cd4", "#4b5cd4", \
"#4b5cd4", "#4b5bd3", "#4b5bd3", "#4b5bd3", "#4b5ad3", "#4b5ad3", \
"#4b5ad2", "#4b59d2", "#4b59d2", "#4b59d2", "#4b58d1", "#4b58d1", \
"#4b58d1", "#4b57d1", "#4b57d0", "#4b57d0", "#4b56d0", "#4b56d0", \
"#4b56d0", "#4b55cf", "#4b55cf", "#4b55cf", "#4b54cf", "#4b54ce", \
"#4b54ce", "#4b54ce", "#4a53ce", "#4a53cd", "#4a53cd", "#4a52cd", \
"#4a52cd", "#4a52cd", "#4a51cc", "#4a51cc", "#4a51cc", "#4a50cc", \
"#4a50cb", "#4a50cb", "#4a4fcb", "#4a4fcb", "#4a4fca", "#4a4eca", \
"#4a4eca", "#4a4eca", "#4a4ec9", "#4a4dc9", "#4a4dc9", "#4a4dc9", \
"#4a4cc9", "#4a4cc8", "#4a4cc8", "#4a4bc8", "#4a4bc8", "#4a4bc7", \
"#4a4ac7", "#4a4ac7", "#4a4ac7", "#4a4ac6", "#4a49c6", "#4a49c6", \
"#4a49c6", "#4a48c5", "#4a48c5", "#4a48c5", "#4a47c5", "#4a47c5", \
"#4a47c4", "#4a46c4", "#4a46c4", "#4a46c4", "#4a46c3", "#4a45c3", \
"#4a45c3", "#4a45c3", "#4a44c2", "#4a44c2", "#4a44c2", "#4a43c2", \
"#4a43c1", "#4a43c1", "#4a43c1", "#4a42c1", "#4a42c0", "#4a42c0", \
"#4a41c0", "#4a41c0", "#4a41c0", "#4a40bf", "#4a40bf", "#4b40bf", \
"#4b40bf", "#4b3fbe", "#4b3fbe", "#4b3fbe", "#4b3ebe", "#4b3ebd", \
"#4b3ebd", "#4b3ebd", "#4b3dbd", "#4b3dbc", "#4b3dbc", "#4b3cbc", \
"#4b3cbc", "#4b3cbb", "#4b3bbb", "#4b3bbb", "#4b3bbb", "#4b3bba", \
"#4b3aba", "#4b3aba", "#4b3aba", "#4b39ba", "#4b39b9", "#4b39b9", \
"#4b39b9", "#4b38b9", "#4b38b8", "#4b38b8", "#4b37b8", "#4b37b8", \
"#4b37b7", "#4b37b7", "#4b36b7", "#4b36b7", "#4b36b6", "#4b35b6", \
"#4b35b6", "#4c35b6", "#4c35b5", "#4c34b5", "#4c34b5", "#4c34b5", \
"#4c33b4", "#4c33b4", "#4c33b4", "#4c33b4", "#4c32b3", "#4c32b3", \
"#4c32b3", "#4c32b3", "#4c31b3", "#4c31b2", "#4c31b2", "#4c30b2", \
"#4c30b2", "#4c30b1", "#4c30b1", "#4c2fb1", "#4c2fb1", "#4c2fb0", \
"#4d2eb0", "#4d2eb0", "#4d2eb0", "#4d2eaf", "#4d2daf", "#4d2daf", \
"#4d2daf", "#4d2dae", "#4d2cae", "#4d2cae", "#4d2cae", "#4d2cad", \
"#4d2bad", "#4d2bad", "#4d2bad", "#4d2aac", "#4d2aac", "#4d2aac", \
"#4d2aac", "#4e29ab", "#4e29ab", "#4e29ab", "#4e29ab", "#4e28aa", \
"#4e28aa", "#4e28aa", "#4e28aa", "#4e27a9", "#4e27a9", "#4e27a9", \
"#4e27a9", "#4e26a8", "#4e26a8", "#4e26a8", "#4f26a8", "#4f25a7", \
"#4f25a7", "#4f25a7", "#4f25a7", "#4f24a6", "#4f24a6", "#4f24a6", \
"#4f24a6", "#4f23a5", "#4f23a5", "#4f23a5", "#4f23a5", "#4f22a4", \
"#5022a4", "#5022a4", "#5022a4", "#5021a3", "#5021a3", "#5021a3", \
"#5021a3", "#5020a2", "#5020a2", "#5020a2", "#5020a1", "#501fa1", \
"#511fa1", "#511fa1", "#511fa0", "#511ea0", "#511ea0", "#511ea0", \
"#511e9f", "#511d9f", "#511d9f", "#511d9f", "#511d9e", "#521d9e", \
"#521c9e", "#521c9e", "#521c9d", "#521c9d", "#521b9d", "#521b9d", \
"#521b9c", "#521b9c", "#521a9c", "#531a9b", "#531a9b", "#531a9b", \
"#531a9b", "#53199a", "#53199a", "#53199a", "#53199a", "#531999", \
"#531899", "#541899", "#541899", "#541898", "#541798", "#541798", \
"#541797", "#541797", "#541797", "#551697", "#551696", "#551696", \
"#551696", "#551696", "#551595", "#551595", "#551595", "#551594", \
"#551594", "#551594", "#551593", "#551593", "#551593", "#551592", \
"#551592", "#551592", "#551591", "#551591", "#551591", "#551590", \
"#551590", "#551590", "#55158f", "#55158f", "#55158f", "#55158e", \
"#55158e", "#55158e", "#55158d", "#55158d", "#54158c", "#54158c", \
"#54158c", "#54158b", "#54158b", "#54158b", "#54158a", "#54158a", \
"#54158a", "#541589", "#541589", "#541589", "#541588", "#541588", \
"#541588", "#541587", "#541587", "#541586", "#541586", "#541586", \
"#541585", "#541585", "#541585", "#541584", "#541584", "#541584", \
"#541583", "#541583", "#541583", "#551582", "#551582", "#551581", \
"#551581", "#551581", "#551580", "#551580", "#551580", "#55157f", \
"#55167f", "#55167e", "#55167e", "#55167e", "#55167d", "#55167d", \
"#55167d", "#55167c", "#55167c", "#55167b", "#55167b", "#55167b", \
"#56167a", "#56167a", "#56167a", "#561679", "#561679", "#561678", \
"#561778", "#561778", "#561777", "#561777", "#561777", "#561776", \
"#571776", "#571775", "#571775", "#571775", "#571774", "#571774", \
"#571773", "#571873", "#571873", "#581872", "#581872", "#581871", \
"#581871", "#581871", "#581870", "#581870", "#59186f", "#59196f", \
"#59196f", "#59196e", "#59196e", "#59196d", "#5a196d", "#5a196d", \
"#5a196c", "#5a196c", "#5a1a6b", "#5a1a6b", "#5b1a6b", "#5b1a6a", \
"#5b1a6a", "#5b1a69", "#5b1a69", "#5c1b68", "#5c1b68", "#5c1b68", \
"#5c1b67", "#5c1b67", "#5d1b66", "#5d1b66", "#5d1c66", "#5d1c65", \
"#5d1c65", "#5e1c64", "#5e1c64", "#5e1c63", "#5e1d63", "#5f1d63", \
"#5f1d62", "#5f1d62", "#5f1d61", "#601d61", "#601e60", "#601e60", \
"#611e5f", "#611e5f", "#611e5f", "#611f5e", "#621f5e", "#621f5d", \
"#621f5d", "#631f5c", "#63205c", "#63205b", "#64205b", "#64205b", \
"#64205a", "#65215a", "#652159", "#652159", "#662158", "#662258", \
"#662257", "#672257", "#672256", "#682356", "#682356", "#682355", \
"#692355", "#692454", "#6a2454", "#6a2453", "#6a2453", "#6b2552", \
"#6b2552", "#6c2551", "#6c2651", "#6d2650", "#6d2650", "#6d264f", \
"#6e274f", "#6e274e", "#6f274e", "#6f284d", "#70284d", "#70284c", \
"#71294c", "#71294c", "#72294b", "#72294b", "#732a4a", "#732a4a", \
"#742b49", "#742b49", "#752b48", "#762c48", "#762c47", "#772c47", \
"#772d46", "#782d46", "#782d45", "#792e45", "#7a2e44", "#7a2f43", \
"#7b2f43", "#7b2f42", "#7c3042", "#7d3041", "#7d3041", "#7e3140", \
"#7f3140", "#7f323f", "#80323f", "#81333e", "#81333e", "#82333d", \
"#83343d", "#84343c", "#84353c", "#85353b", "#86363b", "#86363a", \
"#87373a", "#883739", "#893838", "#8a3838", "#8a3937", "#8b3937", \
"#8c3a36", "#8d3a36", "#8e3b35", "#8e3b35", "#8f3c34", "#903c34", \
"#913d33", "#923d32", "#933e32", "#943e31", "#943f31", "#953f30", \
"#964030", "#97412f", "#98412e", "#99422e", "#9a422d", "#9b432d", \
"#9c442c", "#9d442c", "#9e452b", "#9f452a", "#a0462a", "#a14729", \
"#a24729", "#a34828", "#a44928", "#a54927", "#a64a26", "#a74b26", \
"#a94b25", "#aa4c25", "#ab4d24", "#ac4d23", "#ad4e23", "#ae4f22", \
"#af4f22", "#b15021", "#b25120", "#b35220", "#b4521f", "#b5531f", \
"#b7541e", "#b8551d", "#b9551d", "#bb561c", "#bc571c", "#bd581b", \
"#be591a", "#c0591a", "#c15a19", "#c25b18", "#c45c18", "#c55d17", \
"#c75e17", "#c85e16", "#c95f15", "#cb6015", "#cc6114", "#ce6213", \
"#cf6313", "#d16412", "#d26511", "#d46611", "#d56610", "#d767f", \
"#d968f", "#da69e", "#dc6ae", "#dd6bd", "#df6cc", "#e16dc", "#e26eb", \
"#e46fa", "#e670a", "#e7719", "#e9728", "#eb738", "#ed747", "#ee756", \
"#f0776", "#f2785", "#f4794", "#f67a3", "#f77b3", "#f97c2", "#fb7d1", \
"#fd7e1", "#ff800"]
color_palatte.reverse()
return color_palatte
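# Illustrative usage sketch (not part of the original script): interpolating
# three values over a two-color palette returns [r, g, b] triples spaced
# evenly between black and white.
_example_mapper = RGBAColorMapper(0.0, 1.0, ["#000000", "#ffffff"])
_example_triples = _example_mapper.color(np.array([0.0, 0.5, 1.0]))
# _example_triples ~= [[0, 0, 0], [127.5, 127.5, 127.5], [255, 255, 255]]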
|
|
import collections
import datetime
import dateutil.parser
import dateutil.relativedelta
import re
class _Enum(tuple):
"""A simple way to make enum types."""
__getattr__ = tuple.index
# 'new' only makes sense for recurring actions. It represents a recurring
# action which hasn't been done yet.
DateStates = _Enum(['invisible', 'ready', 'new', 'due', 'late'])
Actions = _Enum(['MarkDONE', 'UpdateLASTDONE', 'DefaultCheckoff'])
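# Illustrative sketch (not part of the original module): _Enum members resolve
# to their tuple index, so they can be compared and ordered numerically.
_example_state = DateStates.due  # == 3, the index of 'due' in DateStates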
def PreviousTime(date_and_time, time_string=None, due=True):
"""The last datetime before 'date_and_time' that the time was 'time'.
Args:
date_and_time: A datetime.datetime object.
time_string: A string representing a time, in HH:MM format.
due: Whether this is for a due date (as opposed to a visible date).
Returns:
A datetime.datetime object; the last datetime before 'date_and_time'
whose time was 'time_string'.
"""
try:
time = datetime.datetime.strptime(time_string, '%H:%M').time()
except:
time = datetime.time()
new_datetime = datetime.datetime.combine(date_and_time.date(), time)
if new_datetime > date_and_time:
new_datetime -= datetime.timedelta(days=1)
assert new_datetime <= date_and_time
return new_datetime
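# Illustrative sketch (not part of the original module): 10:00 has not yet
# happened on 2020-01-15 08:30, so the previous 10:00 falls on the day before.
_example_prev_time = PreviousTime(datetime.datetime(2020, 1, 15, 8, 30), '10:00')
# _example_prev_time == datetime.datetime(2020, 1, 14, 10, 0)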
def PreviousWeekDay(date_and_time, weekday_string=None, due=True):
"""The last datetime before 'date_and_time' on the given day of the week.
Args:
date_and_time: A datetime.datetime object.
weekday_string: A string representing a day of the week (and
optionally, a time).
due: Whether this is for a due date (as opposed to a visible date).
Returns:
A datetime.datetime object; the last datetime before 'date_and_time'
whose time and day-of-week match 'weekday_string'.
"""
try:
weekday_and_time = dateutil.parser.parse(weekday_string)
if due and not re.search(r'\d:\d\d', weekday_string):
weekday_and_time = weekday_and_time.replace(hour=23, minute=59)
except:
weekday_and_time = dateutil.parser.parse('Sun 00:00')
if date_and_time.weekday() == weekday_and_time.weekday():
new_datetime = datetime.datetime.combine(date_and_time.date(),
weekday_and_time.time())
if new_datetime > date_and_time:
new_datetime += datetime.timedelta(days=-7)
else:
new_datetime = datetime.datetime.combine(
date_and_time.date() +
datetime.timedelta(days=-((date_and_time.weekday() -
weekday_and_time.weekday()) % 7)),
weekday_and_time.time())
assert new_datetime <= date_and_time
return new_datetime
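# Illustrative sketch (not part of the original module): the last 'Tue 09:00'
# before Thursday 2020-01-16 12:00 is Tuesday 2020-01-14 09:00.
_example_prev_weekday = PreviousWeekDay(datetime.datetime(2020, 1, 16, 12, 0), 'Tue 09:00')
# _example_prev_weekday == datetime.datetime(2020, 1, 14, 9, 0)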
def PreviousMonthDay(date_and_time, monthday_string=None, due=True):
"""The last datetime before 'date_and_time' on the given day of the month.
Numbers below 1 are acceptable: 0 is the last day of the previous month, -1
is the second-last day, etc.
Args:
date_and_time: A datetime.datetime object.
monthday_string: A string representing a day of the month (and
optionally, a time).
due: Whether this is for a due date (as opposed to a visible date).
Returns:
A datetime.datetime object; the last datetime before 'date_and_time'
whose time and day-of-month match 'monthday_string'.
"""
def DayOfMonth(date_and_time, offset):
"""Date which is 'offset' days from the end of the prior month.
Args:
date_and_time: A datetime.datetime object; only the year and month
matters.
offset: The number of days to offset from the last day in the
previous month. (So 1 corresponds to the first day of the
month; -1 corresponds to the second-to-last day of the previous
month; etc.)
Returns:
A datetime.date object for the given day of the month.
"""
return (date_and_time.date().replace(day=1) +
datetime.timedelta(days=offset - 1))
# Set up the day of the month, and the time of day.
time = datetime.time(0, 0)
try:
m = re.match(r'(?P<day>-?\d+)(\s+(?P<time>\d\d?:\d\d))?',
monthday_string)
month_day = int(m.group('day'))
if m.group('time'):
time = dateutil.parser.parse(m.group('time')).time()
except:
month_day = 0
if due:
if not monthday_string or not re.search(r'\d:\d\d', monthday_string):
time = datetime.time(23, 59)
new_datetime = datetime.datetime.combine(
DayOfMonth(date_and_time, month_day), time)
from_start = (month_day > 0)
while new_datetime < date_and_time:
new_datetime = AdvanceByMonths(new_datetime, 1, from_start)
while new_datetime > date_and_time:
new_datetime = AdvanceByMonths(new_datetime, -1, from_start)
return new_datetime
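# Illustrative sketch (not part of the original module): due dates default to
# end-of-day, so the previous "1st of the month" before 2020-03-15 10:00
# resolves to 2020-03-01 23:59.
_example_prev_monthday = PreviousMonthDay(datetime.datetime(2020, 3, 15, 10, 0), '1')
# _example_prev_monthday == datetime.datetime(2020, 3, 1, 23, 59)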
def AdvanceByMonths(date_and_time, num, from_start):
"""Advance 'date_and_time' by 'num' months.
If 'from_start' is false, we count from the end of the month (so, e.g.,
March 28 would map to April 27, the fourth-last day of the month in each
case).
Args:
date_and_time: A datetime.datetime object to advance.
num: The number of months to advance it by.
from_start: Boolean indicating whether to count from the start of the
month, or the end.
Returns:
datetime.datetime object corresponding to 'date_and_time' advanced by
'num' months.
"""
if from_start:
return date_and_time + dateutil.relativedelta.relativedelta(months=num)
# If we're still here, we need to count backwards from the end of the
# month. We do this by computing an offset which takes us to the beginning
# of the next month. Add the offset before changing the month; subtract it
# before returning the result.
time = date_and_time.time()
first_day_next_month = ((date_and_time.date() +
dateutil.relativedelta.relativedelta(months=1))
.replace(day=1))
offset = first_day_next_month - date_and_time.date()
date = (date_and_time.date() + offset +
dateutil.relativedelta.relativedelta(months=num)) - offset
return datetime.datetime.combine(date, time)
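# Illustrative sketch (not part of the original module): counting from the end
# of the month, March 28 (the fourth-last day) advances one month to April 27
# (also the fourth-last day), matching the docstring example above.
_example_advanced = AdvanceByMonths(datetime.datetime(2020, 3, 28), 1, from_start=False)
# _example_advanced == datetime.datetime(2020, 4, 27, 0, 0)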
class Node(object):
# The "_level" of a Node defines nesting behaviour. No Node may nest
# inside a Node with a smaller _level. Only certain types of Node may nest
# within Nodes of the *same* _level (depending on _can_nest_same_type).
#
# The actual value of _level doesn't matter; it's only used for relative
# comparisons.
_level = 0
_can_nest_same_type = False
# Handy patterns and regexes.
# Tags either start at the beginning of the line, or with a space.
_r_start = r'(^| )'
_r_end = r'(?=[.!?)"'';:]*(\s|$))'
_date_format = r'%Y-%m-%d'
_datetime_format = r'%Y-%m-%d %H:%M'
_date_pattern = (r'(?P<datetime>\d{4}-\d{2}-\d{2}'
r'( (?P<time>\d{2}:\d{2}))?)')
_due_within_pattern = r'(\((?P<due_within>\d+)\))?'
_due_date_pattern = re.compile(_r_start + r'<' + _date_pattern +
_due_within_pattern + _r_end)
_vis_date_pattern = re.compile(_r_start + r'>' + _date_pattern + _r_end)
_context = re.compile(_r_start + r'(?P<prefix>@{1,2})(?P<cancel>!?)' +
r'(?P<context>\w+)' + _r_end)
_cancel_inheritance = re.compile(_r_start + r'@!' + _r_end)
_priority_pattern = re.compile(_r_start + r'@p:(?P<priority>[01234])' +
_r_end)
_reserved_contexts = ['inbox', 'waiting']
def __init__(self, text, priority, *args, **kwargs):
super(Node, self).__init__(*args, **kwargs)
# Public properties.
self.children = []
self.parent = None
self._visible_date = None
# Private variables
self._contexts = []
self._canceled_contexts = []
self._cancel_inheriting_all_contexts = False
self._due_date = None
self._ready_date = None
self._priority = priority
self._raw_text = []
self._text = text
# A function which takes no arguments and returns a patch (as from
# diff). Default is the identity patch (i.e., the empty string).
self._diff_functions = collections.defaultdict(lambda: lambda d: '')
for i in Node._reserved_contexts:
setattr(self, i, False)
def AbsorbText(self, text, raw_text=None):
"""Strip out special sequences and add whatever's left to the text.
"Special sequences" include sequences which *every* Node type is
allowed to have: visible-dates, due-dates, contexts,
priorities, etc.
Args:
text: The text to parse and add.
raw_text: The unprocessed form of the line, kept for later patching;
defaults to 'text' when omitted.
Returns:
A boolean indicating success. Note that if it returns False, this
Node must be in the same state as it was before the function was
called.
"""
if not self._CanAbsorbText(text):
return False
self._raw_text.append(raw_text if raw_text else text)
# Tokens which are common to all Node instances: due date;
# visible-after date; contexts; priority.
text = self._due_date_pattern.sub(self._ParseDueDate, text)
text = self._vis_date_pattern.sub(self._ParseVisDate, text)
text = self._context.sub(self._ParseContext, text)
text = self._cancel_inheritance.sub(self._ParseCancelInheritance, text)
text = self._priority_pattern.sub(self._ParsePriority, text)
# Optional extra parsing and stripping for subclasses.
text = self._ParseSpecializedTokens(text)
self._text = (self._text + '\n' if self._text else '') + text.strip()
return True
def AddChild(self, other):
"""Add 'other' as a child of 'self' (and 'self' as parent of 'other').
Args:
other: Another Node object.
Returns:
A boolean indicating success.
"""
if not self._CanContain(other):
return False
other.parent = self
self.children.append(other)
return True
def AddContext(self, context, cancel=False):
"""Add context to this Node's contexts list."""
canonical_context = context.lower()
if canonical_context in Node._reserved_contexts:
setattr(self, canonical_context, True)
return
context_list = self._canceled_contexts if cancel else self._contexts
if canonical_context not in context_list:
context_list.append(canonical_context)
def DebugName(self):
old_name = ('{} :: '.format(self.parent.DebugName()) if self.parent
else '')
return '{}[{}] {}'.format(old_name, self.__class__.__name__, self.text)
def Patch(self, action, now=None):
"""A patch to perform the requested action.
Should be applied against this Node's file, if any.
Args:
action: An element of the libvtd.node.Actions enum.
now: datetime.datetime object representing the current timestamp.
Defaults to the current time; can be overridden for testing.
Returns:
A string equivalent to the output of the 'diff' program; when
applied to the file, it performs the requested action.
"""
assert action in range(len(Actions))
if not now:
now = datetime.datetime.now()
return self._diff_functions[action](now)
def Source(self):
"""The source which generated this Node.
Typically, this would be a file name and a line number. If other Node
types get added later, this could be a URL (say, for a GitHub issue).
Returns:
A (file name, line number) tuple. (The return type could change in
the future.)
"""
line = self._line_in_file if '_line_in_file' in self.__dict__ else 1
return (self.file_name, line)
@property
def contexts(self):
context_list = list(self._contexts)
if self.parent and not self._cancel_inheriting_all_contexts:
context_list.extend(self.parent.contexts)
return [c for c in context_list if c not in self._canceled_contexts]
@property
def due_date(self):
parent_due_date = self.parent.due_date if self.parent else None
if not self._due_date:
return parent_due_date
if not parent_due_date:
return self._due_date
return min(self._due_date, parent_due_date)
@property
def file_name(self):
return self.parent.file_name if self.parent else None
@property
def priority(self):
if self._priority is not None:
return self._priority
return self.parent.priority if self.parent else None
@property
def ready_date(self):
parent_ready_date = self.parent.ready_date if self.parent else None
if not self._ready_date:
return parent_ready_date
if not parent_ready_date:
return self._ready_date
return min(self._ready_date, parent_ready_date)
@property
def text(self):
return self._text.strip()
@property
def visible_date(self):
parent_visible_date = self.parent.visible_date if self.parent else None
if not self._visible_date:
return parent_visible_date
if not parent_visible_date:
return self._visible_date
return max(self._visible_date, parent_visible_date)
def _CanAbsorbText(self, text):
"""Indicates whether this Node can absorb the given line of text.
The default is to absorb only if there is no pre-existing text;
subclasses may specialize this behaviour.
"""
return not self._text
def _CanContain(self, other):
return self._level < other._level or (self._level == other._level and
self._can_nest_same_type)
def _ParseContext(self, match):
"""Parses the context from a match object.
Args:
match: A match from the self._context regex.
Returns:
The text to replace match with. If successful, this should be
either the empty string, or (for @@-prefixed contexts) the bare
context name; else, the original text.
"""
cancel = (match.group('cancel') == '!')
self.AddContext(match.group('context'), cancel=cancel)
return (' ' + match.group('context') if match.group('prefix') == '@@'
else '')
def _ParseCancelInheritance(self, match):
"""Cancels inheritance of contexts from parents.
Args:
match: A match from the self._context regex.
Returns:
The text to replace match with: in this case, the empty string.
"""
self._cancel_inheriting_all_contexts = True
return ''
def _ParseDueDate(self, match):
"""Parses the due date from a match object.
Args:
match: A match from the self._due_date_pattern regex.
Returns:
The text to replace match with. If successful, this should be the
empty string; else, the original text.
"""
strptime_format = (self._datetime_format if match.group('time')
else self._date_format)
try:
date = datetime.datetime.strptime(match.group('datetime'),
strptime_format)
self._due_date = date
# Date-only due dates occur at the *end* of the day.
if not match.group('time'):
self._due_date = (self._due_date +
datetime.timedelta(days=1, seconds=-1))
due_within = match.group('due_within')
days_before = (int(due_within) if due_within else 1)
self._ready_date = (self._due_date -
datetime.timedelta(days=days_before))
return ''
except ValueError:
return match.group(0)
def _ParsePriority(self, match):
"""Parses the priority from a match object.
Args:
match: A match from the self._priority_pattern regex.
Returns:
The text to replace match with, i.e., the empty string.
"""
self._priority = int(match.group('priority'))
return ''
def _ParseSpecializedTokens(self, text):
"""Parse tokens which only make sense for a particular subclass.
For example, a field for the time it takes to complete a task only
makes sense for the NextAction subclass.
"""
return text
def _ParseVisDate(self, match):
"""Parses the visible-after date from a match object.
Args:
match: A match from the self._vis_date_pattern regex.
Returns:
The text to replace match with. If successful, this should be the
empty string; else, the original text.
"""
strptime_format = (self._datetime_format if match.group('time')
else self._date_format)
try:
date = datetime.datetime.strptime(match.group('datetime'),
strptime_format)
self._visible_date = date
return ''
except ValueError:
return match.group(0)
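# Illustrative usage sketch (not part of the original module): absorbing a line
# of text strips the recognized tokens (due date, context, priority) out of the
# displayed text and records them on the Node.
_example_node = Node(text='', priority=None)
_example_node.AbsorbText('Pay rent @home @p:1 <2020-04-01(3)')
# _example_node.text == 'Pay rent'
# _example_node.contexts == ['home'], _example_node.priority == 1
# _example_node.due_date == datetime.datetime(2020, 4, 1, 23, 59, 59)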
class IndentedNode(Node):
"""A Node which supports multiple lines, at a given level of indentation.
"""
def __init__(self, indent=0, *args, **kwargs):
super(IndentedNode, self).__init__(*args, **kwargs)
self.indent = indent
self.text_indent = indent + 2
def _CanContain(self, other):
return super(IndentedNode, self)._CanContain(other) and (self.indent <
other.indent)
def _CanAbsorbText(self, text):
# If we have no text, don't worry about checking indenting.
if not self._text:
return True
# For subsequent text: accept blank lines, or text which is
# sufficiently indented.
return (not text.strip()) or text.startswith(' ' * self.text_indent)
class DoableNode(Node):
"""A Node which can be sensibly marked as DONE."""
_done_pattern = re.compile(Node._r_start +
r'\((DONE|WONTDO)( {})?\)'.format(
Node._date_pattern)
+ Node._r_end)
_id_word = r'(?P<id>\w+)'
_id_pattern = re.compile(Node._r_start + r'#' + _id_word + Node._r_end)
_after_pattern = re.compile(Node._r_start + r'@after:' + _id_word +
Node._r_end)
# Patterns related to recurring actions.
_last_done_pattern = re.compile(Node._r_start +
r'\(LASTDONE {}\)'.format(
Node._date_pattern)
+ Node._r_end)
_recur_unit_pattern = r'(?P<unit>day|week|month)'
_recur_min_pattern = r'(?P<min>\d+)'
_recur_max_pattern = r'(?P<max>\d+)'
_recur_subunit_vis_pattern = r'(?P<vis>[^]]+)'
_recur_unit_boundary_pattern = r'(?P<due>[^]]+)'
_recur_pattern = re.compile(Node._r_start +
r'\s*'.join([
r'EVERY',
# How many:
r'( ({}-)?{})?'.format(_recur_min_pattern,
_recur_max_pattern),
# Which units:
r' {}s?'.format(_recur_unit_pattern),
# Which part of the unit:
r'(\[({} - )?{}\])?'.format(
_recur_subunit_vis_pattern,
_recur_unit_boundary_pattern),
]) +
Node._r_end)
# Functions which reset a datetime to the beginning of an interval
# boundary: a day, a week, a month, etc. This boundary can be arbitrary
# (e.g., reset to "the previous 14th of a month" or "the previous Tuesday
# at 17:00".) One function for each type of interval.
#
# Related to recurring actions.
#
# Args:
# d: A datetime to reset.
# b: The boundary of the interval: a string to be parsed.
_interval_boundary_function = {
'day': PreviousTime,
'week': PreviousWeekDay,
'month': PreviousMonthDay,
}
# Functions which advance a datetime by some number of units.
#
# Related to recurring actions.
#
# Args:
# d: A datetime to advance.
# n: Number of units to advance.
# from_start: Whether to count from the beginning of the unit or the end.
# Only relevant for variable-length units, such as months.
_date_advancing_function = {
'day': lambda d, n, from_start: d + datetime.timedelta(days=n),
'week': lambda d, n, from_start: d + datetime.timedelta(days=7 * n),
'month': AdvanceByMonths
}
def __init__(self, *args, **kwargs):
super(DoableNode, self).__init__(*args, **kwargs)
self.done = False
self.recurring = False
self.last_done = None
self._diff_functions[Actions.MarkDONE] = self._PatchMarkDone
self._diff_functions[Actions.UpdateLASTDONE] = \
self._PatchUpdateLastdone
self._diff_functions[Actions.DefaultCheckoff] = self._PatchMarkDone
# A list of ids for DoableNode objects which must be marked DONE before
# *this* DoableNode will be visible.
self.blockers = []
# A list of ids for this DoableNode. The initial id is for internal
# usage only; note that it can never match the _id_pattern regex.
# Other IDs may be added using the _id_pattern regex.
self.ids = ['*{}'.format(id(self))]
def DateState(self, now):
"""The state of this node relative to now: late; ready; due; invisible.
Args:
now: datetime.datetime object giving the current time.
Returns:
An element of the DateStates enum.
"""
if self.recurring:
if not self.last_done:
return DateStates.new
self._SetRecurringDates()
if self.visible_date and now < self.visible_date:
return DateStates.invisible
if self.due_date is None:
return DateStates.ready
if self.due_date < now:
return DateStates.late
if self.ready_date < now:
return DateStates.due
return DateStates.ready
def _ParseAfter(self, match):
self.blockers.extend([match.group('id')])
return ''
def _ParseDone(self, match):
self.done = True
return ''
def _ParseId(self, match):
self.ids.extend([match.group('id')])
return ''
def _ParseLastDone(self, match):
try:
last_done = datetime.datetime.strptime(match.group('datetime'),
self._datetime_format)
self.last_done = last_done
return ''
except ValueError:
return match.group(0)
def _ParseRecur(self, match):
self._recur_raw_string = match
self.recurring = True
self._diff_functions[Actions.DefaultCheckoff] = \
self._PatchUpdateLastdone
self._recur_max = int(match.group('max')) if match.group('max') else 1
self._recur_min = int(match.group('min')) if match.group('min') else \
self._recur_max
self._recur_unit = match.group('unit')
self._recur_unit_boundary = match.group('due')
self._recur_subunit_visible = match.group('vis')
return ''
def _ParseSpecializedTokens(self, text):
"""Parse tokens specific to indented blocks.
"""
text = super(DoableNode, self)._ParseSpecializedTokens(text)
text = self._done_pattern.sub(self._ParseDone, text)
text = self._id_pattern.sub(self._ParseId, text)
text = self._after_pattern.sub(self._ParseAfter, text)
text = self._recur_pattern.sub(self._ParseRecur, text)
text = self._last_done_pattern.sub(self._ParseLastDone, text)
return text
def _PatchMarkDone(self, now):
"""A patch which marks this DoableNode as 'DONE'."""
if not self.done:
return '\n'.join([
'@@ -{0} +{0} @@',
'-{1}',
'+{1} (DONE {2})',
''
]).format(self._line_in_file, self._raw_text[0],
now.strftime('%Y-%m-%d %H:%M'))
return ''
def _PatchUpdateLastdone(self, now):
"""A patch which updates a recurring DoableNode's 'LASTDONE' timestamp.
"""
if self.done or not self.recurring:
return ''
patch_lines = ['@@ -{0},{1} +{0},{1} @@'.format(self._line_in_file,
len(self._raw_text))]
patch_lines.extend(['-{}'.format(line) for line in self._raw_text])
current = lambda x=None: now.strftime('%Y-%m-%d %H:%M')
updater = lambda m: re.sub(self._date_pattern,
current,
m.group(0))
new_lines = ['+{}'.format(self._last_done_pattern.sub(updater, line))
for line in self._raw_text]
if self.DateState(now) == DateStates.new:
new_lines[0] += ' (LASTDONE {})'.format(current())
patch_lines.extend(new_lines)
patch_lines.append('')
return '\n'.join(patch_lines)
def _SetRecurringDates(self):
"""Set dates (visible, due, etc.) based on last-done date."""
unit = self._recur_unit
# For computing the due date and visible date: do we go from the
# beginning of the interval, or the end? (The distinction is only
# relevant for variable-length intervals, such as months.)
due_from_start = vis_from_start = True
if unit == 'month':
if self._recur_unit_boundary and \
int(self._recur_unit_boundary.split()[0]) < 1:
due_from_start = False
if self._recur_subunit_visible and \
int(self._recur_subunit_visible.split()[0]) < 1:
vis_from_start = False
# Find the previous datetime (before the last-done time) which bounds
# the time interval (day, week, month, ...).
base_datetime = self._interval_boundary_function[unit](
self.last_done, self._recur_unit_boundary)
# If an action was completed after the due date, but before the next
# visible date, associate it with the previous interval. (An example
# of the kind of disaster this prevents: suppose the rent is due on the
# 1st, and we pay it on the 2nd. Then we risk the system thinking the
        # rent is paid for the *new* month.)
if self._recur_subunit_visible:
# This kind of operation doesn't really make sense if the task is
# visible for the entire interval.
previous_vis_date = self._interval_boundary_function[unit](
self.last_done, self._recur_subunit_visible, due=False)
# If we did the task after the due time, but before it was visible,
# then the previous due date comes *after* the previous visible
# date. So, put the base datetime back in the *previous* unit.
if base_datetime > previous_vis_date:
base_datetime = self._date_advancing_function[unit](
base_datetime, -1, due_from_start)
# Set visible, ready, and due dates relative to base_datetime.
self._visible_date = self._date_advancing_function[unit](
base_datetime, self._recur_min, vis_from_start)
if self._recur_subunit_visible:
# Move the visible date forward to the subunit boundary (if any).
# To do this, move it forward one full unit, then move it back
# until it matches the visible subunit boundary.
self._visible_date = self._date_advancing_function[unit](
self._visible_date, 1, vis_from_start)
self._visible_date = self._interval_boundary_function[unit](
self._visible_date, self._recur_subunit_visible, due=False)
self._ready_date = self._date_advancing_function[unit](
base_datetime, self._recur_max, due_from_start)
self._due_date = self._date_advancing_function[unit](
base_datetime, self._recur_max + 1, due_from_start)
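    # In short: base_datetime is the interval boundary preceding last_done
    # (pushed back one unit if the task was finished late but before it became
    # visible again); the node then becomes visible after _recur_min units
    # (optionally snapped forward to the visible subunit boundary), ready after
    # _recur_max units, and due after _recur_max + 1 units.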
class File(Node):
_level = Node._level + 1
_can_nest_same_type = False
    # These compiled regexes indicate line patterns which correspond to the
    # various Node subclasses. Each should have a named match group called
    # 'text' to indicate the "core" text, i.e., everything *except* the
# pattern. e.g.,
# Section:
# = <Core text> =
# NextAction:
# @ <Core text>
# etc.
_indent = r'(?P<indent>^\s*)'
_text_pattern = r' (?P<text>.*)'
_section_pattern = re.compile(r'^(?P<level>=+)' + _text_pattern +
r' (?P=level)$')
_next_action_pattern = re.compile(_indent + r'@' + _text_pattern)
_comment_pattern = re.compile(_indent + r'\*' + _text_pattern)
_project_pattern = re.compile(_indent + r'(?P<type>[#-])' + _text_pattern)
def __init__(self, file_name=None, *args, **kwargs):
super(File, self).__init__(text='', priority=None, *args, **kwargs)
self.bad_lines = []
self._file_name = file_name
self._node_with_id = {}
if file_name:
# Read file contents and create a tree of Nodes from them.
with open(file_name) as vtd_file:
# Parse the file, one line at a time, as follows.
# Try creating a Node from the line.
# - If successful, make the node a child of the previous node
# -- or at least, the first *ancestor* of the previous node
# which can contain the new one.
# - If unsuccessful, try absorbing the text into the previous
# node.
previous_node = self
for (line_num, line) in enumerate(vtd_file, 1):
raw_text = line.rstrip('\n')
new_node = self.CreateNodeFromLine(raw_text, line_num)
if new_node:
while (previous_node and not
previous_node.AddChild(new_node)):
previous_node = previous_node.parent
previous_node = new_node
else:
if not previous_node.AbsorbText(raw_text):
self.bad_lines.append((line_num, raw_text))
try:
self._TrackIdNode(previous_node)
except KeyError:
self.bad_lines.append((line_num, raw_text))
@staticmethod
def CreateNodeFromLine(line, line_num=1):
"""Create the specific type of Node which this line represents.
Args:
            line: A line of text.
            line_num: The line number within the file (1-based); stored on the
                new node as _line_in_file.
Returns:
An instance of a Node subclass, or None if this line doesn't
represent a valid Node.
"""
(new_node, text) = File._CreateCorrectNodeType(line)
if new_node:
new_node.AbsorbText(text, line)
new_node._line_in_file = line_num
return new_node
@property
def file_name(self):
return self._file_name
@staticmethod
def _CreateCorrectNodeType(text):
"""Creates the Node object and returns the raw text.
Args:
text: The line of text to parse.
Returns:
A tuple (new_node, raw_text):
new_node: An instance of the appropriate Node subclass.
raw_text: Whatever text is leftover after the ID pattern has
been stripped out, but *before* other information (e.g.,
due dates, priority, etc.) has been stripped out.
"""
section_match = File._section_pattern.match(text)
if section_match:
section = Section(level=len(section_match.group('level')))
return (section, section_match.group('text'))
project_match = File._project_pattern.match(text)
if project_match:
is_ordered = (project_match.group('type') == '#')
indent = len(project_match.group('indent'))
project = Project(is_ordered=is_ordered, indent=indent)
return (project, project_match.group('text'))
next_action_match = File._next_action_pattern.match(text)
if next_action_match:
indent = len(next_action_match.group('indent'))
action = NextAction(indent=indent)
return (action, next_action_match.group('text'))
comment_match = File._comment_pattern.match(text)
if comment_match:
indent = len(comment_match.group('indent'))
comment = Comment(indent=indent)
return (comment, comment_match.group('text'))
return (None, '')
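    # Illustrative classification of raw lines by the patterns above (a sketch,
    # not an exhaustive specification):
    #   "= Inbox ="           -> Section(level=1), text "Inbox"
    #   "== Errands =="       -> Section(level=2), text "Errands"
    #   "# Plan the trip"     -> Project(is_ordered=True)
    #   "- Household chores"  -> Project(is_ordered=False)
    #   "  @ Call the bank"   -> NextAction(indent=2)
    #   "  * Just a comment"  -> Comment(indent=2)
    #   "free-floating text"  -> None (absorbed by the previous node, if any)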
def _CanAbsorbText(self, unused_text):
return False
def _TrackIdNode(self, node):
"""If this node has an ID, add it to the (id -> node) map.
Args:
node: A libvtd.node.Node object.
"""
try:
for id in node.ids:
if id in self._node_with_id.keys():
if self._node_with_id[id] != node:
raise KeyError
else:
self._node_with_id[id] = node
except AttributeError:
return
def NodeWithId(self, id):
"""The child node of this flie with the given ID (None if none).
Args:
id: A string indicating which node to retrieve.
"""
if id not in self._node_with_id.keys():
return None
return self._node_with_id[id]
class Section(Node):
_level = File._level + 1
_can_nest_same_type = True
def __init__(self, level=1, text=None, priority=None, *args, **kwargs):
super(Section, self).__init__(text=text, priority=priority, *args,
**kwargs)
self.level = level
def _CanContain(self, other):
if issubclass(other.__class__, Section):
return self.level < other.level
return super(Section, self)._CanContain(other)
class Project(DoableNode, IndentedNode):
_level = Section._level + 1
_can_nest_same_type = True
def __init__(self, is_ordered=False, text=None, priority=None, *args,
**kwargs):
super(Project, self).__init__(text=text, priority=priority, *args,
**kwargs)
self.ordered = is_ordered
def AddChild(self, other):
if super(Project, self).AddChild(other):
if self.ordered and isinstance(other, DoableNode):
# If this Project is ordered, the new DoableNode will be
# blocked by the most recent not-done DoableNode child.
last_doable_node = None
for child in self.children:
if (isinstance(child, DoableNode) and not child.done and
child != other):
last_doable_node = child
if last_doable_node and last_doable_node != other:
temp_id = last_doable_node.ids[0]
other.blockers.extend([temp_id])
if self.recurring and isinstance(other, DoableNode):
other._ParseRecur(self._recur_raw_string)
return True
return False
class NextAction(DoableNode, IndentedNode):
_level = Project._level + 1
_time = re.compile(Node._r_start + r'@t:(?P<time>\d+)' + Node._r_end)
def __init__(self, text=None, priority=None, *args, **kwargs):
super(NextAction, self).__init__(text=text, priority=priority, *args,
**kwargs)
def _ParseTime(self, match):
"""Parses the time from a match object.
Args:
match: A match from the self._time regex.
Returns:
The text to replace match with, i.e., the empty string.
"""
self.minutes = int(match.group('time'))
return ''
def _ParseSpecializedTokens(self, text):
"""Parse NextAction-specific tokens.
"""
text = super(NextAction, self)._ParseSpecializedTokens(text)
text = self._time.sub(self._ParseTime, text)
return text
class NeedsNextActionStub(NextAction):
"""A stub to remind the user that a Project needs a NextAction."""
_stub_text = '{MISSING Next Action}'
def __init__(self, project, *args, **kwargs):
super(NeedsNextActionStub, self).__init__(
text=NeedsNextActionStub._stub_text, *args, **kwargs)
self.parent = project
self.Patch = lambda action, now=None: self.parent.Patch(action, now)
self.Source = lambda: self.parent.Source()
class Comment(IndentedNode):
_level = NextAction._level + 1
_can_nest_same_type = True
def __init__(self, text=None, priority=None, *args, **kwargs):
super(Comment, self).__init__(text=text, priority=priority, *args,
**kwargs)
|
|
from __future__ import division, absolute_import
from sys import stdout
import attr
import json
from io import BytesIO
from io import BufferedReader
from collections import namedtuple
from twisted.logger import Logger
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.internet.protocol import Protocol
from twisted.internet.protocol import connectionDone
from twisted.internet import error
from . import _meta
from .config_state import ServerInfo
from .config_state import ClientInfo
from .config_state import SubscriptionArgs
from . import actions
from .errors import NatsError
LANG = "py.twisted"
CLIENT_NAME = "txnats"
DISCONNECTED = 0
CONNECTED = 1
class NatsProtocol(Protocol):
server_settings = None
log = Logger()
def __init__(self, own_reactor=None, verbose=True, pedantic=False,
ssl_required=False, auth_token=None, user="",
password="", on_msg=None, on_connect=None,
event_subscribers=None, subscriptions=None, unsubs=None):
"""
@param own_reactor: A Twisted Reactor, defaults to standard. Chiefly
customizable for testing.
@param verbose: Turns on +OK protocol acknowledgements.
@param pedantic: Turns on additional strict format checking, e.g.
for properly formed subjects
@param ssl_required: Indicates whether the client requires
an SSL connection.
@param auth_token: Client authorization token
@param user: Connection username (if auth_required is set)
        @param password: Connection password (if auth_required is set)
@param on_msg: Handler for messages for subscriptions that don't have
their own on_msg handler. Default behavior is to write to stdout.
A callable that takes the following params:
@param nats_protocol: An instance of NatsProtocol.
@param sid: A unique alphanumeric subscription ID.
@param subject: A valid NATS subject.
@param reply_to: The reply to.
@param payload: Bytes of the payload.
@param on_connect: Callable that takes this instance of NatsProtocol
which will be called upon the first successful connection.
@param event_subscribers: A collection of functions that take an
event/action entity.
@param subscriptions: A dict of sids and SubscriptionArgs.
@param unsubs: A dict of sids and ints representing the number
of messages for the sid before automatic unsubscription.
"""
self.reactor = own_reactor if own_reactor else reactor
self.status = DISCONNECTED
self.verbose = verbose
        # The ping loop sends PINGs when started; self.pout counts PINGs
        # that are still awaiting a PONG.
self.ping_loop = LoopingCall(self.ping)
self.pout = 0
self.remaining_bytes = b''
self.client_info = ClientInfo(
verbose, pedantic, ssl_required,
auth_token, user, password,
CLIENT_NAME, LANG, _meta.version)
if on_msg:
# Ensure the on_msg signature fits.
on_msg(nats_protocol=self, sid="0", subject="testSubject",
reply_to='inBox', payload=b'hello, world')
self.on_msg = on_msg
self.on_connect = on_connect
self.on_connect_d = defer.Deferred()
if on_connect:
self.on_connect_d.addCallback(on_connect)
self.on_connect_d.addErrback(self._eb_trace_and_raise)
self.sids = {}
self.subscriptions = subscriptions if subscriptions is not None else {}
self.unsubs = unsubs if unsubs else {}
self.event_subscribers = event_subscribers if event_subscribers is not None else []
def __repr__(self):
return r'<NatsProtocol connected={} server_info={}>'.format(self.status, self.server_settings)
def _eb_trace_and_raise(self, failure):
failure.printTraceback()
failure.raiseException()
def dispatch(self, event):
"""Call each event subscriber with the event.
"""
if self.event_subscribers is None:
return
for event_subscriber in self.event_subscribers:
event_subscriber(event)
return
def connectionLost(self, reason=connectionDone):
"""Called when the connection is shut down.
Clear any circular references here, and any external references
to this Protocol. The connection has been closed.
Clear left over remaining bytes because they won't be continued
properly upon reconnection.
@type reason: L{twisted.python.failure.Failure}
"""
self.status = DISCONNECTED
self.remaining_bytes = b''
if reason.check(error.ConnectionLost):
self.dispatch(actions.ConnectionLost(self, reason=reason))
else:
self.dispatch(actions.Disconnected(self, reason=reason))
def dataReceived(self, data):
"""
Parse the NATS.io protocol from chunks of data streaming from
the connected gnatsd.
The server settings will be set and connect will be sent with this
client's info upon an INFO, which should happen when the
transport connects.
Registered message callback functions will be called with MSGs
once parsed.
        pong() will be called in response to a PING.
        An exception will be raised upon an -ERR from gnatsd.
        A +OK doesn't do anything.
"""
if self.remaining_bytes:
data = self.remaining_bytes + data
self.remaining_bytes = b''
data_buf = BufferedReader(BytesIO(data))
while True:
command = data_buf.read(4)
if command == b"-ERR":
raise NatsError(data_buf.read())
elif command == b"+OK\r":
val = data_buf.read(1)
if val != b'\n':
self.remaining_bytes += command
break
elif command == b"MSG ":
val = data_buf.readline()
if not val:
self.remaining_bytes += command
break
if not val.endswith(b'\r\n'):
self.remaining_bytes += command + val
break
meta_data = val.split(b" ")
n_bytes = int(meta_data[-1])
subject = meta_data[0].decode()
if len(meta_data) == 4:
reply_to = meta_data[2].decode()
elif len(meta_data) == 3:
reply_to = None
else:
self.remaining_bytes += command + val
break
sid = meta_data[1].decode()
if sid in self.sids:
on_msg = self.sids[sid]
else:
on_msg = self.on_msg
payload = data_buf.read(n_bytes)
if len(payload) != n_bytes:
self.remaining_bytes += command + val + payload
break
if on_msg:
on_msg(nats_protocol=self, sid=sid, subject=subject,
reply_to=reply_to, payload=payload)
else:
stdout.write(command.decode())
stdout.write(val.decode())
stdout.write(payload.decode())
self.dispatch(
actions.ReceivedMsg(
sid, self,
subject=subject,
payload=payload,
reply_to=reply_to)
)
if sid in self.unsubs:
self.unsubs[sid] -= 1
if self.unsubs[sid] <= 0:
self._forget_subscription(sid)
self.dispatch(actions.UnsubMaxReached(sid, protocol=self))
payload_post = data_buf.readline()
if payload_post != b'\r\n':
self.remaining_bytes += (command + val + payload
+ payload_post)
break
elif command == b"PING":
self.dispatch(actions.ReceivedPing(self))
self.pong()
val = data_buf.readline()
if val != b'\r\n':
self.remaining_bytes += command + val
break
elif command == b"PONG":
self.pout -= 1
self.dispatch(actions.ReceivedPong(self, outstanding_pings=self.pout))
val = data_buf.readline()
if val != b'\r\n':
self.remaining_bytes += command + val
break
elif command == b"INFO":
val = data_buf.readline()
if not val.endswith(b'\r\n'):
self.remaining_bytes += command + val
break
settings = json.loads(val.decode('utf8'))
self.server_settings = ServerInfo(**settings)
self.dispatch(actions.ReceivedInfo(
self, server_info=self.server_settings))
self.status = CONNECTED
self.pout = 0
self.sids = {}
self.connect()
if self.on_connect_d:
self.on_connect_d.callback(self)
self.on_connect_d = None
else:
self.dispatch(actions.UnhandledCommand(self, command=command))
val = data_buf.read()
self.remaining_bytes += command + val
if not data_buf.peek(1):
break
def connect(self):
"""
        Tell the NATS server about this client and its options.
"""
action = actions.SendConnect(self, client_info=self.client_info)
payload = 'CONNECT {}\r\n'.format(json.dumps(
self.client_info.asdict_for_connect()
, separators=(',', ':')))
self.transport.write(payload.encode())
self.dispatch(action)
def pub(self, subject, payload, reply_to=None):
"""
Publish a payload of bytes to a subject.
@param subject: The destination subject to publish to.
@param reply_to: The reply inbox subject that subscribers can use
to send a response back to the publisher/requestor.
@param payload: The message payload data, in bytes.
"""
action = actions.SendPub(self, subject, payload, reply_to)
reply_part = ""
if reply_to:
reply_part = "{} ".format(reply_to)
# TODO: deal with the payload if it is bigger than the server max.
op = "PUB {} {}{}\r\n".format(
subject, reply_part, len(payload)).encode()
op += payload + b'\r\n'
self.transport.write(op)
self.dispatch(action)
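    # Example of the bytes written by pub() for a hypothetical call
    # pub("foo", b"hello", reply_to="INBOX.1"):
    #   b"PUB foo INBOX.1 5\r\nhello\r\n"
    # and without a reply_to:
    #   b"PUB foo 5\r\nhello\r\n"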
def apply_subscriptions(self):
"""
Subscribe all the subscriptions and unsubscribe all of
the unsubscriptions.
Builds the state of subscriptions and unsubscriptions
with max messages.
"""
if self.status == CONNECTED:
for sid, sub_args in self.subscriptions.items():
self.sub(
sub_args.subject, sub_args.sid,
sub_args.queue_group, sub_args.on_msg)
if sid in self.unsubs:
self.unsub(sid, max_msgs=self.unsubs[sid])
def sub(self, subject, sid, queue_group=None, on_msg=None):
"""
Subscribe to a subject.
@param subject: The subject name to subscribe to.
@param sid: A unique alphanumeric subscription ID.
@param queue_group: If specified, the subscriber will
join this queue group.
@param on_msg: A callable that takes the following params:
@param nats_protocol: An instance of NatsProtocol.
@param sid: A unique alphanumeric subscription ID.
@param subject: A valid NATS subject.
@param reply_to: The reply to.
@param payload: Bytes of the payload.
"""
self.sids[sid] = on_msg
self.subscriptions[sid] = SubscriptionArgs(
subject, sid, queue_group, on_msg)
self.dispatch(actions.SendSub(
sid=sid,
protocol=self,
subject=subject,
queue_group=queue_group,
on_msg=on_msg
))
queue_group_part = ""
if queue_group:
queue_group_part = "{} ".format(queue_group)
op = "SUB {} {}{}\r\n".format(subject, queue_group_part, sid)
self.transport.write(op.encode('utf8'))
def _forget_subscription(self, sid):
"""Undeclare a subscription. Any on_msg declared for the subscription
will no longer be called.
If a apply_subscriptions is called,
which it is during a reconnect, These subscriptions will not be
established. """
if sid in self.unsubs:
del self.unsubs[sid]
if sid in self.subscriptions:
del self.subscriptions[sid]
if sid in self.sids:
del self.sids[sid]
def unsub(self, sid, max_msgs=None):
"""
        Unsubscribes the connection from the specified subject,
or auto-unsubscribes after the specified
number of messages has been received.
@param sid: The unique alphanumeric subscription ID of
the subject to unsubscribe from.
@type sid: str
@param max_msgs: Optional number of messages to wait for before
automatically unsubscribing.
@type max_msgs: int
"""
action = actions.SendUnsub(sid=sid, protocol=self, max_msgs=max_msgs)
max_msgs_part = ""
if max_msgs:
max_msgs_part = "{}".format(max_msgs)
self.unsubs[sid] = max_msgs
else:
self._forget_subscription(sid)
op = "UNSUB {} {}\r\n".format(sid, max_msgs_part)
self.transport.write(op.encode('utf8'))
self.dispatch(action)
def ping(self):
"""
Send ping.
"""
op = b"PING\r\n"
self.transport.write(op)
self.pout += 1
self.dispatch(actions.SendPing(self, outstanding_pings=self.pout))
def pong(self):
"""
Send pong.
"""
op = b"PONG\r\n"
self.transport.write(op)
self.dispatch(actions.SendPong(self))
def request(self, sid, subject):
"""
        Make a request on a subject and wait for a single reply:
        create a reply-to inbox subject, subscribe to it, register a
        Deferred for the reply-to in the inbox, and auto-unsubscribe
        after one message.
"""
raise NotImplementedError()
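# A minimal wiring sketch (illustration only; the server address, subject name
# and handlers are assumptions, not part of this module). It relies on the
# standard Twisted endpoint helpers and a NATS server on the default port 4222.
if __name__ == '__main__':
    from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol

    def print_msg(nats_protocol, sid, subject, reply_to, payload):
        # Matches the on_msg signature documented in NatsProtocol.__init__.
        stdout.write("{}: {!r}\n".format(subject, payload))

    def on_connected(protocol):
        # Called with the NatsProtocol instance after the first INFO/CONNECT.
        protocol.sub("demo.subject", "1", on_msg=print_msg)
        protocol.pub("demo.subject", b"hello from txnats")

    endpoint = TCP4ClientEndpoint(reactor, "localhost", 4222)
    connectProtocol(endpoint, NatsProtocol(verbose=False,
                                           on_connect=on_connected))
    reactor.run()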
|
|
# This module contains the analysis options.
# All variables with names of all caps will be registered as a state option to SimStateOptions.
import string
from .sim_state_options import SimStateOptions
# This option controls whether or not constraints are tracked in the analysis.
TRACK_CONSTRAINTS = "TRACK_CONSTRAINTS"
# This option causes constraints to be flushed at the beginning of every instruction.
INSTRUCTION_SCOPE_CONSTRAINTS = "INSTRUCTION_SCOPE_CONSTRAINTS"
BLOCK_SCOPE_CONSTRAINTS = "BLOCK_SCOPE_CONSTRAINTS"
# This option controls whether or not various entities (IRExpr constants, reads, writes, etc) get simplified automatically
SIMPLIFY_EXPRS = "SIMPLIFY_EXPRS"
SIMPLIFY_MEMORY_READS = "SIMPLIFY_MEMORY_READS"
SIMPLIFY_MEMORY_WRITES = "SIMPLIFY_MEMORY_WRITES"
SIMPLIFY_REGISTER_READS = "SIMPLIFY_REGISTER_READS"
SIMPLIFY_REGISTER_WRITES = "SIMPLIFY_REGISTER_WRITES"
SIMPLIFY_RETS = "SIMPLIFY_RETS"
SIMPLIFY_EXIT_STATE = "SIMPLIFY_EXIT_STATE"
SIMPLIFY_EXIT_TARGET = "SIMPLIFY_EXIT_TARGET"
SIMPLIFY_EXIT_GUARD = "SIMPLIFY_EXIT_GUARD"
SIMPLIFY_CONSTRAINTS = "SIMPLIFY_CONSTRAINTS"
# This option controls whether the helper functions are actually executed for CCALL expressions.
# Without this, the arguments are parsed, but the calls aren't executed, and an unconstrained symbolic
# variable is returned, instead.
DO_CCALLS = "DO_CCALLS"
# Whether we should use the simplified ccalls or not.
USE_SIMPLIFIED_CCALLS = "USE_SIMPLIFIED_CCALLS"
# This option controls whether or not emulated exits and coderefs are added from a call instruction to its ret site.
DO_RET_EMULATION = "DO_RET_EMULATION"
# If this option is present, the guards to emulated ret exits are True instead of False
TRUE_RET_EMULATION_GUARD = "TRUE_RET_EMULATION_GUARD"
# This option causes the analysis to immediately concretize any symbolic value that it comes across
CONCRETIZE = "CONCRETIZE"
# This option prevents angr from doing hundreds of constraint solves to resolve symbolic jump targets
NO_SYMBOLIC_JUMP_RESOLUTION = "NO_SYMBOLIC_JUMP_RESOLUTION"
# This option prevents angr from doing hundreds of constraint solves when it hits a symbolic syscall
NO_SYMBOLIC_SYSCALL_RESOLUTION = "NO_SYMBOLIC_SYSCALL_RESOLUTION"
# The absence of this option causes the analysis to avoid reasoning about most symbolic values.
SYMBOLIC = "SYMBOLIC"
# This variable causes claripy to use a string solver (CVC4)
STRINGS_ANALYSIS = "STRINGS_ANALYSIS"
# Generate symbolic values for non-existent values. The absence of this option causes Unconstrained() to return default concrete values (like 0)
SYMBOLIC_INITIAL_VALUES = "SYMBOLIC_INITIAL_VALUES"
# this causes angr to use SimAbstractMemory for the memory region
ABSTRACT_MEMORY = "ABSTRACT_MEMORY"
# This causes symbolic memory to avoid performing symbolic reads and writes. Unconstrained results
# are returned instead, if these options are present.
AVOID_MULTIVALUED_READS = "AVOID_MULTIVALUED_READS"
AVOID_MULTIVALUED_WRITES = "AVOID_MULTIVALUED_WRITES"
# This option concretizes symbolically sized writes
CONCRETIZE_SYMBOLIC_WRITE_SIZES = "CONCRETIZE_SYMBOLIC_WRITE_SIZES"
# This option concretizes the read size if it is symbolic when reading from a file
CONCRETIZE_SYMBOLIC_FILE_READ_SIZES = "CONCRETIZE_SYMBOLIC_FILE_READ_SIZES"
# If absent, treat the end of files as a frontier at which new data will be created
# If present, treat the end of files as an EOF
FILES_HAVE_EOF = "FILES_HAVE_EOF"
UNKNOWN_FILES_HAVE_EOF = FILES_HAVE_EOF
# Attempting to open an unknown file will result in creating it with a symbolic length
ALL_FILES_EXIST = "ALL_FILES_EXIST"
# Reads from devices will have a symbolic size
SHORT_READS = "SHORT_READS"
# This causes angr to support fully symbolic writes. It is very likely that speed will suffer.
SYMBOLIC_WRITE_ADDRESSES = "SYMBOLIC_WRITE_ADDRESSES"
# This causes symbolic memory to avoid concretizing memory addresses to a single value when the
# range check fails.
CONSERVATIVE_WRITE_STRATEGY = "CONSERVATIVE_WRITE_STRATEGY"
CONSERVATIVE_READ_STRATEGY = "CONSERVATIVE_READ_STRATEGY"
# This enables dependency tracking for all Claripy ASTs.
AST_DEPS = "AST_DEPS"
# This controls whether the temps are treated as symbolic values (for easier debugging) or just written as the z3 values
SYMBOLIC_TEMPS = "SYMBOLIC_TEMPS"
# These are options for tracking various types of actions
TRACK_MEMORY_ACTIONS = "TRACK_MEMORY_ACTIONS"
TRACK_REGISTER_ACTIONS = "TRACK_REGISTER_ACTIONS"
TRACK_TMP_ACTIONS = "TRACK_TMP_ACTIONS"
TRACK_JMP_ACTIONS = "TRACK_JMP_ACTIONS"
TRACK_CONSTRAINT_ACTIONS = "TRACK_CONSTRAINT_ACTIONS"
# note that TRACK_OP_ACTIONS is not enabled in symbolic mode by default, since Yan is worried about its performance
# impact. someone should measure it and make a final decision.
TRACK_OP_ACTIONS = "TRACK_OP_ACTIONS"
# track the history of actions through a path (multiple states). This action affects things on the angr level
TRACK_ACTION_HISTORY = "TRACK_ACTION_HISTORY"
# track memory mapping and permissions
TRACK_MEMORY_MAPPING = "TRACK_MEMORY_MAPPING"
# track constraints in solver. This is required to enable unsat_core()
CONSTRAINT_TRACKING_IN_SOLVER = "CONSTRAINT_TRACKING_IN_SOLVER"
# this is an internal option to automatically track dependencies in SimProcedures
AUTO_REFS = "AUTO_REFS"
# Whether we should track dependencies in SimActions
# If none of the ref options above exist, this option does nothing
ACTION_DEPS = "ACTION_DEPS"
# This enables the tracking of reverse mappings (name->addr and hash->addr) in SimSymbolicMemory
REVERSE_MEMORY_NAME_MAP = "REVERSE_MEMORY_NAME_MAP"
REVERSE_MEMORY_HASH_MAP = "REVERSE_MEMORY_HASH_MAP"
# This enables tracking of which bytes in the state are symbolic
MEMORY_SYMBOLIC_BYTES_MAP = "MEMORY_SYMBOLIC_BYTES_MAP"
# this makes the engine copy states
COPY_STATES = "COPY_STATES"
COW_STATES = COPY_STATES
# this replaces calls with an unconstraining of the return register
CALLLESS = "CALLLESS"
# these enable independent constraint set optimizations. The first is a master toggle, and the second controls
# splitting constraint sets during simplification
COMPOSITE_SOLVER = "COMPOSITE_SOLVER"
ABSTRACT_SOLVER = "ABSTRACT_SOLVER"
# this stops SimRun from checking the satisfiability of successor states
LAZY_SOLVES = "LAZY_SOLVES"
# This makes angr downsize solvers wherever reasonable.
DOWNSIZE_Z3 = "DOWNSIZE_Z3"
# Turn on super-fastpath mode
SUPER_FASTPATH = "SUPER_FASTPATH"
# use FastMemory for memory
FAST_MEMORY = "FAST_MEMORY"
# use FastMemory for registers
FAST_REGISTERS = "FAST_REGISTERS"
# Under-constrained symbolic execution
UNDER_CONSTRAINED_SYMEXEC = "UNDER_CONSTRAINED_SYMEXEC"
# enable unicorn engine
UNICORN = "UNICORN"
UNICORN_ZEROPAGE_GUARD = "UNICORN_ZEROPAGE_GUARD"
UNICORN_SYM_REGS_SUPPORT = "UNICORN_SYM_REGS_SUPPORT"
UNICORN_TRACK_BBL_ADDRS = "UNICORN_TRACK_BBL_ADDRS"
UNICORN_TRACK_STACK_POINTERS = "UNICORN_TRACK_STACK_POINTERS"
# concretize symbolic data when we see it "too often"
UNICORN_THRESHOLD_CONCRETIZATION = "UNICORN_THRESHOLD_CONCRETIZATION"
# aggressively concretize symbolic data when we see it in unicorn
UNICORN_AGGRESSIVE_CONCRETIZATION = "UNICORN_AGGRESSIVE_CONCRETIZATION"
UNICORN_HANDLE_TRANSMIT_SYSCALL = "UNICORN_HANDLE_TRANSMIT_SYSCALL"
# floating point support
SUPPORT_FLOATING_POINT = "SUPPORT_FLOATING_POINT"
# Turn on memory region mapping logging
REGION_MAPPING = 'REGION_MAPPING'
# Resilience options
BYPASS_UNSUPPORTED_IROP = "BYPASS_UNSUPPORTED_IROP"
BYPASS_ERRORED_IROP = "BYPASS_ERRORED_IROP"
BYPASS_UNSUPPORTED_IREXPR = "BYPASS_UNSUPPORTED_IREXPR"
BYPASS_UNSUPPORTED_IRSTMT = "BYPASS_UNSUPPORTED_IRSTMT"
BYPASS_UNSUPPORTED_IRDIRTY = "BYPASS_UNSUPPORTED_IRDIRTY"
BYPASS_UNSUPPORTED_IRCCALL = "BYPASS_UNSUPPORTED_IRCCALL"
BYPASS_ERRORED_IRCCALL = "BYPASS_ERRORED_IRCCALL"
BYPASS_UNSUPPORTED_SYSCALL = "BYPASS_UNSUPPORTED_SYSCALL"
BYPASS_ERRORED_IRSTMT = "BYPASS_ERRORED_IRSTMT"
UNSUPPORTED_BYPASS_ZERO_DEFAULT = "UNSUPPORTED_BYPASS_ZERO_DEFAULT"
UNINITIALIZED_ACCESS_AWARENESS = 'UNINITIALIZED_ACCESS_AWARENESS'
BEST_EFFORT_MEMORY_STORING = 'BEST_EFFORT_MEMORY_STORING'
# Approximation options (to optimize symbolic execution)
APPROXIMATE_GUARDS = "APPROXIMATE_GUARDS"
APPROXIMATE_SATISFIABILITY = "APPROXIMATE_SATISFIABILITY" # does GUARDS and the rest of the constraints
APPROXIMATE_MEMORY_SIZES = "APPROXIMATE_MEMORY_SIZES"
APPROXIMATE_MEMORY_INDICES = "APPROXIMATE_MEMORY_INDICES"
VALIDATE_APPROXIMATIONS = "VALIDATE_APPROXIMATIONS"
# use an experimental replacement solver
REPLACEMENT_SOLVER = "REPLACEMENT_SOLVER"
# use a cache-less solver in claripy
CACHELESS_SOLVER = "CACHELESS_SOLVER"
# IR optimization
OPTIMIZE_IR = "OPTIMIZE_IR"
SPECIAL_MEMORY_FILL = "SPECIAL_MEMORY_FILL"
# With this option, the value inside the ip register is kept symbolic
KEEP_IP_SYMBOLIC = "KEEP_IP_SYMBOLIC"
# Do not try to concretize a symbolic IP. With this option, all states with symbolic IPs will be seen as unconstrained.
NO_IP_CONCRETIZATION = "NO_IP_CONCRETIZATION"
# Do not union values from different locations when reading from memory, to reduce the loss in precision.
# It is only applied to SimAbstractMemory
KEEP_MEMORY_READS_DISCRETE = "KEEP_MEMORY_READS_DISCRETE"
# Raise a SimSegfaultError on illegal memory accesses
STRICT_PAGE_ACCESS = "STRICT_PAGE_ACCESS"
# Raise a SimSegfaultError when executing nonexecutable memory
ENABLE_NX = "ENABLE_NX"
# Ask the SimOS to handle segfaults
EXCEPTION_HANDLING = "EXCEPTION_HANDLING"
# Use system timestamps in simprocedures instead of returning symbolic values
USE_SYSTEM_TIMES = "USE_SYSTEM_TIMES"
# Track variables in state.solver.all_variables
TRACK_SOLVER_VARIABLES = "TRACK_SOLVER_VARIABLES"
# Efficient state merging requires potential state ancestors being kept in memory
EFFICIENT_STATE_MERGING = "EFFICIENT_STATE_MERGING"
# Return 0 for any unspecified bytes in memory/registers
ZERO_FILL_UNCONSTRAINED_MEMORY = 'ZERO_FILL_UNCONSTRAINED_MEMORY'
ZERO_FILL_UNCONSTRAINED_REGISTERS = 'ZERO_FILL_UNCONSTRAINED_REGISTERS'
INITIALIZE_ZERO_REGISTERS = ZERO_FILL_UNCONSTRAINED_REGISTERS
# Return a new symbolic variable for any unspecified bytes in memory/registers. If neither these nor the above options
# are specified, a warning will be issued and an unconstrained symbolic variable will be generated
SYMBOL_FILL_UNCONSTRAINED_MEMORY = 'SYMBOL_FILL_UNCONSTRAINED_MEMORY'
SYMBOL_FILL_UNCONSTRAINED_REGISTERS = 'SYMBOL_FILL_UNCONSTRAINED_REGISTERS'
# Attempt to support wacky ops not found in libvex
EXTENDED_IROP_SUPPORT = 'EXTENDED_IROP_SUPPORT'
# For each division operation, produce a successor state with the constraint that the divisor is zero
PRODUCE_ZERODIV_SUCCESSORS = 'PRODUCE_ZERODIV_SUCCESSORS'
SYNC_CLE_BACKEND_CONCRETE = 'SYNC_CLE_BACKEND_CONCRETE'
# Allow POSIX API send() to fail
ALLOW_SEND_FAILURES = 'ALLOW_SEND_FAILURES'
# Use hybrid solver
HYBRID_SOLVER = 'HYBRID_SOLVER'
# This tells the hybrid solver to use the approximate backend first. The exact backend will be used
# only if the number of possible approximate solutions is greater than what was requested by the user.
# Note that this option will only take effect if a hybrid solver is used.
APPROXIMATE_FIRST = 'APPROXIMATE_FIRST'
# Disable optimizations based on symbolic memory bytes being single-valued in SimSymbolicMemory. This is useful in
# tracing mode, since such optimizations are unreliable: preconstraints will be removed after tracing is done.
SYMBOLIC_MEMORY_NO_SINGLEVALUE_OPTIMIZATIONS = 'SYMBOLIC_MEMORY_NO_SINGLEVALUE_OPTIMIZATIONS'
# When SimMemory.find() is called, apply a strict size-limit condition check. This is mainly used in tracing mode. When
# this option is enabled, constraint replacement and solves will kick in when testing the byte-equivalence constraints
# built in SimMemory._find(), and fewer cases will be satisfiable. In tracing mode, this means the character to find
# will have to show up at the exact location specified in the concrete input. When this option is disabled, constraint
# replacement and solves will not be triggered when testing byte-equivalence constraints, which, in tracing mode, allows
# some flexibility regarding the position of the character to find, at the cost of larger constraints built in
# SimMemory._find() and more time and memory consumption.
MEMORY_FIND_STRICT_SIZE_LIMIT = 'MEMORY_FIND_STRICT_SIZE_LIMIT'
#
# CGC specific state options
#
CGC_ZERO_FILL_UNCONSTRAINED_MEMORY = ZERO_FILL_UNCONSTRAINED_MEMORY
# Make sure the receive syscall always reads as many bytes as the program wants
CGC_NO_SYMBOLIC_RECEIVE_LENGTH = 'CGC_NO_SYMBOLIC_RECEIVE_LENGTH'
BYPASS_VERITESTING_EXCEPTIONS = 'BYPASS_VERITESTING_EXCEPTIONS'
# Make sure file descriptors on transmit and receive are always 1 and 0
CGC_ENFORCE_FD = 'CGC_ENFORCE_FD'
# FDWAIT will always return FDs as non blocking
CGC_NON_BLOCKING_FDS = 'CGC_NON_BLOCKING_FDS'
# Allows memory breakpoints to get more accurate sizes in case of reading large chunks.
# Sacrifices performance for more finely tuned memory read sizes.
MEMORY_CHUNK_INDIVIDUAL_READS = "MEMORY_CHUNK_INDIVIDUAL_READS"
# Synchronize memory mapping reported by angr with the concrete process.
SYMBION_SYNC_CLE = "SYMBION_SYNC_CLE"
# Removes SimProcedure stubs on synchronization with the concrete process.
# We will execute a SimProcedure for functions that have one, and the real function for those that do not.
SYMBION_KEEP_STUBS_ON_SYNC = "SYMBION_KEEP_STUBS_ON_SYNC"
#
# Register those variables as Boolean state options
#
_g = globals().copy()
for k, v in _g.items():
if all([ char in string.ascii_uppercase + "_" + string.digits for char in k ]) and type(v) is str:
if k in ("UNKNOWN_FILES_HAVE_EOF", "CGC_ZERO_FILL_UNCONSTRAINED_MEMORY", "COW_STATES", "INITIALIZE_ZERO_REGISTERS"):
            # UNKNOWN_FILES_HAVE_EOF == FILES_HAVE_EOF
            # CGC_ZERO_FILL_UNCONSTRAINED_MEMORY == ZERO_FILL_UNCONSTRAINED_MEMORY
            # COW_STATES == COPY_STATES
            # INITIALIZE_ZERO_REGISTERS == ZERO_FILL_UNCONSTRAINED_REGISTERS
continue
SimStateOptions.register_bool_option(v)
# useful sets of options
resilience = { BYPASS_UNSUPPORTED_IROP, BYPASS_UNSUPPORTED_IREXPR, BYPASS_UNSUPPORTED_IRSTMT, BYPASS_UNSUPPORTED_IRDIRTY, BYPASS_UNSUPPORTED_IRCCALL, BYPASS_ERRORED_IRCCALL, BYPASS_UNSUPPORTED_SYSCALL, BYPASS_ERRORED_IROP, BYPASS_VERITESTING_EXCEPTIONS, BYPASS_ERRORED_IRSTMT }
resilience_options = resilience # alternate name?
refs = { TRACK_REGISTER_ACTIONS, TRACK_MEMORY_ACTIONS, TRACK_TMP_ACTIONS, TRACK_JMP_ACTIONS, ACTION_DEPS, TRACK_CONSTRAINT_ACTIONS }
approximation = { APPROXIMATE_SATISFIABILITY, APPROXIMATE_MEMORY_SIZES, APPROXIMATE_MEMORY_INDICES }
symbolic = { DO_CCALLS, SYMBOLIC, TRACK_CONSTRAINTS, SYMBOLIC_INITIAL_VALUES, COMPOSITE_SOLVER }
simplification = { SIMPLIFY_MEMORY_WRITES, SIMPLIFY_REGISTER_WRITES }
common_options = { COW_STATES, OPTIMIZE_IR, TRACK_MEMORY_MAPPING, SUPPORT_FLOATING_POINT, EXTENDED_IROP_SUPPORT, ALL_FILES_EXIST, FILES_HAVE_EOF } | simplification
unicorn = { UNICORN, UNICORN_SYM_REGS_SUPPORT, ZERO_FILL_UNCONSTRAINED_REGISTERS, UNICORN_HANDLE_TRANSMIT_SYSCALL, UNICORN_TRACK_BBL_ADDRS, UNICORN_TRACK_STACK_POINTERS }
concrete = { SYNC_CLE_BACKEND_CONCRETE }
modes = {
'symbolic': common_options | symbolic | { TRACK_CONSTRAINT_ACTIONS }, #| approximation | { VALIDATE_APPROXIMATIONS }
'symbolic_approximating': common_options | symbolic | approximation | { TRACK_CONSTRAINT_ACTIONS },
'static': (common_options - simplification) | { REGION_MAPPING, BEST_EFFORT_MEMORY_STORING, SYMBOLIC_INITIAL_VALUES, DO_CCALLS, DO_RET_EMULATION, TRUE_RET_EMULATION_GUARD, BLOCK_SCOPE_CONSTRAINTS, TRACK_CONSTRAINTS, ABSTRACT_MEMORY, ABSTRACT_SOLVER, USE_SIMPLIFIED_CCALLS, REVERSE_MEMORY_NAME_MAP },
'fastpath': (common_options - simplification ) | (symbolic - { SYMBOLIC, DO_CCALLS }) | resilience | { TRACK_OP_ACTIONS, BEST_EFFORT_MEMORY_STORING, AVOID_MULTIVALUED_READS, AVOID_MULTIVALUED_WRITES, SYMBOLIC_INITIAL_VALUES, DO_RET_EMULATION, NO_SYMBOLIC_JUMP_RESOLUTION, NO_SYMBOLIC_SYSCALL_RESOLUTION, FAST_REGISTERS },
'tracing': (common_options - simplification - {SUPPORT_FLOATING_POINT, ALL_FILES_EXIST}) | symbolic | resilience | (unicorn - { UNICORN_TRACK_STACK_POINTERS }) | { CGC_NO_SYMBOLIC_RECEIVE_LENGTH, REPLACEMENT_SOLVER, EXCEPTION_HANDLING, ZERO_FILL_UNCONSTRAINED_MEMORY, PRODUCE_ZERODIV_SUCCESSORS, ALLOW_SEND_FAILURES, SYMBOLIC_MEMORY_NO_SINGLEVALUE_OPTIMIZATIONS, MEMORY_FIND_STRICT_SIZE_LIMIT },
}
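# Example (illustrative): these sets are plain Python sets of option names, so
# custom modes can be composed with ordinary set algebra, e.g.
#   custom = (modes['symbolic'] | {LAZY_SOLVES, TRACK_MEMORY_ACTIONS}) - simplification
# How the resulting set is consumed (for instance via a state's add_options/
# remove_options arguments) is up to the caller and is not defined in this module.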
|
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pytest
from itertools import chain, product
import os.path
import uuid
import numpy as np
from filestore.handlers_base import HandlerBase
def _verify_shifted_resource(last_res, new_res):
'''Check that resources are identical except for root/rpath'''
for k in set(chain(new_res, last_res)):
if k not in ('root', 'resource_path'):
assert new_res[k] == last_res[k]
else:
assert new_res[k] != last_res[k]
n_fp = os.path.join(new_res['root'],
new_res['resource_path'])
l_fp = os.path.join(last_res['root'],
last_res['resource_path']).rstrip('/')
assert n_fp == l_fp
def num_paths(start, stop):
return os.path.join(*(str(_)
for _ in range(start, stop)))
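# For example, num_paths(0, 3) == os.path.join('0', '1', '2') ('0/1/2' on
# POSIX), so '/' + num_paths(0, 15) yields fifteen path segments for
# shift_root to move between 'root' and 'resource_path'.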
@pytest.mark.parametrize("step,sign", product([1, 3, 5, 7], [1, -1]))
def test_root_shift(fs_v1, step, sign):
fs = fs_v1
n_paths = 15
if sign > 0:
root = ''
rpath = '/' + num_paths(0, n_paths)
elif sign < 0:
root = '/' + num_paths(0, n_paths)
rpath = ''
last_res = fs.insert_resource('root-test',
rpath,
{'a': 'fizz', 'b': 5},
root=root)
for n, j in enumerate(range(step, n_paths, step)):
new_res, log = fs.shift_root(last_res, sign * step)
assert last_res == log['old']
if sign > 0:
left_count = j
elif sign < 0:
left_count = n_paths - j
assert new_res['root'] == '/' + num_paths(0, left_count)
assert new_res['resource_path'] == num_paths(left_count, n_paths)
_verify_shifted_resource(last_res, new_res)
last_res = new_res
@pytest.mark.parametrize("root", ['', '///', None])
def test_pathological_root(fs_v1, root):
fs = fs_v1
rpath = '/foo/bar/baz'
last_res = fs.insert_resource('root-test',
rpath,
{'a': 'fizz', 'b': 5},
root=root)
new_res, _ = fs.shift_root(last_res, 2)
assert new_res['root'] == '/foo/bar'
assert new_res['resource_path'] == 'baz'
def test_history(fs_v1):
fs = fs_v1
rpath = num_paths(0, 15)
root = '/'
shift_count = 5
last_res = fs.insert_resource('root-test',
rpath,
{'a': 'fizz', 'b': 5},
root=root)
for j in range(shift_count):
new_res, log = fs.shift_root(last_res, 1)
last_time = 0
cnt = 0
for doc in fs.get_history(last_res['uid']):
assert doc['time'] > last_time
assert doc['cmd'] == 'shift_root'
assert doc['cmd_kwargs'] == {'shift': 1}
assert doc['old'] == last_res
last_res = doc['new']
last_time = doc['time']
cnt += 1
assert cnt == shift_count
@pytest.mark.parametrize('shift', [-5, 5])
def test_over_step(fs_v1, shift):
fs = fs_v1
last_res = fs.insert_resource('root-test',
'a/b',
{'a': 'fizz', 'b': 5},
root='/c')
with pytest.raises(RuntimeError):
fs.shift_root(last_res, shift)
class FileMoveTestingHandler(HandlerBase):
specs = {'npy_series'} | HandlerBase.specs
def __init__(self, fpath, fmt):
self.fpath = fpath
self.fmt = fmt
def __call__(self, point_number):
fname = os.path.join(self.fpath,
self.fmt.format(point_number=point_number))
return np.load(fname)
def get_file_list(self, datumkw_gen):
return [os.path.join(self.fpath,
self.fmt.format(**dkw))
for dkw in datumkw_gen]
@pytest.fixture()
def moving_files(request, fs_v1, tmpdir):
tmpdir = str(tmpdir)
cnt = 15
shape = (7, 13)
local_path = '2016/04/28/aardvark'
fmt = 'cub_{point_number:05}.npy'
res = fs_v1.insert_resource('npy_series',
local_path,
{'fmt': fmt},
root=tmpdir)
datum_uids = []
fnames = []
os.makedirs(os.path.join(tmpdir, local_path))
for j in range(cnt):
fpath = os.path.join(tmpdir, local_path,
fmt.format(point_number=j))
np.save(fpath, np.ones(shape) * j)
d = fs_v1.insert_datum(res, str(uuid.uuid4()),
{'point_number': j})
datum_uids.append(d['datum_id'])
fnames.append(fpath)
return fs_v1, res, datum_uids, shape, cnt, fnames
@pytest.mark.parametrize("remove", [True, False])
def test_moving(moving_files, remove):
fs, res, datum_uids, shape, cnt, fnames = moving_files
fs.register_handler('npy_series', FileMoveTestingHandler)
# sanity check on the way in
for j, d_id in enumerate(datum_uids):
datum = fs.retrieve(d_id)
assert np.prod(shape) * j == np.sum(datum)
old_root = res['root']
new_root = os.path.join(old_root, 'archive')
for f in fnames:
assert os.path.exists(f)
res2, log = fs.change_root(res, new_root, remove_origin=remove)
print(res2['root'])
for f in fnames:
if old_root:
assert os.path.exists(f.replace(old_root, new_root))
else:
assert os.path.exists(os.path.join(new_root, f[1:]))
if remove:
assert not os.path.exists(f)
else:
assert os.path.exists(f)
# sanity check on the way out
for j, d_id in enumerate(datum_uids):
datum = fs.retrieve(d_id)
assert np.prod(shape) * j == np.sum(datum)
def test_no_root(fs_v1, tmpdir):
fs = fs_v1
fs.register_handler('npy_series', FileMoveTestingHandler)
local_path = 'aardvark'
fmt = 'cub_{point_number:05}.npy'
res = fs.insert_resource('npy_series',
os.path.join(str(tmpdir),
local_path),
{'fmt': fmt})
fs_v1.change_root(res, '/foobar')
def test_get_resource(moving_files):
fs, res, datum_uids, shape, cnt, fnames = moving_files
for d in datum_uids:
d_res = fs.resource_given_eid(d)
assert d_res == res
print(d_res, res)
def test_temporary_root(fs_v1):
fs = fs_v1
print(fs._db)
fs.set_root_map({'bar': 'baz'})
print(fs.root_map)
print(fs._handler_cache)
res = fs.insert_resource('root-test', 'foo', {}, root='bar')
dm = fs.insert_datum(res, str(uuid.uuid4()), {})
if fs.version == 1:
assert res['root'] == 'bar'
def local_handler(rpath):
return lambda: rpath
with fs.handler_context({'root-test': local_handler}) as fs:
print(fs._handler_cache)
assert not len(fs._handler_cache)
path = fs.retrieve(dm['datum_id'])
assert path == os.path.join('baz', 'foo')
|
|
# -*- coding: utf-8 -*-
import shutil
import tempfile
from contextlib import contextmanager
from unittest import TestCase, skip
from damnode import Damnode
from os import path as osp
class LinkTest(TestCase):
def test_read_remote_links(self):
d = create_damnode()
entries = d.read_links('https://nodejs.org/dist/latest-v5.x/')
self.assertTrue('https://nodejs.org/dist/latest-v5.x/node-v5.12.0-linux-arm64.tar.gz' in entries, entries)
def test_read_dir_links(self):
d = create_damnode()
entries = d.read_links(data_dir('local-index'))
expected = [
data_dir('local-index/node-v8.1.2-linux-arm64.tar.gz'),
data_dir('local-index/old.html'),
data_dir('local-index/v7.10.0'),
]
self.assertEqual(expected, entries)
def test_read_local_links(self):
d = create_damnode()
entries = d.read_links(data_dir('local-index/old.html'))
expected = [
data_dir('local-index/v3'),
"https://nodejs.org/dist/latest-v4.x",
"https://nodejs.org/dist/latest-v5.x",
"https://nodejs.org/dist/latest-v6.x",
data_dir('local-index/v7.10.0'),
]
self.assertEqual(expected, entries)
def test_read_links_package(self):
d = create_damnode()
self.assertEqual(['node-v6.xz'], d.read_links('node-v6.xz'))
# TODO: thorough test
def test_find_package(self):
d = TestDamnode()
link = d.find_package(data_dir('find-package-index'), (7, 10, 1))
exp_link = data_dir('find-package-index/v7.10.1/node-v7.10.1-linux-x64.tar.gz')
self.assertEqual(exp_link, link)
def test_find_latest_package(self):
d = TestDamnode()
link = d.find_package(data_dir('find-package-index'), None)
exp_link = data_dir('find-package-index/v8.2.1/node-v8.2.1-linux-x64.tar.gz')
self.assertEqual(exp_link, link)
def test_find_remote_package(self):
d = TestDamnode()
link = d.find_package(TestDamnode.default_index, (7, 10, 1))
self.assertIsNotNone(link)
self.assertRegexpMatches(link, r'node-v7\.10\.1-linux-x64\.tar\.gz$')
class NameTest(TestCase):
def test_has_package_suffix(self):
d = Damnode()
self.assertTrue(d.has_package_suffix('file.tar.gz'))
self.assertTrue(d.has_package_suffix('file.zip'))
def test_is_url(self):
d = Damnode()
self.assertTrue(d.is_url('http://localhost'))
self.assertTrue(d.is_url('https://localhost'))
self.assertTrue(d.is_url('file://localhost'))
self.assertFalse(d.is_url('~/Download'))
def test_parse_package_name(self):
d = Damnode()
self.assertRaisesRegexp(
ValueError,
r"Invalid package name 'node.*', suffix must be one of \[",
d.parse_package_name, 'node-v8.1.2-win-x64.superzip')
self.assertEqual(((8, 1, 2), 'linux', 'x64', 'tar.gz'),
d.parse_package_name('node-v8.1.2-linux-x64.tar.gz'))
self.assertRaisesRegexp(
ValueError,
r"Invalid package name 'foobar.*', it does not match regex \^node-",
d.parse_package_name, 'foobar-v8.1.2-darwin-x64.tar.gz')
def test_parse_version(self):
d = Damnode()
self.assertEqual((4, None, None), d.parse_version('v4'))
self.assertEqual((5, 12, None), d.parse_version('5.12'))
self.assertEqual((6, 11, 0), d.parse_version('v6.11.0'))
self.assertRaisesRegexp(
ValueError,
r"Invalid version '6.11.0.0', it does not match regex ",
d.parse_version, '6.11.0.0')
self.assertRaises(ValueError, d.parse_version, '6.11.0.0')
self.assertRaises(ValueError, d.parse_version, 'node-v6.11.0')
def test_get_system(self):
d = Damnode()
test = lambda a, b: self.assertEqual(d._get_system(a), b)
test('AIX', 'aix')
test('Darwin', 'darwin')
test('Linux', 'linux')
test('Solaris', 'sunos')
test('Windows', 'win')
def test_get_compatible_arch(self):
d = Damnode()
test = lambda m, p, a: self.assertEqual(d._get_compatible_arch(m, p), a)
# https://en.wikipedia.org/wiki/Uname
test('armv7l', '', 'armv7l')
test('armv6l', '', 'armv6l')
test('i686', '', 'x86')
test('i686', 'i686', 'x86')
test('x86_64', '', 'x64')
test('x86_64', 'x86_64', 'x64')
test('i686-AT386', '', 'x86')
test('amd64', '', 'x64')
test('amd64', 'amd64', 'x64')
test('x86', 'Intel_x86_Family6_Model28_Stepping10', 'x86')
test('i686-64', 'x64', 'x64')
# PowerPC
# - https://en.wikipedia.org/wiki/Uname
# - https://github.com/ansible/ansible/pull/2311
test('ppc', '', 'ppc64') # no ppc packages
test('ppc64', 'ppc64', 'ppc64')
test('ppc64le', 'ppc64le', 'ppc64le')
test('00C57D4D4C00', 'powerpc', 'ppc64')
test('Power Macintosh', 'powerpc', 'ppc64')
# https://stackoverflow.com/questions/31851611/differences-between-arm64-and-aarch64
test('arm64', '', 'arm64')
test('aarch64', '', 'arm64')
# https://en.wikipedia.org/wiki/Linux_on_z_Systems
test('s390', 's390', 's390x') # no s390 packages
test('s390x', 's390x', 's390x')
# Unsupported
test('sparc64', 'sparc64', 'sparc64')
class DownloadTest(TestCase):
def test_download_local_package(self):
d = create_damnode()
with d.download_package(data_dir('local-index/node-v8.1.2-linux-arm64.tar.gz')) as filename:
self.assertEqual(data_dir('cache/node-v8.1.2-linux-arm64.tar.gz'), filename)
def test_download_remote_package(self):
d = create_damnode()
url = 'https://nodejs.org/dist/latest-v6.x/node-v6.11.0-win-x64.zip'
with d.download_package(url) as filename:
self.assertEqual(osp.join(d.cache_dir, 'node-v6.11.0-win-x64.zip'), filename)
mtime = osp.getmtime(filename)
with d.download_package(url) as filename:
self.assertEqual(mtime, osp.getmtime(filename))
def test_download_none_package(self):
d = create_damnode()
try:
with d.download_package('https://nodejs.org/dist/not-node.zip') as filename:
pass
except ValueError as e:
pass
else:
self.fail('Exception not raised')
def test_download_cached_package(self):
d = create_damnode()
url = 'https://nodejs.org/dist/latest-v6.x/node-v6.11.0-darwin-x64.tar.gz'
cached_file = osp.join(d.cache_dir, 'node-v6.11.0-darwin-x64.tar.gz')
with d.download_package(url) as filename:
self.assertEqual(cached_file, filename)
mtime = int(osp.getmtime(filename)) # shutil.copystat() is not perfect
d.cache_dir = data_dir('cache2')
if osp.exists(d.cache_dir):
shutil.rmtree(d.cache_dir)
with d.download_package(cached_file) as filename:
self.assertEqual(data_dir('cache2/node-v6.11.0-darwin-x64.tar.gz'), filename)
self.assertEqual(mtime, int(osp.getmtime(filename)))
def test_download_package_no_cache(self):
d = create_damnode()
d.enable_cache = False
unused_cache_dir = data_dir('cache3')
d.cache_dir = unused_cache_dir
with d.download_package(data_dir('local-index/node-v8.1.2-linux-arm64.tar.gz')) as filename:
self.assertNotEqual(unused_cache_dir, d.cache_dir)
self.assertTrue(filename.startswith(d.cache_dir))
self.assertEqual(unused_cache_dir, d.cache_dir)
self.assertFalse(osp.exists(unused_cache_dir))
class InstallTest(TestCase):
def test_install_wrong_system(self):
with temp_dir() as prefix:
url = 'https://nodejs.org/dist/latest-v8.x/node-v8.1.2-aix-ppc64.tar.gz'
self.assertRaisesRegexp(
ValueError,
r"Package '.*/node-v8.1.2-aix-ppc64.tar.gz' is for aix-ppc64, not for current .*",
self.download_install, url, prefix, True)
def test_install_tgz(self):
with temp_dir() as prefix:
url = 'https://nodejs.org/dist/v8.1.2/node-v8.1.2-linux-x64.tar.gz'
self.download_install(url, prefix)
self.assertFalse(osp.exists(osp.join(prefix, 'CHANGELOG.md')))
self.assertFalse(osp.exists(osp.join(prefix, 'LICENSE')))
self.assertFalse(osp.exists(osp.join(prefix, 'README.md')))
self.assertTrue(osp.isfile(osp.join(prefix, 'bin/node')))
def test_install_win_zip(self):
with temp_dir() as prefix:
url = 'https://nodejs.org/dist/v8.1.2/node-v8.1.2-win-x64.zip'
self.download_install(url, prefix)
self.assertFalse(osp.exists(osp.join(prefix, 'README.md')))
self.assertTrue(osp.isdir(osp.join(prefix, 'node_modules')))
self.assertTrue(osp.isfile(osp.join(prefix, 'node.exe')))
def download_install(self, url, prefix, check_sys_arch=False):
d = Damnode()
d.prefix = prefix
# d.verbose = True
d.check_sys_arch = check_sys_arch
d.download_install_package(url)
# TODO: test uninstall
class TestDamnode(Damnode):
system = 'linux'
architecture = 'x64'
def __init__(self):
super(TestDamnode, self).__init__()
self.cache_dir = data_dir('cache')
def data_dir(*path):
return osp.abspath(osp.join(osp.dirname(__file__), 'test.d', *path))
def create_damnode():
d = Damnode()
d.cache_dir = data_dir('cache')
d.verbose = True
return d
@contextmanager
def temp_dir(clean=True):
dirname = tempfile.mkdtemp()
try:
yield dirname
finally:
if clean:
shutil.rmtree(dirname)
|
|
import tools as ts
import waveletcodec.wave as wvt
import math
from numpy.numarray.numerictypes import Int
import numpy as np
import waveletcodec.entropy as tpy
class speck(object):
#wavelet object
wv = 0
#wavelet data
dt = 0
LIS = []
LSP = []
nextLIS = []
S = []
I = []
n = 0
output = []
    i_partition_size = 0
bit_bucket = 0
log = []
debug = True
f_log = []
logger = None
_idx = 0
_logidx = 0
out_idx = []
def __init__(self):
pass
def compress(self, wavelet, bpp):
self.wv = wavelet
self.dt = wavelet.data
self.bit_bucket = bpp * self.wv.shape[0] * self.wv.shape[1]
self.initialization()
wise_bit = self.n
#sorting
try:
while self.n > 0:
                print(self.n)
last_list = self.LIS.tail
last_pixel = self.LSP.tail
while self.LIS.index != last_list:
l = self.LIS.pop()
self.ProcessS(l)
self.ProcessI()
self.refinement(last_pixel)
self.n -= 1
except EOFError:
#print type(e)
#return [self.wv.cols, self.wv.rows, self.wv.level,
# wise_bit, self.output]
pass
#print "Elegant!!"
r = {}
r['colums'] = self.wv.cols
r['rows'] = self.wv.rows
r['level'] = self.wv.level
r['wisebit'] = self.wv.wise_bit
r['payload'] = self.output
return r
def compress_abac(self, wavelet, bpp):
self.wv = wavelet
self.dt = wavelet.data
self.bit_bucket = bpp * self.wv.rows * self.wv.cols
self.initialization()
wise_bit = self.n
#sorting
try:
while self.n > 0:
                print(self.n)
last_list = self.LIS.tail
last_pixel = self.LSP.tail
while self.LIS.index != last_list:
l = self.LIS.pop()
self.ProcessS(l)
self.ProcessI()
self.refinement(last_pixel)
self.n -= 1
except EOFError as e:
            print(type(e))
return [self.wv.cols, self.wv.rows, self.wv.level,
wise_bit, self.output]
#print "Elegant!!"
return [self.wv.cols, self.wv.rows, self.wv.level,
wise_bit, self.output]
def initialization(self):
X = wvt.get_z_order(self.wv.rows * self.wv.cols)
self.LIS = ts.CircularStack(self.wv.cols * self.wv.rows)
self.nextLIS = ts.CircularStack(self.wv.cols * self.wv.rows)
self.LSP = ts.CircularStack(self.wv.cols * self.wv.rows)
s_size = (self.wv.rows * self.wv.cols / 2 ** (2 * self.wv.level))
self.S = X[:s_size]
del X[:s_size]
self.I = X
maxs = abs(self.wv.data)
self.n = int(math.log(maxs.max(), 2))
self.LIS.push(self.S)
self.i_partition_size = (self.wv.rows / 2 ** self.wv.level) ** 2
self.output = []
self.out_idx = 0
def S_n(self, S):
if len(S) == 0:
return False
T = np.array([i.tolist() for i in S])
return int((abs(self.dt[T[:, 0], T[:, 1]]).max() >= 2 ** self.n))
def ProcessS(self, S):
sn = self.S_n(S)
self.out(sn)
#self.writeLog("ProcessS", "Sn", "S", len(S), sn)
if sn == 1:
if len(S) == 1:
self.out(self.sign(S))
self.push(S)
else:
self.CodeS(S)
else:
self.LIS.push(S)
def CodeS(self, S):
O = self.splitList(S)
for o in O:
sn = self.S_n(o)
self.out(sn)
#self.writeLog("CodeS", "Sn", "S", len(S), sn)
if sn == 1:
if len(o) == 1:
self.out(self.sign(o))
self.push(o)
else:
self.CodeS(o)
else:
self.LIS.push(o)
pass
def ProcessI(self):
sn = self.S_n(self.I)
self.out(sn)
if sn == 1:
self.CodeI()
def CodeI(self):
part = self.splitList(self.I, self.i_partition_size)
self.i_partition_size = self.i_partition_size * 4
for i in range(3):
self.ProcessS(part[i])
self.I = part[3]
self.ProcessI()
def iInitialization(self, width, height, level, wise_bit):
self.wv = wvt.wavelet2D(np.zeros((width, height), dtype=Int), level)
self.dt = self.wv.data
self.wv.level = level
X = wvt.get_z_order(self.wv.rows * self.wv.cols)
self.LIS = ts.CircularStack(self.wv.cols * self.wv.rows)
self.nextLIS = ts.CircularStack(self.wv.cols * self.wv.rows)
self.LSP = ts.CircularStack(self.wv.cols * self.wv.rows)
s_size = (self.wv.rows * self.wv.cols / 2 ** (2 * self.wv.level))
self.S = X[:s_size]
del X[:s_size]
self.I = X
self.n = wise_bit
self.LIS.push(self.S)
self.i_partition_size = (self.wv.rows / 2 ** self.wv.level) ** 2
self._idx = 0
self._logidx = 0
def expand(self, stream, width, height, level, wise_bit):
self.iInitialization(width, height, level, wise_bit)
self.output = stream
#sorting
try:
while self.n > 0:
                print(self.n)
last_list = self.LIS.tail
last_pixel = self.LSP.tail
while self.LIS.index != last_list:
l = self.LIS.pop()
self.iProcessS(l)
self.iProcessI()
self.iRefinement(last_pixel)
self.n -= 1
except EOFError as e:
            print(type(e))
return self.wv
#print "Elegant!!"
return self.wv
def iProcessS(self, S):
sn = self.read()
if sn == 1:
if len(S) == 1:
sg = self.read()
self.createCoeff(S[0], sg)
self.push(S)
else:
self.iCodeS(S)
else:
self.LIS.push(S)
def iCodeS(self, S):
O = self.splitList(S)
for o in O:
sn = self.read()
if sn == 1:
if len(o) == 1:
sg = self.read()
self.createCoeff(o[0], sg)
self.push(o)
else:
self.iCodeS(o)
else:
self.LIS.push(o)
pass
def iProcessI(self):
sn = self.read()
if sn == 1:
self.iCodeI()
def iCodeI(self):
part = self.splitList(self.I, self.i_partition_size)
self.i_partition_size = self.i_partition_size * 4
for i in range(3):
self.iProcessS(part[i])
self.I = part[3]
self.iProcessI()
def sign(self, S):
if self.dt[S[0][0], S[0][1]] >= 0:
return 0
else:
return 1
def splitList(self, l, size=0):
if size == 0:
if len(l) % 4 != 0:
raise IndexError
size = int(len(l) / 4)
return [l[i * size:(i + 1) * size] for i in (0, 1, 2, 3)]
else:
return [l[i * size:(i + 1) * size] for i in (0, 1, 2)] \
+ [l[size * 3:]]
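    # Illustrative behaviour of splitList (values are examples):
    #   splitList(list(range(8)))      -> [[0,1], [2,3], [4,5], [6,7]]
    #   splitList(list(range(12)), 2)  -> [[0,1], [2,3], [4,5], [6,7,8,9,10,11]]
    # i.e. with size == 0 the list is quartered evenly; with an explicit size
    # the first three chunks have that size and the fourth takes the rest.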
def out(self, data):
if self.out_idx < self.bit_bucket:
self.output.append(data)
self.out_idx += 1
else:
raise EOFError
def read(self):
if self._idx < len(self.output):
self._idx += 1
return self.output[self._idx - 1]
else:
raise EOFError
def refinement(self, end):
c = self.LSP.index
while c != end:
i = self.LSP.data[c]
if self.dt[i[0], i[1]] > 0:
coeff = self.dt[i[0], i[1]]
else:
coeff = abs(self.dt[i[0], i[1]])
if (coeff & 2 ** self.n) > 0:
self.out(1)
else:
self.out(0)
c = (c + 1) % self.LSP.size
def iRefinement(self, end):
c = self.LSP.index
while c != end:
i = self.LSP.data[c]
if (self.read()) > 0:
if self.dt[i[0], i[1]] > 0:
self.dt[i[0], i[1]] |= 2 ** self.n
else:
self.dt[i[0], i[1]] = (abs(self.dt[i[0], i[1]])
| 2 ** self.n) * -1
c = (c + 1) % self.LSP.size
def push(self, data):
self.LSP.push(data[0])
def createCoeff(self, coords, sg, wv=None):
if wv is None:
self.dt[coords[0], coords[1]] = (2 ** self.n) * \
((sg * 2 - 1) * -1)
def writeLog(self, method, reason, obj, size, value):
if self.debug:
self.log += [method, reason, obj, size, value]
class fv_speck(speck):
def __init__(self):
pass
def compress(self, wavelet, bpp, lbpp, f_center, alpha, c, gamma):
self.Lbpp = bpp
self.lbpp = lbpp
self.alpha = alpha
self.wv = wavelet
self.dt = wavelet.data
self.P = f_center
self.c = c
self.gamma = gamma
self.calculate_fovea_length()
r = super(fv_speck, self).compress(self.wv, bpp)
r['Lbpp'] = bpp
r['lbpp'] = lbpp
r['alpha'] = alpha
r['center'] = f_center
r['c'] = c
r['gamma'] = gamma
return r
def expand(self, stream, width, height, level, wise_bit, bpp, lbpp,
f_center, alpha, c, gamma):
self.Lbpp = bpp
self.lbpp = lbpp
self.alpha = alpha
self.P = f_center
self.c = c
self.wv = wvt.wavelet2D(np.zeros((width, height), dtype=Int), level)
self.dt = self.wv.data
self.wv.level = level
self.gamma = gamma
self.calculate_fovea_length()
return super(fv_speck, self).expand(stream, width, height, level,
wise_bit)
def refinement(self, end):
        print('refinement I' + str(len(self.I)) + ' ' + str(len(self.output)))
c = self.LSP.index
while c != end:
i = self.LSP.data[c]
fv = self.calculate_fovea_w(i)
if fv >= self.get_current_bpp():
if self.dt[i[0], i[1]] > 0:
coeff = self.dt[i[0], i[1]]
else:
coeff = abs(self.dt[i[0], i[1]])
if (coeff & 2 ** self.n) > 0:
self.out(1)
else:
self.out(0)
c = (c + 1) % self.LSP.size
def iRefinement(self, end):
c = self.LSP.index
while c != end:
i = self.LSP.data[c]
fv = self.calculate_fovea_w(i)
if fv >= (self.get_dec_bpp()):
if (self.read()) > 0:
if self.dt[i[0], i[1]] > 0:
self.dt[i[0], i[1]] |= 2 ** self.n
else:
self.dt[i[0], i[1]] = (abs(self.dt[i[0], i[1]]) |
2 ** self.n) * -1
c = (c + 1) % self.LSP.size
def calculate_fovea_w(self, ij):
try:
P = self.get_center(ij)
except NameError:
return self.Lbpp
d = self.norm(P[1] - ij[1], P[0] - ij[0]) * 2 ** P[2] / \
self.fovea_length
if d < self.alpha:
return self.Lbpp
elif d >= 1:
return self.lbpp
else:
return self.powerlaw(d) * (self.Lbpp - self.lbpp) + self.lbpp
def get_center(self, ij):
if (ij[0] == 0 and ij[1] == 0):
raise NameError("ij on HH")
else:
if ij[0] == 0:
aprx_level_r = self.wv.level + 1
else:
aprx_level_r = math.ceil(math.log(self.wv.rows /
float(ij[0]), 2))
if aprx_level_r > self.wv.level:
aprx_level_r = self.wv.level + 1
if ij[1] == 0:
aprx_level_c = self.wv.level + 1
else:
aprx_level_c = math.ceil(math.log(self.wv.rows /
float(ij[1]), 2))
if aprx_level_c > self.wv.level:
aprx_level_c = self.wv.level + 1
if (aprx_level_r > self.wv.level) and \
(aprx_level_c > self.wv.level):
# raise NameError("ij on HH")
y = float(self.P[0]) / 2 ** (aprx_level_r - 1)
x = float(self.P[1]) / 2 ** (aprx_level_r - 1)
return (y, x, aprx_level_r - 1)
if aprx_level_r <= aprx_level_c:
aprx_level = aprx_level_r
else:
aprx_level = aprx_level_c
y = float(self.P[0]) / 2 ** aprx_level
x = float(self.P[1]) / 2 ** aprx_level
if aprx_level_r == aprx_level:
y += float(self.wv.rows) / 2 ** aprx_level
if aprx_level_c == aprx_level:
x += float(self.wv.cols) / 2 ** aprx_level
return (y, x, aprx_level)
def calculate_fovea_length(self):
H = self.wv.rows
W = self.wv.cols
k = np.zeros(4)
k[0] = self.norm(self.P[0], H - self.P[1])
k[1] = self.norm(W - self.P[0], self.P[1])
k[2] = self.norm(W - self.P[0], H - self.P[1])
        # The original computed the (P[0], H - P[1]) corner twice; use the
        # remaining (P[0], P[1]) corner so all four image corners are covered.
        k[3] = self.norm(self.P[0], self.P[1])
self.fovea_length = k.max()
def printFoveaWindow(self):
window = np.zeros((self.wv.rows, self.wv.cols))
        points = self.wv.get_z_order(self.wv.rows * self.wv.cols)
for i in points:
window[tuple(i)] = self.calculate_fovea_w(i)
return window
def get_current_bpp(self):
bpp = len(self.output)
bpp /= float(self.wv.rows * self.wv.cols)
return bpp
def get_dec_bpp(self):
bpp = self._idx
bpp /= float(self.wv.rows * self.wv.cols)
return bpp
def norm(self, x, y):
mx = abs(x)
if mx < abs(y):
mx = abs(y)
return mx # math.sqrt(float(x**2 + y ** 2))
def powerlaw(self, n):
return self.c * (1 - ((n - self.alpha) / (1 - self.alpha))) \
** self.gamma
class ar_speck(speck):
_cdc = None
def __init__(self):
pass
def compress(self, wavelet, bpp):
self._cdc = tpy.abac([0, 1])
self._cdc._initialize()
r = super(ar_speck, self).compress(wavelet, bpp)
r['abac'] = self._cdc.length()
return r
def out(self, data):
if self.out_idx < self.bit_bucket or \
self._cdc.length() < self.bit_bucket:
self.output.append(data)
self.out_idx += 1
else:
raise EOFError
class ar_fvspeck(fv_speck):
_cdc = None
def __init__(self):
pass
def compress(self, wavelet, bpp, lbpp, f_center, alpha, c, gamma):
self._cdc = tpy.abac([0, 1])
self._cdc._initialize()
r = super(ar_fvspeck, self).compress(
wavelet, bpp, lbpp, f_center, alpha, c, gamma)
r['abac'] = self._cdc.length()
return r
def out(self, data):
if self.out_idx < self.bit_bucket or \
self._cdc.length() < self.bit_bucket:
self.output.append(data)
self.out_idx += 1
else:
raise EOFError
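# Hypothetical usage sketch (not part of the original module).  The wavelet
# constructor, the source of `wise_bit`, and the keys of the returned
# statistics dict are assumptions inferred from the calls above, so treat this
# only as an illustration of the compress()/expand() round trip:
#
#     codec = fv_speck()
#     stats = codec.compress(wvt.wavelet2D(coeffs, level), bpp=1.0, lbpp=0.125,
#                            f_center=(row, col), alpha=0.1, c=1.0, gamma=2.0)
#     decoder = fv_speck()
#     rec = decoder.expand(codec.output, width, height, level, wise_bit,
#                          bpp=1.0, lbpp=0.125, f_center=(row, col),
#                          alpha=0.1, c=1.0, gamma=2.0)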
|
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
class TestConnection(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud._http import Connection
return Connection
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_defaults(self):
conn = self._make_one()
self.assertIsNone(conn.credentials)
def test_ctor_explicit(self):
import google.auth.credentials
credentials = mock.Mock(spec=google.auth.credentials.Scoped)
conn = self._make_one(credentials)
credentials.with_scopes.assert_called_once_with(conn.SCOPE)
self.assertIs(conn.credentials, credentials.with_scopes.return_value)
self.assertIsNone(conn._http)
def test_ctor_explicit_http(self):
http = object()
conn = self._make_one(http=http)
self.assertIsNone(conn.credentials)
self.assertIs(conn.http, http)
def test_ctor_credentials_wo_create_scoped(self):
credentials = object()
conn = self._make_one(credentials)
self.assertIs(conn.credentials, credentials)
self.assertIsNone(conn._http)
def test_http_w_existing(self):
conn = self._make_one()
conn._http = http = object()
self.assertIs(conn.http, http)
def test_http_wo_creds(self):
import httplib2
conn = self._make_one()
self.assertIsInstance(conn.http, httplib2.Http)
def test_http_w_creds(self):
import google.auth.credentials
import google_auth_httplib2
credentials = mock.Mock(spec=google.auth.credentials.Credentials)
conn = self._make_one(credentials)
self.assertIsInstance(conn.http, google_auth_httplib2.AuthorizedHttp)
self.assertIs(conn.http.credentials, credentials)
def test_user_agent_format(self):
from pkg_resources import get_distribution
expected_ua = 'gcloud-python/{0}'.format(
get_distribution('google-cloud-core').version)
conn = self._make_one()
self.assertEqual(conn.USER_AGENT, expected_ua)
class TestJSONConnection(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud._http import JSONConnection
return JSONConnection
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _makeMockOne(self, *args, **kw):
class MockConnection(self._get_target_class()):
API_URL_TEMPLATE = '{api_base_url}/mock/{api_version}{path}'
API_BASE_URL = 'http://mock'
API_VERSION = 'vMOCK'
return MockConnection(*args, **kw)
def test_class_defaults(self):
klass = self._get_target_class()
self.assertIsNone(klass.API_URL_TEMPLATE)
self.assertIsNone(klass.API_BASE_URL)
self.assertIsNone(klass.API_VERSION)
def test_ctor_defaults(self):
conn = self._make_one()
self.assertIsNone(conn.credentials)
def test_ctor_explicit(self):
conn = self._make_one(mock.sentinel.credentials)
self.assertIs(conn.credentials, mock.sentinel.credentials)
def test_http_w_existing(self):
conn = self._make_one()
conn._http = http = object()
self.assertIs(conn.http, http)
def test_http_wo_creds(self):
import httplib2
conn = self._make_one()
self.assertIsInstance(conn.http, httplib2.Http)
def test_http_w_creds(self):
import google.auth.credentials
import google_auth_httplib2
credentials = mock.Mock(spec=google.auth.credentials.Credentials)
conn = self._make_one(credentials)
self.assertIsInstance(conn.http, google_auth_httplib2.AuthorizedHttp)
self.assertIs(conn.http.credentials, credentials)
def test_build_api_url_no_extra_query_params(self):
conn = self._makeMockOne()
# Intended to emulate self.mock_template
URI = '/'.join([
conn.API_BASE_URL,
'mock',
conn.API_VERSION,
'foo',
])
self.assertEqual(conn.build_api_url('/foo'), URI)
def test_build_api_url_w_extra_query_params(self):
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
conn = self._makeMockOne()
uri = conn.build_api_url('/foo', {'bar': 'baz'})
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL)
# Intended to emulate mock_template
PATH = '/'.join([
'',
'mock',
conn.API_VERSION,
'foo',
])
self.assertEqual(path, PATH)
parms = dict(parse_qsl(qs))
self.assertEqual(parms['bar'], 'baz')
def test__make_request_no_data_no_content_type_no_headers(self):
conn = self._make_one()
URI = 'http://example.com/test'
http = conn._http = _Http(
{'status': '200', 'content-type': 'text/plain'},
b'',
)
headers, content = conn._make_request('GET', URI)
self.assertEqual(headers['status'], '200')
self.assertEqual(headers['content-type'], 'text/plain')
self.assertEqual(content, b'')
self.assertEqual(http._called_with['method'], 'GET')
self.assertEqual(http._called_with['uri'], URI)
self.assertIsNone(http._called_with['body'])
expected_headers = {
'Accept-Encoding': 'gzip',
'Content-Length': '0',
'User-Agent': conn.USER_AGENT,
}
self.assertEqual(http._called_with['headers'], expected_headers)
def test__make_request_w_data_no_extra_headers(self):
conn = self._make_one()
URI = 'http://example.com/test'
http = conn._http = _Http(
{'status': '200', 'content-type': 'text/plain'},
b'',
)
conn._make_request('GET', URI, {}, 'application/json')
self.assertEqual(http._called_with['method'], 'GET')
self.assertEqual(http._called_with['uri'], URI)
self.assertEqual(http._called_with['body'], {})
expected_headers = {
'Accept-Encoding': 'gzip',
'Content-Length': '0',
'Content-Type': 'application/json',
'User-Agent': conn.USER_AGENT,
}
self.assertEqual(http._called_with['headers'], expected_headers)
def test__make_request_w_extra_headers(self):
conn = self._make_one()
URI = 'http://example.com/test'
http = conn._http = _Http(
{'status': '200', 'content-type': 'text/plain'},
b'',
)
conn._make_request('GET', URI, headers={'X-Foo': 'foo'})
self.assertEqual(http._called_with['method'], 'GET')
self.assertEqual(http._called_with['uri'], URI)
self.assertIsNone(http._called_with['body'])
expected_headers = {
'Accept-Encoding': 'gzip',
'Content-Length': '0',
'X-Foo': 'foo',
'User-Agent': conn.USER_AGENT,
}
self.assertEqual(http._called_with['headers'], expected_headers)
def test_api_request_defaults(self):
PATH = '/path/required'
conn = self._makeMockOne()
# Intended to emulate self.mock_template
URI = '/'.join([
conn.API_BASE_URL,
'mock',
'%s%s' % (conn.API_VERSION, PATH),
])
http = conn._http = _Http(
{'status': '200', 'content-type': 'application/json'},
b'{}',
)
self.assertEqual(conn.api_request('GET', PATH), {})
self.assertEqual(http._called_with['method'], 'GET')
self.assertEqual(http._called_with['uri'], URI)
self.assertIsNone(http._called_with['body'])
expected_headers = {
'Accept-Encoding': 'gzip',
'Content-Length': '0',
'User-Agent': conn.USER_AGENT,
}
self.assertEqual(http._called_with['headers'], expected_headers)
def test_api_request_w_non_json_response(self):
conn = self._makeMockOne()
conn._http = _Http(
{'status': '200', 'content-type': 'text/plain'},
b'CONTENT',
)
self.assertRaises(TypeError, conn.api_request, 'GET', '/')
def test_api_request_wo_json_expected(self):
conn = self._makeMockOne()
conn._http = _Http(
{'status': '200', 'content-type': 'text/plain'},
b'CONTENT',
)
self.assertEqual(conn.api_request('GET', '/', expect_json=False),
b'CONTENT')
def test_api_request_w_query_params(self):
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import urlsplit
conn = self._makeMockOne()
http = conn._http = _Http(
{'status': '200', 'content-type': 'application/json'},
b'{}',
)
self.assertEqual(conn.api_request('GET', '/', {'foo': 'bar'}), {})
self.assertEqual(http._called_with['method'], 'GET')
uri = http._called_with['uri']
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL)
# Intended to emulate self.mock_template
PATH = '/'.join([
'',
'mock',
conn.API_VERSION,
'',
])
self.assertEqual(path, PATH)
parms = dict(parse_qsl(qs))
self.assertEqual(parms['foo'], 'bar')
self.assertIsNone(http._called_with['body'])
expected_headers = {
'Accept-Encoding': 'gzip',
'Content-Length': '0',
'User-Agent': conn.USER_AGENT,
}
self.assertEqual(http._called_with['headers'], expected_headers)
def test_api_request_w_headers(self):
from six.moves.urllib.parse import urlsplit
conn = self._makeMockOne()
http = conn._http = _Http(
{'status': '200', 'content-type': 'application/json'},
b'{}',
)
self.assertEqual(
conn.api_request('GET', '/', headers={'X-Foo': 'bar'}), {})
self.assertEqual(http._called_with['method'], 'GET')
uri = http._called_with['uri']
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual('%s://%s' % (scheme, netloc), conn.API_BASE_URL)
# Intended to emulate self.mock_template
PATH = '/'.join([
'',
'mock',
conn.API_VERSION,
'',
])
self.assertEqual(path, PATH)
self.assertEqual(qs, '')
self.assertIsNone(http._called_with['body'])
expected_headers = {
'Accept-Encoding': 'gzip',
'Content-Length': '0',
'User-Agent': conn.USER_AGENT,
'X-Foo': 'bar',
}
self.assertEqual(http._called_with['headers'], expected_headers)
def test_api_request_w_data(self):
import json
DATA = {'foo': 'bar'}
DATAJ = json.dumps(DATA)
conn = self._makeMockOne()
# Intended to emulate self.mock_template
URI = '/'.join([
conn.API_BASE_URL,
'mock',
conn.API_VERSION,
'',
])
http = conn._http = _Http(
{'status': '200', 'content-type': 'application/json'},
b'{}',
)
self.assertEqual(conn.api_request('POST', '/', data=DATA), {})
self.assertEqual(http._called_with['method'], 'POST')
self.assertEqual(http._called_with['uri'], URI)
self.assertEqual(http._called_with['body'], DATAJ)
expected_headers = {
'Accept-Encoding': 'gzip',
'Content-Length': str(len(DATAJ)),
'Content-Type': 'application/json',
'User-Agent': conn.USER_AGENT,
}
self.assertEqual(http._called_with['headers'], expected_headers)
def test_api_request_w_404(self):
from google.cloud.exceptions import NotFound
conn = self._makeMockOne()
conn._http = _Http(
{'status': '404', 'content-type': 'text/plain'},
b'{}'
)
self.assertRaises(NotFound, conn.api_request, 'GET', '/')
def test_api_request_w_500(self):
from google.cloud.exceptions import InternalServerError
conn = self._makeMockOne()
conn._http = _Http(
{'status': '500', 'content-type': 'text/plain'},
b'{}',
)
self.assertRaises(InternalServerError, conn.api_request, 'GET', '/')
def test_api_request_non_binary_response(self):
conn = self._makeMockOne()
http = conn._http = _Http(
{'status': '200', 'content-type': 'application/json'},
u'{}',
)
result = conn.api_request('GET', '/')
# Intended to emulate self.mock_template
URI = '/'.join([
conn.API_BASE_URL,
'mock',
conn.API_VERSION,
'',
])
self.assertEqual(result, {})
self.assertEqual(http._called_with['method'], 'GET')
self.assertEqual(http._called_with['uri'], URI)
self.assertIsNone(http._called_with['body'])
expected_headers = {
'Accept-Encoding': 'gzip',
'Content-Length': '0',
'User-Agent': conn.USER_AGENT,
}
self.assertEqual(http._called_with['headers'], expected_headers)
class _Http(object):
_called_with = None
def __init__(self, headers, content):
from httplib2 import Response
self._response = Response(headers)
self._content = content
def request(self, **kw):
self._called_with = kw
return self._response, self._content
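# Conventional unittest entry point (not part of the original file), added so
# the suite can be executed directly; running it through pytest works as well.
if __name__ == '__main__':
    unittest.main()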
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations:
"""LoadBalancerBackendAddressPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.LoadBalancerBackendAddressPoolListResult"]:
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerBackendAddressPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs: Any
) -> "_models.BackendAddressPool":
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendAddressPool, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.BackendAddressPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
parameters: "_models.BackendAddressPool",
**kwargs: Any
) -> "_models.BackendAddressPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'BackendAddressPool')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
parameters: "_models.BackendAddressPool",
**kwargs: Any
) -> AsyncLROPoller["_models.BackendAddressPool"]:
"""Creates or updates a load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:param parameters: Parameters supplied to the create or update load balancer backend address
pool operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.BackendAddressPool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BackendAddressPool or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.BackendAddressPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
backend_address_pool_name=backend_address_pool_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
backend_address_pool_name=backend_address_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
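# Illustrative usage (not part of the generated file).  This operations class
# is normally reached through the async management client rather than being
# instantiated directly; the client/attribute names below follow the usual
# azure-mgmt-network layout and `subscription_id` is a placeholder value:
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.aio import NetworkManagementClient
#
#     async def show_pool():
#         async with NetworkManagementClient(DefaultAzureCredential(),
#                                            subscription_id) as client:
#             pool = await client.load_balancer_backend_address_pools.get(
#                 "my-rg", "my-lb", "my-pool")
#             print(pool.name)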
|
|
#! /usr/bin/python
import sys
import getopt
import time
import os.path
from subprocess import call
verbosity = 0
evaluateonly = False
dictionaryOutputFile = ''
corpusOutputFile = ''
evaluationOutputFile = ''
vocabularyInputFile = ''
countOutputDirectory = './counts'
order = 1
nrInputFiles = 0
nrTrainInstances = 0
dictionary = {}
dictionaryCounts = []
inputFiles = []
generateCountsFor = []
def printHelp():
print("-h print this help and exit")
print("-o, --order n order of the n-gram language model (currently only n=1 is implemented)")
print(" order can also be n, to only create sentence histories (each line contains n words)")
print("-m, --mfile f save the corpus file in f (in matrix market format)")
print("-d, --dfile f save the dictionary in f")
print("-e, --efile f save the dictionary as an evaluation file")
print("-E, --evaluateonly only save the evaluation file")
print("-V, --vocabulary f read the vocabulary from f. Each line contains one word")
print("-l, --files f read tokenised input files from f. Each file is processed into counts")
print("-c, --counts f read count files from SRILM from f")
print("-C, --countsdir d save the count files generated with -l in directory d (default=./counts)")
print("-v, --verbose n set the verbosity level (default=0)")
def countTrainInstances():
instances = 0
for inputFile in inputFiles:
instances += sum(1 for line in open(inputFile))
return instances
def createOpenDictionary():
instances = 0
for inputFile in inputFiles:
with open(inputFile, 'r') as f:
for line in f:
instances += 1
words = line.rstrip().split()
for word in words:
if not word in dictionary:
dictionary[word] = len(dictionary)+1
return instances
def readVocabularyAsDictionary():
with open(vocabularyInputFile, 'r') as f:
for line in f:
line = line.rstrip()
if not line in dictionary:
dictionary[line] = len(dictionary)+1
def condPrint(level,text):
if(level <= verbosity):
print(text)
def createCountsFor(tokFile):
(d, fn) = os.path.split(tokFile)
(f, e) = os.path.splitext(fn)
countOutputFile = "%s/%s.%s" % (countOutputDirectory, f, "count")
call(["ngram-count", "-text", tokFile, "-order", str(order), "-no-sos", "-no-eos", "-write", countOutputFile, "-gt1min", "0", "-gt2min", "0", "-gt3min", "0", "-gt4min", "0", "-gt5min", "0"])
inputFiles.append(countOutputFile)
def window(fseq, window_size=3):
for i in xrange(len(fseq) - window_size + 1):
yield fseq[i:i+window_size]
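# Example: window(['a', 'b', 'c', 'd'], 2) yields ['a', 'b'], ['b', 'c'], ['c', 'd'].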
def createEvaluationFile():
with open(evaluationOutputFile, 'w') as eof:
for inputFile in generateCountsFor:
with open(inputFile, 'r') as f:
for line in f:
for seq in window(line.rstrip().split(), order):
eof.write("%s\n" % ' '.join(seq))
def createSentenceHistories():
with open(evaluationOutputFile, 'w') as eof:
for inputFile in generateCountsFor:
with open(inputFile, 'r') as f:
for line in f:
words = line.rstrip().split()
for i in range(len(words)):
eof.write("%s\n" % ' '.join(words[0:1+i]))
try:
opts, args = getopt.getopt(sys.argv[1:], 'ho:m:d:e:EV:l:c:C:v:', ['help', 'order=', 'mfile=', 'dfile=', 'efile=', 'evaluateonly', 'vocabulary=', 'files=', 'counts=', 'countsdir=', 'verbose=' ])
except getopt.GetoptError:
printHelp()
sys.exit(2)
for (opt, arg) in opts:
if opt == '-h':
printHelp()
sys.exit()
elif opt in ('-o', '--order'):
order = arg
elif opt in ('-m', '--mfile'):
corpusOutputFile = arg
elif opt in ('-d', '--dfile'):
dictionaryOutputFile = arg
elif opt in ('-e', '--efile'):
evaluationOutputFile = arg
elif opt in ('-E', '--evaluateonly'):
evaluateonly = True
elif opt in ('-V', '--vocabulary'):
vocabularyInputFile = arg
elif opt in ('-l', '--files'):
with open(arg, 'r') as f:
for line in f:
generateCountsFor.append(line.rstrip())
elif opt in ('-c', '--count'):
with open(arg, 'r') as f:
for line in f:
inputFiles.append(line.rstrip())
elif opt in ('-C', '--countdir'):
countOutputDirectory = arg
elif opt in ('-v', '--verbose'):
verbosity = int(arg)
### Evaluation output ###########################
if evaluationOutputFile:
condPrint(2, "> Processing evaluation output file")
if order == 'n':
condPrint(2, " - Creating sentence histories")
createSentenceHistories()
else:
condPrint(2, " - Creating evaluation file")
order = int(order)
createEvaluationFile()
condPrint(2, "< Done processing evaluation output file")
if evaluateonly:
condPrint(2, "-- Evaluate only mode enabled. Done")
sys.exit()
else:
condPrint(2, "-- Skipping evaluation output")
### Generate count files ########################
# `order` may still be a string here (or the literal 'n' for sentence-history
# mode); only generate SRILM count files for genuine numeric n-gram orders.
if len(generateCountsFor) and str(order).isdigit():
condPrint(2, " > Generating %d count files" % len(generateCountsFor))
countStart = time.time()
for line in generateCountsFor:
createCountsFor(line)
condPrint(5, " < Done generating count files in %f" % (time.time() - countStart))
### Output parameters ###########################
condPrint(2, "-- Order: %s" % order)
condPrint(2, "-- Dictionary file: %s" % dictionaryOutputFile)
condPrint(2, "-- Corpus file: %s" % corpusOutputFile)
condPrint(2, "-- Vocabulary file: %s" % vocabularyInputFile)
condPrint(2, "-- Counts directory: %s" % countOutputDirectory)
condPrint(2, "-- Number of input files: %d" % len(inputFiles))
condPrint(2, "-- Number of input files to process: %d" % len(generateCountsFor))
condPrint(2, "-- Verbosity level: %d" % verbosity)
### Vocabulary ##################################
condPrint(2, "-- Processing vocabulary")
vocabStart = time.time()
if vocabularyInputFile:
condPrint(2, "> Reading vocabulary")
readVocabularyAsDictionary()
nrTrainInstances = countTrainInstances()
else:
condPrint(2, "> Creating dictionary with open vocabulary")
dictStart = time.time()
nrTrainInstances = createOpenDictionary()
condPrint(4, " - Processed dictionary/vocabulary in %f seconds" % (time.time() - vocabStart))
condPrint(2, "< Processed dictionary/vocabulary with %d words" % len(dictionary))
###
dictionaryCounts = [0] * len(dictionary)
### Corpus output ###############################
if corpusOutputFile:
condPrint(2, "> Writing corpus to %s" % (corpusOutputFile))
cofStart = time.time()
cof = open(corpusOutputFile, 'w')
cof.write("%%MatrixMarket matrix coordinate real general\n")
cof.write("%============================================\n")
cof.write("% Generated for the files: \n")
for inputFile in inputFiles:
cof.write("% " + inputFile + "\n");
cof.write("%============================================\n")
cof.write("%d %d %d\n" % (len(inputFiles), len(dictionary), nrTrainInstances))
fileNumber = 1
for inputFile in inputFiles:
with open(inputFile, 'r') as f:
for line in f:
(word, value) = line.rstrip().split('\t')
if word in dictionary:
dictionaryCounts[dictionary[word]-1] += int(value)
cof.write("%d %d %d\n" % (fileNumber, dictionary[word], int(value)))
fileNumber += 1
cof.close()
condPrint(4, " - Done writing corpus in %f seconds" % (time.time() - cofStart))
condPrint(5, "< Done writing corpus")
### Dictionary output ##########################
if dictionaryOutputFile:
condPrint(2, "> Writing dictionary to %s" % (dictionaryOutputFile))
dofStart = time.time()
dof = open(dictionaryOutputFile, 'w')
for key in dictionary:
dof.write("%d\t%s\t%d\n" % (dictionary[key], key, dictionaryCounts[dictionary[key]-1]))
dof.close()
condPrint(4, " - Wrote dictionary in %f seconds" % (time.time() - dofStart))
condPrint(5, "< Done writing dictionary")
|
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Polishcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Polishcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
import numpy as np
from .experiments import run_experiments
from ..plots import colors
from .. import __version__
from . import models
from . import methods
from . import metrics
import sklearn
import io
import base64
import os
try:
import matplotlib.pyplot as pl
import matplotlib
except ImportError:
pass
metadata = {
# "runtime": {
# "title": "Runtime",
# "sort_order": 1
# },
# "local_accuracy": {
# "title": "Local Accuracy",
# "sort_order": 2
# },
# "consistency_guarantees": {
# "title": "Consistency Guarantees",
# "sort_order": 3
# },
# "keep_positive_mask": {
# "title": "Keep Positive (mask)",
# "xlabel": "Max fraction of features kept",
# "ylabel": "Mean model output",
# "sort_order": 4
# },
# "keep_negative_mask": {
# "title": "Keep Negative (mask)",
# "xlabel": "Max fraction of features kept",
# "ylabel": "Negative mean model output",
# "sort_order": 5
# },
# "keep_absolute_mask__r2": {
# "title": "Keep Absolute (mask)",
# "xlabel": "Max fraction of features kept",
# "ylabel": "R^2",
# "sort_order": 6
# },
# "keep_absolute_mask__roc_auc": {
# "title": "Keep Absolute (mask)",
# "xlabel": "Max fraction of features kept",
# "ylabel": "ROC AUC",
# "sort_order": 6
# },
# "remove_positive_mask": {
# "title": "Remove Positive (mask)",
# "xlabel": "Max fraction of features removed",
# "ylabel": "Negative mean model output",
# "sort_order": 7
# },
# "remove_negative_mask": {
# "title": "Remove Negative (mask)",
# "xlabel": "Max fraction of features removed",
# "ylabel": "Mean model output",
# "sort_order": 8
# },
# "remove_absolute_mask__r2": {
# "title": "Remove Absolute (mask)",
# "xlabel": "Max fraction of features removed",
# "ylabel": "1 - R^2",
# "sort_order": 9
# },
# "remove_absolute_mask__roc_auc": {
# "title": "Remove Absolute (mask)",
# "xlabel": "Max fraction of features removed",
# "ylabel": "1 - ROC AUC",
# "sort_order": 9
# },
# "keep_positive_resample": {
# "title": "Keep Positive (resample)",
# "xlabel": "Max fraction of features kept",
# "ylabel": "Mean model output",
# "sort_order": 10
# },
# "keep_negative_resample": {
# "title": "Keep Negative (resample)",
# "xlabel": "Max fraction of features kept",
# "ylabel": "Negative mean model output",
# "sort_order": 11
# },
# "keep_absolute_resample__r2": {
# "title": "Keep Absolute (resample)",
# "xlabel": "Max fraction of features kept",
# "ylabel": "R^2",
# "sort_order": 12
# },
# "keep_absolute_resample__roc_auc": {
# "title": "Keep Absolute (resample)",
# "xlabel": "Max fraction of features kept",
# "ylabel": "ROC AUC",
# "sort_order": 12
# },
# "remove_positive_resample": {
# "title": "Remove Positive (resample)",
# "xlabel": "Max fraction of features removed",
# "ylabel": "Negative mean model output",
# "sort_order": 13
# },
# "remove_negative_resample": {
# "title": "Remove Negative (resample)",
# "xlabel": "Max fraction of features removed",
# "ylabel": "Mean model output",
# "sort_order": 14
# },
# "remove_absolute_resample__r2": {
# "title": "Remove Absolute (resample)",
# "xlabel": "Max fraction of features removed",
# "ylabel": "1 - R^2",
# "sort_order": 15
# },
# "remove_absolute_resample__roc_auc": {
# "title": "Remove Absolute (resample)",
# "xlabel": "Max fraction of features removed",
# "ylabel": "1 - ROC AUC",
# "sort_order": 15
# },
# "remove_positive_retrain": {
# "title": "Remove Positive (retrain)",
# "xlabel": "Max fraction of features removed",
# "ylabel": "Negative mean model output",
# "sort_order": 11
# },
# "remove_negative_retrain": {
# "title": "Remove Negative (retrain)",
# "xlabel": "Max fraction of features removed",
# "ylabel": "Mean model output",
# "sort_order": 12
# },
# "keep_positive_retrain": {
# "title": "Keep Positive (retrain)",
# "xlabel": "Max fraction of features kept",
# "ylabel": "Mean model output",
# "sort_order": 6
# },
# "keep_negative_retrain": {
# "title": "Keep Negative (retrain)",
# "xlabel": "Max fraction of features kept",
# "ylabel": "Negative mean model output",
# "sort_order": 7
# },
# "batch_remove_absolute__r2": {
# "title": "Batch Remove Absolute",
# "xlabel": "Fraction of features removed",
# "ylabel": "1 - R^2",
# "sort_order": 13
# },
# "batch_keep_absolute__r2": {
# "title": "Batch Keep Absolute",
# "xlabel": "Fraction of features kept",
# "ylabel": "R^2",
# "sort_order": 8
# },
# "batch_remove_absolute__roc_auc": {
# "title": "Batch Remove Absolute",
# "xlabel": "Fraction of features removed",
# "ylabel": "1 - ROC AUC",
# "sort_order": 13
# },
# "batch_keep_absolute__roc_auc": {
# "title": "Batch Keep Absolute",
# "xlabel": "Fraction of features kept",
# "ylabel": "ROC AUC",
# "sort_order": 8
# },
# "linear_shap_corr": {
# "title": "Linear SHAP (corr)"
# },
# "linear_shap_ind": {
# "title": "Linear SHAP (ind)"
# },
# "coef": {
# "title": "Coefficents"
# },
# "random": {
# "title": "Random"
# },
# "kernel_shap_1000_meanref": {
# "title": "Kernel SHAP 1000 mean ref."
# },
# "sampling_shap_1000": {
# "title": "Sampling SHAP 1000"
# },
# "tree_shap_tree_path_dependent": {
# "title": "Tree SHAP"
# },
# "saabas": {
# "title": "Saabas"
# },
# "tree_gain": {
# "title": "Gain/Gini Importance"
# },
# "mean_abs_tree_shap": {
# "title": "mean(|Tree SHAP|)"
# },
# "lasso_regression": {
# "title": "Lasso Regression"
# },
# "ridge_regression": {
# "title": "Ridge Regression"
# },
# "gbm_regression": {
# "title": "Gradient Boosting Regression"
# }
}
benchmark_color_map = {
"tree_shap": "#1E88E5",
"deep_shap": "#1E88E5",
"linear_shap_corr": "#1E88E5",
"linear_shap_ind": "#ff0d57",
"coef": "#13B755",
"random": "#999999",
"const_random": "#666666",
"kernel_shap_1000_meanref": "#7C52FF"
}
# negated_metrics = [
# "runtime",
# "remove_positive_retrain",
# "remove_positive_mask",
# "remove_positive_resample",
# "keep_negative_retrain",
# "keep_negative_mask",
# "keep_negative_resample"
# ]
# one_minus_metrics = [
# "remove_absolute_mask__r2",
# "remove_absolute_mask__roc_auc",
# "remove_absolute_resample__r2",
# "remove_absolute_resample__roc_auc"
# ]
def get_method_color(method):
for l in getattr(methods, method).__doc__.split("\n"):
l = l.strip()
if l.startswith("color = "):
v = l.split("=")[1].strip()
if v.startswith("red_blue_circle("):
return colors.red_blue_circle(float(v[16:-1]))
else:
return v
return "#000000"
def get_method_linestyle(method):
for l in getattr(methods, method).__doc__.split("\n"):
l = l.strip()
if l.startswith("linestyle = "):
return l.split("=")[1].strip()
return "solid"
def get_metric_attr(metric, attr):
for l in getattr(metrics, metric).__doc__.split("\n"):
l = l.strip()
# string
prefix = attr+" = \""
suffix = "\""
if l.startswith(prefix) and l.endswith(suffix):
return l[len(prefix):-len(suffix)]
# number
prefix = attr+" = "
if l.startswith(prefix):
return float(l[len(prefix):])
return ""
def plot_curve(dataset, model, metric, cmap=benchmark_color_map):
experiments = run_experiments(dataset=dataset, model=model, metric=metric)
pl.figure()
method_arr = []
for (name,(fcounts,scores)) in experiments:
_,_,method,_ = name
transform = get_metric_attr(metric, "transform")
if transform == "negate":
scores = -scores
elif transform == "one_minus":
scores = 1 - scores
auc = sklearn.metrics.auc(fcounts, scores) / fcounts[-1]
method_arr.append((auc, method, scores))
for (auc,method,scores) in sorted(method_arr):
method_title = getattr(methods, method).__doc__.split("\n")[0].strip()
l = "{:6.3f} - ".format(auc) + method_title
pl.plot(
fcounts / fcounts[-1], scores, label=l,
color=get_method_color(method), linewidth=2,
linestyle=get_method_linestyle(method)
)
metric_title = getattr(metrics, metric).__doc__.split("\n")[0].strip()
pl.xlabel(get_metric_attr(metric, "xlabel"))
pl.ylabel(get_metric_attr(metric, "ylabel"))
model_title = getattr(models, dataset+"__"+model).__doc__.split("\n")[0].strip()
pl.title(metric_title + " - " + model_title)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
ahandles, alabels = pl.gca().get_legend_handles_labels()
pl.legend(reversed(ahandles), reversed(alabels))
return pl.gcf()
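# Illustrative sketch (not part of the original benchmark code): plot_curve ranks
# methods by the area under their (feature fraction, score) curve, normalized by
# the final feature count so that benchmarks with different numbers of features
# are comparable. The helper below isolates that scalar; it assumes fcounts and
# scores are the 1-D arrays returned by run_experiments for a single method.
def _normalized_auc_sketch(fcounts, scores):
    import numpy as np
    import sklearn.metrics
    fcounts = np.asarray(fcounts, dtype=float)
    scores = np.asarray(scores, dtype=float)
    # Trapezoidal area under the curve divided by the final x value, which
    # equals the x-range whenever fcounts starts at zero.
    return sklearn.metrics.auc(fcounts, scores) / fcounts[-1]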
def plot_human(dataset, model, metric, cmap=benchmark_color_map):
experiments = run_experiments(dataset=dataset, model=model, metric=metric)
pl.figure()
method_arr = []
for (name,(fcounts,scores)) in experiments:
_,_,method,_ = name
diff_sum = np.sum(np.abs(scores[1] - scores[0]))
method_arr.append((diff_sum, method, scores[0], scores[1]))
inds = np.arange(3) # the x locations for the groups
inc_width = (1.0 / len(method_arr)) * 0.8
width = inc_width * 0.9
pl.bar(inds, method_arr[0][2], width, label="Human Consensus", color="black", edgecolor="white")
i = 1
line_style_to_hatch = {
"dashed": "///",
"dotted": "..."
}
for (diff_sum, method, _, methods_attrs) in sorted(method_arr):
method_title = getattr(methods, method).__doc__.split("\n")[0].strip()
l = "{:.2f} - ".format(diff_sum) + method_title
pl.bar(
inds + inc_width * i, methods_attrs.flatten(), width, label=l, edgecolor="white",
color=get_method_color(method), hatch=line_style_to_hatch.get(get_method_linestyle(method), None)
)
i += 1
metric_title = getattr(metrics, metric).__doc__.split("\n")[0].strip()
pl.xlabel("Features in the model")
pl.ylabel("Feature attribution value")
model_title = getattr(models, dataset+"__"+model).__doc__.split("\n")[0].strip()
pl.title(metric_title + " - " + model_title)
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('left')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
ahandles, alabels = pl.gca().get_legend_handles_labels()
#pl.legend(ahandles, alabels)
pl.xticks(np.array([0, 1, 2, 3]) - (inc_width + width)/2, ["", "", "", ""])
pl.gca().xaxis.set_minor_locator(matplotlib.ticker.FixedLocator([0.4, 1.4, 2.4]))
pl.gca().xaxis.set_minor_formatter(matplotlib.ticker.FixedFormatter(["Fever", "Cough", "Headache"]))
pl.gca().tick_params(which='minor', length=0)
pl.axhline(0, color="#aaaaaa", linewidth=0.5)
box = pl.gca().get_position()
pl.gca().set_position([
box.x0, box.y0 + box.height * 0.3,
box.width, box.height * 0.7
])
# Put a legend below current axis
pl.gca().legend(ahandles, alabels, loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2)
return pl.gcf()
def _human_score_map(human_consensus, methods_attrs):
""" Converts human agreement differences to numerical scores for coloring.
"""
v = 1 - min(np.sum(np.abs(methods_attrs - human_consensus)) / (np.abs(human_consensus).sum() + 1), 1.0)
return v
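# Worked example (illustrative only, not used by the benchmark): with a human
# consensus of [2, 1, 0] and a method attribution of [2, 1, 1], the absolute
# differences sum to 1 and the consensus magnitude is 3, so the score is
# 1 - min(1 / (3 + 1), 1) = 0.75. A perfect match scores 1.0, and a method whose
# total deviation reaches the clamp scores 0.0.
def _human_score_map_example():
    import numpy as np
    consensus = np.array([2.0, 1.0, 0.0])
    attribution = np.array([2.0, 1.0, 1.0])
    score = _human_score_map(consensus, attribution)
    assert abs(score - 0.75) < 1e-12
    return score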
def make_grid(scores, dataset, model, normalize=True, transform=True):
color_vals = {}
metric_sort_order = {}
for (_,_,method,metric),(fcounts,score) in filter(lambda x: x[0][0] == dataset and x[0][1] == model, scores):
metric_sort_order[metric] = get_metric_attr(metric, "sort_order")
if metric not in color_vals:
color_vals[metric] = {}
if transform:
transform_type = get_metric_attr(metric, "transform")
if transform_type == "negate":
score = -score
elif transform_type == "one_minus":
score = 1 - score
elif transform_type == "negate_log":
score = -np.log10(score)
if fcounts is None:
color_vals[metric][method] = score
elif fcounts == "human":
color_vals[metric][method] = _human_score_map(*score)
else:
auc = sklearn.metrics.auc(fcounts, score) / fcounts[-1]
color_vals[metric][method] = auc
# print(metric_sort_order)
# col_keys = sorted(list(color_vals.keys()), key=lambda v: metric_sort_order[v])
# print(col_keys)
col_keys = list(color_vals.keys())
row_keys = list(set([v for k in col_keys for v in color_vals[k].keys()]))
data = -28567 * np.ones((len(row_keys), len(col_keys)))
for i in range(len(row_keys)):
for j in range(len(col_keys)):
data[i,j] = color_vals[col_keys[j]][row_keys[i]]
assert np.sum(data == -28567) == 0, "There are missing data values!"
if normalize:
data = (data - data.min(0)) / (data.max(0) - data.min(0) + 1e-8)
    # sort by performance
inds = np.argsort(-data.mean(1))
row_keys = [row_keys[i] for i in inds]
data = data[inds,:]
return row_keys, col_keys, data
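# Minimal sketch (illustrative only) of the two post-processing steps at the end
# of make_grid, shown on a toy matrix whose rows are methods and whose columns
# are metrics. It assumes nothing beyond plain numpy.
def _normalize_and_rank_sketch(data, row_keys):
    import numpy as np
    data = np.asarray(data, dtype=float)
    # Min-max normalize each column so every metric contributes on a 0-1 scale;
    # the epsilon guards against constant columns.
    data = (data - data.min(0)) / (data.max(0) - data.min(0) + 1e-8)
    # Rank methods by mean normalized score, best first.
    order = np.argsort(-data.mean(1))
    return [row_keys[i] for i in order], data[order, :]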
from matplotlib.colors import LinearSegmentedColormap
red_blue_solid = LinearSegmentedColormap('red_blue_solid', {
'red': ((0.0, 198./255, 198./255),
(1.0, 5./255, 5./255)),
'green': ((0.0, 34./255, 34./255),
(1.0, 198./255, 198./255)),
'blue': ((0.0, 5./255, 5./255),
(1.0, 24./255, 24./255)),
'alpha': ((0.0, 1, 1),
(1.0, 1, 1))
})
from IPython.core.display import HTML
def plot_grids(dataset, model_names, out_dir=None):
if out_dir is not None:
os.mkdir(out_dir)
scores = []
for model in model_names:
scores.extend(run_experiments(dataset=dataset, model=model))
prefix = "<style type='text/css'> .shap_benchmark__select:focus { outline-width: 0 }</style>"
out = "" # background: rgb(30, 136, 229)
# out += "<div style='font-weight: regular; font-size: 24px; text-align: center; background: #f8f8f8; color: #000; padding: 20px;'>SHAP Benchmark</div>\n"
# out += "<div style='height: 1px; background: #ddd;'></div>\n"
#out += "<div style='height: 7px; background-image: linear-gradient(to right, rgb(30, 136, 229), rgb(255, 13, 87));'></div>"
out += "<div style='position: fixed; left: 0px; top: 0px; right: 0px; height: 230px; background: #fff;'>\n" # box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19);
out += "<div style='position: absolute; bottom: 0px; left: 0px; right: 0px;' align='center'><table style='border-width: 1px; margin-right: 100px'>\n"
for ind,model in enumerate(model_names):
row_keys, col_keys, data = make_grid(scores, dataset, model)
# print(data)
# print(colors.red_blue_solid(0.))
# print(colors.red_blue_solid(1.))
# return
for metric in col_keys:
save_plot = False
if metric.startswith("human_"):
plot_human(dataset, model, metric)
save_plot = True
elif metric not in ["local_accuracy", "runtime", "consistency_guarantees"]:
plot_curve(dataset, model, metric)
save_plot = True
if save_plot:
buf = io.BytesIO()
pl.gcf().set_size_inches(1200.0/175,1000.0/175)
pl.savefig(buf, format='png', dpi=175)
if out_dir is not None:
pl.savefig("%s/plot_%s_%s_%s.pdf" % (out_dir, dataset, model, metric), format='pdf')
pl.close()
buf.seek(0)
data_uri = base64.b64encode(buf.read()).decode('utf-8').replace('\n', '')
plot_id = "plot__"+dataset+"__"+model+"__"+metric
prefix += "<div onclick='document.getElementById(\"%s\").style.display = \"none\"' style='display: none; position: fixed; z-index: 10000; left: 0px; right: 0px; top: 0px; bottom: 0px; background: rgba(255,255,255,0.9);' id='%s'>" % (plot_id, plot_id)
prefix += "<img width='600' height='500' style='margin-left: auto; margin-right: auto; margin-top: 230px; box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19);' src='data:image/png;base64,%s'>" % data_uri
prefix += "</div>"
model_title = getattr(models, dataset+"__"+model).__doc__.split("\n")[0].strip()
if ind == 0:
out += "<tr><td style='background: #fff; width: 250px'></td></td>"
for j in range(data.shape[1]):
metric_title = getattr(metrics, col_keys[j]).__doc__.split("\n")[0].strip()
out += "<td style='width: 40px; min-width: 40px; background: #fff; text-align: right;'><div style='margin-left: 10px; margin-bottom: -5px; white-space: nowrap; transform: rotate(-45deg); transform-origin: left top 0; width: 1.5em; margin-top: 8em'>" + metric_title + "</div></td>"
out += "</tr>\n"
out += "</table></div></div>\n"
out += "<table style='border-width: 1px; margin-right: 100px; margin-top: 230px;'>\n"
out += "<tr><td style='background: #fff'></td><td colspan='%d' style='background: #fff; font-weight: bold; text-align: center; margin-top: 10px;'>%s</td></tr>\n" % (data.shape[1], model_title)
for i in range(data.shape[0]):
out += "<tr>"
# if i == 0:
# out += "<td rowspan='%d' style='background: #fff; text-align: center; white-space: nowrap; vertical-align: middle; '><div style='font-weight: bold; transform: rotate(-90deg); transform-origin: left top 0; width: 1.5em; margin-top: 8em'>%s</div></td>" % (data.shape[0], model_name)
method_title = getattr(methods, row_keys[i]).__doc__.split("\n")[0].strip()
out += "<td style='background: #ffffff; text-align: right; width: 250px' title='shap.LinearExplainer(model)'>" + method_title + "</td>\n"
for j in range(data.shape[1]):
plot_id = "plot__"+dataset+"__"+model+"__"+col_keys[j]
out += "<td onclick='document.getElementById(\"%s\").style.display = \"block\"' style='padding: 0px; padding-left: 0px; padding-right: 0px; border-left: 0px solid #999; width: 42px; min-width: 42px; height: 34px; background-color: #fff'>" % plot_id
#out += "<div style='opacity: "+str(2*(max(1-data[i,j], data[i,j])-0.5))+"; background-color: rgb" + str(tuple(v*255 for v in colors.red_blue_solid(0. if data[i,j] < 0.5 else 1.)[:-1])) + "; height: "+str((30*max(1-data[i,j], data[i,j])))+"px; margin-left: auto; margin-right: auto; width:"+str((30*max(1-data[i,j], data[i,j])))+"px'></div>"
out += "<div style='opacity: "+str(1)+"; background-color: rgb" + str(tuple(int(v*255) for v in colors.red_blue_no_bounds(5*(data[i,j]-0.8))[:-1])) + "; height: "+str((30*data[i,j]))+"px; margin-left: auto; margin-right: auto; width:"+str((30*data[i,j]))+"px'></div>"
#out += "<div style='float: left; background-color: #eee; height: 10px; width: "+str((40*(1-data[i,j])))+"px'></div>"
out += "</td>\n"
out += "</tr>\n" #
out += "<tr><td colspan='%d' style='background: #fff'></td></tr>" % (data.shape[1] + 1)
out += "</table>"
out += "<div style='position: fixed; left: 0px; top: 0px; right: 0px; text-align: left; padding: 20px; text-align: right'>\n"
out += "<div style='float: left; font-weight: regular; font-size: 24px; color: #000;'>SHAP Benchmark <span style='font-size: 14px; color: #777777;'>v"+__version__+"</span></div>\n"
# select {
# margin: 50px;
# width: 150px;
# padding: 5px 35px 5px 5px;
# font-size: 16px;
# border: 1px solid #ccc;
# height: 34px;
# -webkit-appearance: none;
# -moz-appearance: none;
# appearance: none;
# background: url(http://www.stackoverflow.com/favicon.ico) 96% / 15% no-repeat #eee;
# }
#out += "<div style='display: inline-block; margin-right: 20px; font-weight: normal; text-decoration: none; font-size: 18px; color: #000;'>Dataset:</div>\n"
out += "<select id='shap_benchmark__select' onchange=\"document.location = '../' + this.value + '/index.html'\"dir='rtl' class='shap_benchmark__select' style='font-weight: normal; font-size: 20px; color: #000; padding: 10px; background: #fff; border: 1px solid #fff; -webkit-appearance: none; appearance: none;'>\n"
out += "<option value='human' "+("selected" if dataset == "human" else "")+">Agreement with Human Intuition</option>\n"
out += "<option value='corrgroups60' "+("selected" if dataset == "corrgroups60" else "")+">Correlated Groups 60 Dataset</option>\n"
out += "<option value='independentlinear60' "+("selected" if dataset == "independentlinear60" else "")+">Independent Linear 60 Dataset</option>\n"
#out += "<option>CRIC</option>\n"
out += "</select>\n"
#out += "<script> document.onload = function() { document.getElementById('shap_benchmark__select').value = '"+dataset+"'; }</script>"
#out += "<div style='display: inline-block; margin-left: 20px; font-weight: normal; text-decoration: none; font-size: 18px; color: #000;'>CRIC</div>\n"
out += "</div>\n"
# output the legend
out += "<table style='border-width: 0px; width: 100px; position: fixed; right: 50px; top: 200px; background: rgba(255, 255, 255, 0.9)'>\n"
out += "<tr><td style='background: #fff; font-weight: normal; text-align: center'>Higher score</td></tr>\n"
legend_size = 21
for i in range(legend_size-9):
out += "<tr>"
out += "<td style='padding: 0px; padding-left: 0px; padding-right: 0px; border-left: 0px solid #999; height: 34px'>"
val = (legend_size-i-1) / (legend_size-1)
out += "<div style='opacity: 1; background-color: rgb" + str(tuple(int(v*255) for v in colors.red_blue_no_bounds(5*(val-0.8)))[:-1]) + "; height: "+str(30*val)+"px; margin-left: auto; margin-right: auto; width:"+str(30*val)+"px'></div>"
out += "</td>"
out += "</tr>\n" #
out += "<tr><td style='background: #fff; font-weight: normal; text-align: center'>Lower score</td></tr>\n"
out += "</table>\n"
if out_dir is not None:
with open(out_dir + "/index.html", "w") as f:
f.write("<html><body style='margin: 0px; font-size: 16px; font-family: \"Myriad Pro\", Arial, sans-serif;'><center>")
f.write(prefix)
f.write(out)
f.write("</center></body></html>")
else:
return HTML(prefix + out)
|
|
# Author: Charles Brummitt, Kenan Huremovic, Paolo Pin,
# Matthew Bonds, Fernando Vega-Redondo
"""Agent-based model of economic complexity and contagious disruptions.
This model is used to create Figure 7 in the paper "Contagious disruptions and
complexity traps in economic development" (2017) by the above authors.
"""
import numpy as np
import itertools
import scipy.special
import scipy.stats
from operator import itemgetter
from functools import lru_cache
from collections import Counter
class Strategy(object):
"""A strategy is a pair of non-negative integers representing the number
of attempted inputs (`n_inputs_attempted`, called `m` in the paper) and
the number of inputs needed in order to successfully produce
(`n_inputs_needed`, called `tau` in the paper).
"""
def __init__(self, n_inputs_attempted=None, n_inputs_needed=None):
self.n_inputs_attempted = n_inputs_attempted
self.n_inputs_needed = n_inputs_needed
def __repr__(self):
template = 'Strategy(n_inputs_attempted={m}, n_inputs_needed={tau})'
return template.format(m=self.n_inputs_attempted,
tau=self.n_inputs_needed)
def as_dict(self):
return {'n_inputs_attempted': self.n_inputs_attempted,
'n_inputs_needed': self.n_inputs_needed}
def update(self, n_inputs_attempted, n_inputs_needed):
self.n_inputs_attempted = n_inputs_attempted
self.n_inputs_needed = n_inputs_needed
def customer_is_functional_after_production_attempt(
self, customer_begins_functional, n_functional_inputs):
# If no inputs are needed, then there is little to model, and the agent
# does not become functional; see the discussion after equation 1 in
# the paper.
if self.n_inputs_needed == 0:
return customer_begins_functional
else:
return (n_functional_inputs >= self.n_inputs_needed)
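# Small illustrative check of the production rule above (not part of the model
# code): with m = 3 attempted inputs and tau = 2 required inputs an agent
# succeeds exactly when at least 2 of its suppliers are functional, while a
# tau = 0 strategy never changes the agent's functionality.
def _strategy_rule_example():
    s = Strategy(n_inputs_attempted=3, n_inputs_needed=2)
    assert s.customer_is_functional_after_production_attempt(False, 2) is True
    assert s.customer_is_functional_after_production_attempt(True, 1) is False
    trivial = Strategy(n_inputs_attempted=0, n_inputs_needed=0)
    assert trivial.customer_is_functional_after_production_attempt(True, 0) is True
    return s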
class Agent(object):
"""An economic agent who produces goods and services for other agents using
inputs sourced from other agents.
"""
id_generator = itertools.count()
def __init__(self, is_functional, r=1.0, xi=0, economy=None):
self._id = next(self.id_generator)
self.strategy = Strategy()
self.is_functional = bool(is_functional)
self.suppliers = []
self.customers = []
self.amount_more_likely_choose_func_supplier = r
self.pref_attachment_power = xi
self.functional_suppliers_most_recent_attempt = []
self.n_retained_suppliers = None
self.time_between_successive_dysfunction = None
self.economy = economy
def __hash__(self):
return self._id
def __repr__(self):
functional_status = (
'functional' if self.is_functional else 'dysfunctional')
template = ('Agent(id={id}, {status}, {strategy}, '
                    'r={r}, n_customers={n_customers})')
return template.format(
id=self._id,
status=functional_status,
strategy=self.strategy,
n_customers=len(self.customers),
r=self.amount_more_likely_choose_func_supplier)
def out_degree(self):
return len(self.customers)
def in_degree(self):
return self.strategy.n_inputs_attempted
def update_strategy_and_suppliers(
self, choose_suppliers_uniformly_at_random=False):
"""Update the number of inputs needed and attempted, find new suppliers,
and return the the number of inputs needed and attempted as a
dictionary.
The Boolean optional argument `choose_suppliers_uniformly_at_random`
determines whether suppliers are chosen with equal probability or
via some other rule. `choose_suppliers_uniformly_at_random` is set to
True when we initialize the model with preferential attachment because
in the initial condition we do not have any links with which to use the
rule for choosing new links."""
self.update_complexity_and_buffer()
self.find_new_suppliers(choose_suppliers_uniformly_at_random)
return self.strategy
def update_complexity_and_buffer(self):
"""Update the number of inputs needed in order to successfully produce
(`tau` in the paper), the number of attempted inputs (`m` in the
paper), and the choice of input sources.
"""
F = self.economy.fraction_functional_agents()
alpha, beta = self.economy.alpha, self.economy.beta
self.strategy.update(
*compute_optimal_n_inputs_attempted_and_complexity(F, alpha, beta))
def find_new_suppliers(self, choose_suppliers_uniformly_at_random=False):
"""Find new input sources by preferentially choosing suppliers who
were functional in your most recent attempt to produce.
The method also records in `self.n_retained_suppliers` the number of
suppliers who are retained after choosing new suppliers.
The Boolean optional argument `choose_suppliers_uniformly_at_random`
determines whether suppliers are chosen with equal probability or
via another rule that creates some stickiness in links and preferential
attachment to high out-degree nodes.
"""
if choose_suppliers_uniformly_at_random:
new_inputs = np.random.choice(
self.economy.agents,
size=self.strategy.n_inputs_attempted, replace=True)
else:
probabilities_to_choose_each_agent = (
self.prob_choose_each_agent_as_supplier_sticky_pref_attach())
new_inputs = np.random.choice(
self.economy.agents,
size=self.strategy.n_inputs_attempted,
p=probabilities_to_choose_each_agent, replace=True)
self.n_retained_suppliers = sum(
new_supplier in self.suppliers for new_supplier in new_inputs)
for supplier in self.suppliers:
supplier.customers.remove(self)
self.suppliers = new_inputs
for supplier in self.suppliers:
supplier.customers.append(self)
return
def prob_choose_each_agent_as_supplier_sticky_pref_attach(self):
"""Compute the chance of choosing each agent as a supplier with
sticky links and preferential attachment (PA).
The suppliers who were functional in your most recent attempt to
produce are given the weight
`self.amount_more_likely_choose_func_supplier * ((1 + deg) ** xi)`
where `deg` is the supplier's out-degree and `xi` is the power in the
        definition of preferential attachment. Everyone else has weight
`(1 + deg) ** xi` where `deg` is their out-degree. Each new
supplier is chosen independently and with replacement according to
these weights.
"""
all_agents = self.economy.agents
r = self.amount_more_likely_choose_func_supplier
weights = np.empty(len(all_agents))
for i, agent in enumerate(all_agents):
weights[i] = (1 + agent.out_degree())**self.pref_attachment_power
if agent in self.functional_suppliers_most_recent_attempt:
weights[i] *= r
return weights / sum(weights)
def attempt_to_produce(self, time=None):
"""Attempt to produce by sourcing inputs from suppliers.
        Remember which suppliers were functional by storing them in the list
        `functional_suppliers_most_recent_attempt`. The optional `time` argument
        is accepted for bookkeeping by callers but is not currently used.
Returns
-------
success : bool
Whether the agent succeeded in producing (i.e., got
self.strategy.n_inputs_needed or more functional inputs from
its suppliers).
"""
n_functional_suppliers = sum(
supplier.is_functional for supplier in self.suppliers)
self.functional_suppliers_most_recent_attempt = [
supplier
for supplier in self.suppliers if supplier.is_functional]
self.is_functional = (
self.strategy.customer_is_functional_after_production_attempt(
self.is_functional, n_functional_suppliers))
return self.is_functional
def is_vulnerable(self):
"""The agent would become dysfunctional if one more supplier were
to become dysfunctional."""
return (
self.is_functional and
(len(self.functional_suppliers_most_recent_attempt) ==
self.strategy.n_inputs_needed))
# Memoize 4096 calls to this function because it is called frequently (whenever
# agents attempt to produce).
@lru_cache(maxsize=2**12)
def compute_optimal_n_inputs_attempted_and_complexity(
fraction_functional_agents, alpha, beta):
"""Compute the optimal strategy (n_inputs_attempted and complexity) given
the fraction of functional agents."""
strategies = _strategies_that_could_be_best_response(
fraction_functional_agents, alpha, beta)
def strategy_to_expected_utility(m, tau):
return _expected_utility(m, tau, fraction_functional_agents,
alpha, beta)
return maximizer_with_tiebreaker(
strategies, objective_function=strategy_to_expected_utility,
tiebreaker=sum)
def _strategies_that_could_be_best_response(
fraction_functional_agents, alpha, beta):
"""Compute the set of strategies that could be a best response."""
if fraction_functional_agents == 0:
return [(0, 0)]
elif fraction_functional_agents == 1:
gamma = (alpha / beta) ** (1 / (beta - 1))
gamma_floor = int(np.floor(gamma))
gamma_ceil = int(np.ceil(gamma))
return [(gamma_floor, gamma_floor), (gamma_ceil, gamma_ceil)]
elif (0 < fraction_functional_agents < 1):
max_possible_n_inputs_attempted = int(
np.ceil(alpha ** (-1 / (1 - beta))))
return [(m, tau)
for m in range(max_possible_n_inputs_attempted + 1)
for tau in range(0, m + 1)
if (_equation_SI_10(m, tau, fraction_functional_agents,
alpha, beta) or
(0 < tau and tau < m and m < tau**beta / alpha))]
else:
msg = "fraction_functional_agents = {} cannot be outside [0, 1]"
raise ValueError(msg.format(fraction_functional_agents))
def _equation_SI_10(m, tau, F, alpha, beta):
"""Equation SI-10 in the paper."""
product_log_factor = scipy.special.lambertw(
((1 / alpha)**(1 / (1 - beta)) * np.log(F)) / (beta - 1))
return (
(m == tau) and
m < ((beta - 1) * product_log_factor / np.log(F)))
def _expected_utility(m, tau, F, alpha, beta):
"""Compute the expected utility of a given strategy in an economy with a
certain amount of reliability."""
assert m >= 0
assert tau >= 0
assert 0 <= F <= 1
return _prob_success(m, tau, F) * tau ** beta - alpha * m
def _prob_success(m, tau, F):
"""Chance of successfully producing when drawing balls from an urn."""
assert m >= 0
assert tau >= 0
assert 0 <= F <= 1
if m == tau == 0:
return 0
else:
binomial = scipy.stats.binom(n=m, p=F)
chance_get_tau_or_more_successes = binomial.sf(tau - 1)
return chance_get_tau_or_more_successes
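# Sanity-check sketch (illustrative only): the survival-function call above is
# just the upper tail of a Binomial(m, F) distribution, i.e. the chance of
# getting at least tau functional inputs out of m attempts.
def _prob_success_tail_example(m=5, tau=3, F=0.7):
    direct = sum(scipy.stats.binom.pmf(k, m, F) for k in range(tau, m + 1))
    assert abs(_prob_success(m, tau, F) - direct) < 1e-12
    return direct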
def maximizer_with_tiebreaker(array, objective_function, tiebreaker):
array_scores = [(a, objective_function(*a)) for a in array]
max_score = max(array_scores, key=itemgetter(1))
maximizers = [a for a, obj in array_scores if obj == max_score[1]]
return min(maximizers, key=tiebreaker)
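# Toy example (illustrative only): when several strategies achieve the same
# expected utility, the tiebreaker picks the one with the smallest m + tau,
# i.e. the least resource-intensive best response.
def _tiebreaker_example():
    def flat_objective(m, tau):
        return 0.0  # every strategy ties
    tied = [(2, 2), (1, 1), (3, 1)]
    assert maximizer_with_tiebreaker(tied, flat_objective, tiebreaker=sum) == (1, 1)
    return tied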
class Economy(object):
"""A collection of Agents and methods for updating a random Agent and for
collecting information about the state of the economy.
"""
def __init__(self, n_agents, initial_fraction_functional,
alpha, beta, r=1.0, L=1.0, exog_fail=0.0, xi=0):
assert 0 <= initial_fraction_functional <= 1
assert alpha > 0
assert beta > 0
assert n_agents > 1
self.n_agents = n_agents
self.initial_n_functional = int(initial_fraction_functional * n_agents)
self.initial_fraction_functional = initial_fraction_functional
self.amount_more_likely_choose_func_supplier = r
self.xi = xi
self.agents = [
Agent(is_functional, economy=self, r=r, xi=xi) for is_functional in
([True] * self.initial_n_functional +
[False] * (self.n_agents - self.initial_n_functional))]
self.agent_set = set(self.agents)
self.alpha = alpha
self.beta = beta
self.num_times_func_agent_more_likely_chosen = L
self.exog_fail = exog_fail
self.random_agents_queue = []
# Diagnostics
self.n_production_attempts = 0
self.latest_best_response = None
self.latest_producer = None
self.n_customers_of_latest_producer = None
self.n_vulnerable_customers_of_latest_producer = None
self.n_suppliers_of_latest_producer = None
self.change_in_n_functional_from_latest_attempt_to_produce = None
self.n_retained_suppliers_from_latest_attempt_to_produce = None
self.n_exogenous_failures = None
self.time_between_successive_dysfunction = []
self.n_functional = self.initial_n_functional
self.initialize_strategies_and_network()
def __repr__(self):
template = ('Economy(n_agents={n}, initial_fraction_functional={F0}, '
                    'alpha={alpha}, beta={beta}, r={r}, xi={xi})')
parameters = {'n': self.n_agents,
'r': self.amount_more_likely_choose_func_supplier,
'F0': self.initial_fraction_functional,
'alpha': self.alpha, 'beta': self.beta, 'xi': self.xi}
return template.format(**parameters)
def state(self):
"""Return a dictionary containing information about the state of the
economy."""
return {
'latest_best_response_n_inputs_attempted':
self.latest_best_response.n_inputs_attempted,
'latest_best_response_n_inputs_needed':
self.latest_best_response.n_inputs_needed,
'n_customers_of_latest_producer':
self.n_customers_of_latest_producer,
'n_vulnerable_customers_of_latest_producer':
self.n_vulnerable_customers_of_latest_producer,
'n_suppliers_of_latest_producer':
self.n_suppliers_of_latest_producer,
'change_in_n_functional_from_latest_attempt_to_produce':
self.change_in_n_functional_from_latest_attempt_to_produce,
'n_functional': self.n_functional,
'fraction_functional_agents': self.fraction_functional_agents()}
def fraction_functional_agents(self):
return self.n_functional / self.n_agents
def total_input_attempts(self):
"""Compute the total number of (supplier, customer) relationships
(i.e., total number of 'edges' or 'links') in the economy."""
return sum(len(agent.suppliers) for agent in self.agents)
def customer_supplier_functionality_count(self, per_input_attempt=False):
"""Compute the assortativity of the functionality of customers and
suppliers (i.e., the fraction of (customer, supplier) pairs that are
(functional, functional), (functional, dysfunctional), etc.)."""
pair_functionality_counter = Counter()
for customer in self.agents:
for supplier in customer.suppliers:
pair_functionality_counter[
(customer.is_functional, supplier.is_functional)] += 1
if per_input_attempt:
num_input_attempts = self.total_input_attempts()
for key in pair_functionality_counter:
pair_functionality_counter[key] /= num_input_attempts
return pair_functionality_counter
def initialize_strategies_and_network(self):
"""Initialize the strategies and network (i.e., the customer-supplier
relationships) by choosing suppliers uniformly at random. We cannot use
the preferential attachment rule until there is a network of links, so
here we initialize the links to be chosen uniformly at random from
all possible links (i.e., an Erdos-Renyi random graph)."""
for agent in self.agents:
self.latest_best_response = agent.update_strategy_and_suppliers(
choose_suppliers_uniformly_at_random=True)
return
def update_one_step(self):
"""Update the strategy of a random agent, let it attempt to produce,
        and run exogenous failures that cause each functional agent to
        independently fail with probability `exog_fail`.
Returns
-------
success : bool
Whether the agent successfully produced.
"""
success = (
self.update_random_agent_strategy_inputs_and_attempt_to_produce())
self.run_exogenous_failures()
return success
def run_exogenous_failures(self):
"""Each agent fails independently with probability `exog_fail`.
Returns
-------
        n_exogenous_failures: int
The number of exogenous failures that occurred in this step.
"""
if self.exog_fail <= 0:
return 0
functional_agents = [ag for ag in self.agents if ag.is_functional]
assert len(functional_agents) == self.n_functional
whether_each_functional_agent_fails_exogenously = (
np.random.random_sample(size=self.n_functional) < self.exog_fail)
for indx, fails_exogenously in enumerate(
whether_each_functional_agent_fails_exogenously):
if fails_exogenously:
functional_agents[indx].is_functional = False
self.n_functional -= 1
self.n_exogenous_failures = sum(
whether_each_functional_agent_fails_exogenously)
return self.n_exogenous_failures
def update_random_agent_strategy_inputs_and_attempt_to_produce(self):
random_agent = self._get_random_agent()
return self.update_agent_strategy_inputs_and_attempt_to_produce(
random_agent)
def _get_random_agent(self):
"""To speed thigns up, we select random agents 10000 at a time, and
replenish this list when it is empty."""
if len(self.random_agents_queue) == 0:
self._replenish_random_agents_queue()
return self.random_agents_queue.pop()
def _replenish_random_agents_queue(self):
if self.num_times_func_agent_more_likely_chosen == 1.0:
size_of_queue = 10000
else:
size_of_queue = 1
prob_choose_each_agent = self._probabilities_choose_each_agent()
self.random_agents_queue = list(np.random.choice(
self.agents, p=prob_choose_each_agent, replace=True,
size=size_of_queue))
def _probabilities_choose_each_agent(self):
probabilities = np.empty(self.n_agents)
for i, agent in enumerate(self.agents):
if agent.is_functional:
probabilities[i] = self.num_times_func_agent_more_likely_chosen
else:
probabilities[i] = 1
return probabilities / np.sum(probabilities)
def update_agent_strategy_inputs_and_attempt_to_produce(self, agent):
"""Update an agent's best response and suppliers, and have it attempt
to produce.
Returns
-------
success : bool
Whether the agent successfully produced.
"""
assert agent in self.agents
self.record_diagnostics_right_before_production_attempt(agent)
self.latest_best_response = (agent.update_strategy_and_suppliers())
agent_was_functional = agent.is_functional
success = agent.attempt_to_produce(self.n_production_attempts)
self.change_in_n_functional_from_latest_attempt_to_produce = (
success - agent_was_functional)
self.n_functional += (
self.change_in_n_functional_from_latest_attempt_to_produce)
self.n_retained_suppliers_from_latest_attempt_to_produce = (
agent.n_retained_suppliers)
return success
def record_diagnostics_right_before_production_attempt(self, agent):
self.n_production_attempts += 1
self.latest_producer = agent
self.n_customers_of_latest_producer = agent.out_degree()
self.n_vulnerable_customers_of_latest_producer = sum(
customer.is_vulnerable() for customer in agent.customers)
self.n_suppliers_of_latest_producer = (
len(agent.suppliers))
return
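# Minimal usage sketch (not part of the original model code): build a small
# economy and run a few asynchronous update steps while tracking the fraction
# of functional agents. The parameter values are arbitrary illustrations, not
# the ones used for Figure 7 in the paper.
def _run_small_economy_sketch(n_steps=100, seed=0):
    np.random.seed(seed)
    economy = Economy(n_agents=50, initial_fraction_functional=0.9,
                      alpha=0.2, beta=0.5, r=2.0, exog_fail=0.001)
    trajectory = []
    for _ in range(n_steps):
        economy.update_one_step()
        trajectory.append(economy.fraction_functional_agents())
    return trajectory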
|
|
import asyncio
import os.path
from unittest.mock import MagicMock
from asynctest import CoroutineMock
import alarme
from alarme import Application
from tests.common import BaseTest
class ApplicationTest(BaseTest):
def setUp(self):
super().setUp()
self.application = Application()
def test_load_config(self):
package_path, = alarme.__path__
config_path = os.path.join(package_path, '..', 'config_examples', 'full')
self.loop.run_until_complete(
self.application.load_config(config_path)
)
# TODO: Add assertions and tests with different input
def test_exception_handler__handle(self):
exception = MagicMock()
expected_result = MagicMock()
self.application._exception_handler = MagicMock(return_value=expected_result)
result = self.application.exception_handler(exception)
self.assertEqual(result, expected_result)
self.application._exception_handler.assert_called_once_with(exception)
def test_exception_handler__not_handle(self):
class Error(Exception):
pass
exception = Error()
self.application._exception_handler = None
self.assertRaises(Error, self.application.exception_handler, exception)
def test_add_sensor__fresh(self):
id_ = MagicMock()
sensor = MagicMock()
self.application.add_sensor(id_, sensor)
self.assertEqual(self.application.sensors, {id_: sensor})
def test_add_sensor__exists(self):
id1 = MagicMock()
id2 = MagicMock()
sensor1 = MagicMock()
sensor2 = MagicMock()
self.application.sensors = {id1: sensor1}
self.application.add_sensor(id2, sensor2)
self.assertEqual(self.application.sensors, {id1: sensor1, id2: sensor2})
def test_add_state__fresh(self):
id_ = MagicMock()
state = MagicMock()
self.application.add_state(id_, state)
self.assertEqual(self.application.states, {id_: state})
def test_add_state__exists(self):
id1 = MagicMock()
id2 = MagicMock()
state1 = MagicMock()
state2 = MagicMock()
self.application.states = {id1: state1}
self.application.add_state(id2, state2)
self.assertEqual(self.application.states, {id1: state1, id2: state2})
def test_add_action_descriptor__fresh(self):
id_ = MagicMock()
action_descriptor = MagicMock()
self.application.add_action_descriptor(id_, action_descriptor)
self.assertEqual(self.application.action_descriptors, {id_: action_descriptor})
def test_add_action_descriptor__exists(self):
id1 = MagicMock()
id2 = MagicMock()
action_descriptor1 = MagicMock()
action_descriptor2 = MagicMock()
self.application.action_descriptors = {id1: action_descriptor1}
self.application.add_action_descriptor(id2, action_descriptor2)
self.assertEqual(self.application.action_descriptors, {id1: action_descriptor1, id2: action_descriptor2})
def test_set_state__set_replace(self):
old_state = MagicMock()
old_state.deactivate = CoroutineMock()
new_state = MagicMock()
new_state.activate = CoroutineMock()
new_state.reactivatable = False
self.application.state = old_state
self.loop.run_until_complete(self.application.set_state(new_state))
self.assertEqual(self.application.state, new_state)
old_state.deactivate.assert_called_once_with()
new_state.activate.assert_called_once_with()
def test_set_state__set_fresh(self):
new_state = MagicMock()
new_state.activate = CoroutineMock()
new_state.reactivatable = False
self.application.state = None
self.loop.run_until_complete(self.application.set_state(new_state))
self.assertEqual(self.application.state, new_state)
new_state.activate.assert_called_once_with()
def test_set_state__set_reactivatable(self):
state = MagicMock()
state.deactivate = CoroutineMock()
state.activate = CoroutineMock()
state.reactivatable = True
self.application.state = state
self.loop.run_until_complete(self.application.set_state(state))
self.assertEqual(self.application.state, state)
state.deactivate.assert_called_once_with()
state.activate.assert_called_once_with()
def test_set_state__ignore(self):
state = MagicMock()
state.activate = CoroutineMock()
state.reactivatable = False
self.application.state = state
self.loop.run_until_complete(self.application.set_state(state))
self.assertEqual(self.application.state, state)
state.deactivate.assert_not_called()
state.activate.assert_not_called()
def test_run__state(self):
sensor = MagicMock()
sensor.run_forever = CoroutineMock()
async def stop():
while not self.application._app_run_future:
await asyncio.sleep(0.1)
self.application._app_run_future.set_result(None)
self.application.sensors = {sensor.id: sensor}
state = MagicMock()
state.deactivate = CoroutineMock()
self.application.state = state
asyncio.ensure_future(stop())
self.loop.run_until_complete(self.application.run())
self.assertIsNone(self.application._app_run_future)
for sensor in self.application.sensors.values():
sensor.stop.assert_called_once_with()
sensor.run_forever.assert_called_once_with()
self.application.state.deactivate.assert_called_once_with()
def test_run__no_state(self):
sensor = MagicMock()
sensor.run_forever = CoroutineMock()
async def stop():
while not self.application._app_run_future:
await asyncio.sleep(0.1)
self.application._app_run_future.set_result(None)
self.application.sensors = {sensor.id: sensor}
self.application.state = None
asyncio.ensure_future(stop())
self.loop.run_until_complete(self.application.run())
self.assertIsNone(self.application._app_run_future)
for sensor in self.application.sensors.values():
sensor.stop.assert_called_once_with()
sensor.run_forever.assert_called_once_with()
def test_stop__running(self):
self.application._app_run_future = asyncio.Future()
self.application.stop()
self.assertTrue(self.application._app_run_future.done())
def test_stop__not_running(self):
self.application._app_run_future = None
self.application.stop()
def test_notify__state(self):
sensor = MagicMock()
code = MagicMock()
expected_result = MagicMock()
state = MagicMock()
state.notify = CoroutineMock(return_value=expected_result)
self.application.state = state
result = self.loop.run_until_complete(self.application.notify(sensor, code))
self.assertEqual(result, expected_result)
state.notify.assert_called_once_with(sensor, code)
def test_notify__no_state(self):
sensor = MagicMock()
code = MagicMock()
self.application.state = None
result = self.loop.run_until_complete(self.application.notify(sensor, code))
self.assertIsNone(result)
|
|
import warnings
import numpy as np
from numpy.polynomial.hermite_e import HermiteE
from scipy.special import factorial
from scipy.stats import rv_continuous
import scipy.special as special
# TODO:
# * actually solve (31) of Blinnikov & Moessner
# * numerical stability: multiply factorials in logspace?
# * ppf & friends: Cornish & Fisher series, or tabulate/solve
_faa_di_bruno_cache = {
1: [[(1, 1)]],
2: [[(1, 2)], [(2, 1)]],
3: [[(1, 3)], [(2, 1), (1, 1)], [(3, 1)]],
4: [[(1, 4)], [(1, 2), (2, 1)], [(2, 2)], [(3, 1), (1, 1)], [(4, 1)]]}
def _faa_di_bruno_partitions(n):
"""
Return all non-negative integer solutions of the diophantine equation
n*k_n + ... + 2*k_2 + 1*k_1 = n (1)
Parameters
----------
n : int
the r.h.s. of Eq. (1)
Returns
-------
partitions : list
Each solution is itself a list of the form `[(m, k_m), ...]`
for non-zero `k_m`. Notice that the index `m` is 1-based.
    Examples
    --------
>>> _faa_di_bruno_partitions(2)
[[(1, 2)], [(2, 1)]]
>>> for p in _faa_di_bruno_partitions(4):
... assert 4 == sum(m * k for (m, k) in p)
"""
if n < 1:
raise ValueError("Expected a positive integer; got %s instead" % n)
try:
return _faa_di_bruno_cache[n]
except KeyError:
# TODO: higher order terms
        # solve Eq. (31) from Blinnikov & Moessner here
raise NotImplementedError('Higher order terms not yet implemented.')
def cumulant_from_moments(momt, n):
"""Compute n-th cumulant given moments.
Parameters
----------
momt : array_like
`momt[j]` contains `(j+1)`-th moment.
These can be raw moments around zero, or central moments
(in which case, `momt[0]` == 0).
n : int
        which cumulant to calculate (must be >= 1)
Returns
-------
kappa : float
n-th cumulant.
"""
if n < 1:
raise ValueError("Expected a positive integer. Got %s instead." % n)
if len(momt) < n:
raise ValueError("%s-th cumulant requires %s moments, "
"only got %s." % (n, n, len(momt)))
kappa = 0.
for p in _faa_di_bruno_partitions(n):
r = sum(k for (m, k) in p)
term = (-1)**(r - 1) * factorial(r - 1)
for (m, k) in p:
term *= np.power(momt[m - 1] / factorial(m), k) / factorial(k)
kappa += term
kappa *= factorial(n)
return kappa
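# Worked example (illustrative only): for a standard normal the raw moments
# about zero are E[X] = 0, E[X^2] = 1, E[X^3] = 0, E[X^4] = 3, and the
# corresponding cumulants are kappa_2 = 1 and kappa_3 = kappa_4 = 0.
def _cumulant_example_standard_normal():
    moments = [0., 1., 0., 3.]
    assert abs(cumulant_from_moments(moments, 2) - 1.) < 1e-12
    assert abs(cumulant_from_moments(moments, 3)) < 1e-12
    assert abs(cumulant_from_moments(moments, 4)) < 1e-12
    return moments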
## copied from scipy.stats.distributions to avoid the overhead of
## the public methods
_norm_pdf_C = np.sqrt(2*np.pi)
def _norm_pdf(x):
return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_cdf(x):
return special.ndtr(x)
def _norm_sf(x):
return special.ndtr(-x)
class ExpandedNormal(rv_continuous):
"""Construct the Edgeworth expansion pdf given cumulants.
Parameters
----------
cum : array_like
`cum[j]` contains `(j+1)`-th cumulant: cum[0] is the mean,
cum[1] is the variance and so on.
Notes
-----
This is actually an asymptotic rather than convergent series, hence
higher orders of the expansion may or may not improve the result.
In a strongly non-Gaussian case, it is possible that the density
becomes negative, especially far out in the tails.
Examples
--------
Construct the 4th order expansion for the chi-square distribution using
the known values of the cumulants:
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> from scipy.special import factorial
>>> df = 12
>>> chi2_c = [2**(j-1) * factorial(j-1) * df for j in range(1, 5)]
>>> edgw_chi2 = ExpandedNormal(chi2_c, name='edgw_chi2', momtype=0)
Calculate several moments:
>>> m, v = edgw_chi2.stats(moments='mv')
>>> np.allclose([m, v], [df, 2 * df])
True
Plot the density function:
>>> mu, sigma = df, np.sqrt(2*df)
>>> x = np.linspace(mu - 3*sigma, mu + 3*sigma)
>>> fig1 = plt.plot(x, stats.chi2.pdf(x, df=df), 'g-', lw=4, alpha=0.5)
>>> fig2 = plt.plot(x, stats.norm.pdf(x, mu, sigma), 'b--', lw=4, alpha=0.5)
>>> fig3 = plt.plot(x, edgw_chi2.pdf(x), 'r-', lw=2)
>>> plt.show()
References
----------
.. [*] E.A. Cornish and R.A. Fisher, Moments and cumulants in the
specification of distributions, Revue de l'Institut Internat.
de Statistique. 5: 307 (1938), reprinted in
R.A. Fisher, Contributions to Mathematical Statistics. Wiley, 1950.
.. [*] https://en.wikipedia.org/wiki/Edgeworth_series
.. [*] S. Blinnikov and R. Moessner, Expansions for nearly Gaussian
distributions, Astron. Astrophys. Suppl. Ser. 130, 193 (1998)
"""
def __init__(self, cum, name='Edgeworth expanded normal', **kwds):
if len(cum) < 2:
raise ValueError("At least two cumulants are needed.")
self._coef, self._mu, self._sigma = self._compute_coefs_pdf(cum)
self._herm_pdf = HermiteE(self._coef)
if self._coef.size > 2:
self._herm_cdf = HermiteE(-self._coef[1:])
else:
self._herm_cdf = lambda x: 0.
# warn if pdf(x) < 0 for some values of x within 4 sigma
r = np.real_if_close(self._herm_pdf.roots())
r = (r - self._mu) / self._sigma
if r[(np.imag(r) == 0) & (np.abs(r) < 4)].any():
mesg = 'PDF has zeros at %s ' % r
warnings.warn(mesg, RuntimeWarning)
kwds.update({'name': name,
'momtype': 0}) # use pdf, not ppf in self.moment()
super(ExpandedNormal, self).__init__(**kwds)
def _pdf(self, x):
y = (x - self._mu) / self._sigma
return self._herm_pdf(y) * _norm_pdf(y) / self._sigma
def _cdf(self, x):
y = (x - self._mu) / self._sigma
return (_norm_cdf(y) +
self._herm_cdf(y) * _norm_pdf(y))
def _sf(self, x):
y = (x - self._mu) / self._sigma
return (_norm_sf(y) -
self._herm_cdf(y) * _norm_pdf(y))
def _compute_coefs_pdf(self, cum):
# scale cumulants by \sigma
mu, sigma = cum[0], np.sqrt(cum[1])
lam = np.asarray(cum)
for j, l in enumerate(lam):
lam[j] /= cum[1]**j
coef = np.zeros(lam.size * 3 - 5)
coef[0] = 1.
for s in range(lam.size - 2):
for p in _faa_di_bruno_partitions(s+1):
term = sigma**(s+1)
for (m, k) in p:
term *= np.power(lam[m+1] / factorial(m+2), k) / factorial(k)
r = sum(k for (m, k) in p)
coef[s + 1 + 2*r] += term
return coef, mu, sigma
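# Minimal sanity check (illustrative only): with just the first two cumulants
# the Hermite series collapses to the constant polynomial 1, so the expansion
# reduces to a plain normal density with the given mean and variance.
def _expanded_normal_reduces_to_normal():
    dist = ExpandedNormal([0., 1.])
    x = np.linspace(-3., 3., 7)
    assert np.allclose(dist.pdf(x), _norm_pdf(x))
    return dist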
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MapDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import snapshot
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class SnapshotDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase,
parameterized.TestCase):
def _build_snapshot_dataset(self,
num_threads=1,
repeat=False,
pending_snapshot_expiry_seconds=-1,
shard_size_bytes=None):
def ds_fn():
self.snapshot_dir = os.path.join(self.get_temp_dir(), "snapshot")
if not os.path.exists(self.snapshot_dir):
os.mkdir(self.snapshot_dir)
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(
snapshot.snapshot(
self.snapshot_dir,
num_writer_threads=num_threads,
writer_buffer_size=2 * num_threads,
num_reader_threads=num_threads,
reader_buffer_size=2 * num_threads,
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
shard_size_bytes=shard_size_bytes))
if repeat:
dataset = dataset.repeat(2)
return dataset
return ds_fn
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testSnapshotBeforeEpochEnd(self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
outputs = self.gen_outputs(ds_fn, [], 100, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(100))
outputs.extend(
self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(1000))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testCheckpointBeforeOneEpochThenRunFewStepsSmallShardMultiThread(
self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds,
shard_size_bytes=100)
outputs = []
with ops.Graph().as_default() as g:
init_op, get_next_op, saver = self._build_graph(ds_fn)
with self.session(graph=g) as sess:
self._initialize(init_op, sess)
start = 0
end = 100
num_iters = end - start
for _ in range(num_iters):
outputs.append(sess.run(get_next_op))
self._save(sess, saver)
start = 100
end = 400
num_iters = end - start
for _ in range(num_iters):
outputs.append(sess.run(get_next_op))
self.assertSequenceEqual(outputs, range(400))
outputs = outputs[:100]
outputs.extend(
self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(1000))
fp_dir_list = os.listdir(self.snapshot_dir)
self.assertLen(list(fp_dir_list), 2)
for d in fp_dir_list:
if not d.endswith("-graph.pbtxt"):
fp_dir = os.path.join(self.snapshot_dir, d)
run_dir_list = os.listdir(fp_dir)
self.assertLen(list(run_dir_list), 2)
for e in run_dir_list:
if e != "snapshot.metadata":
run_dir = os.path.join(fp_dir, e)
self.assertLen(list(os.listdir(run_dir)), 258)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testCheckpointBeforeOneEpochThenRunFewSteps(
self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
# Generate 200 entries from iterator but save checkpoint after producing
# 100.
outputs = self.gen_outputs(
ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, range(200))
outputs = outputs[:100]
outputs.extend(
self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(1000))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testCheckpointBeforeOneEpochThenRunFewStepsMultipleThreads(
self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
num_threads=2,
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
# Generate 200 entries from iterator but save checkpoint after producing
# 100.
outputs = self.gen_outputs(
ds_fn, [100], 200, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, range(200))
outputs = outputs[:100]
outputs.extend(
self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(1000))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testCheckpointAfterOneEpoch(self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
repeat=True,
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
# Generate 1100 entries from iterator and save checkpoint.
outputs = self.gen_outputs(ds_fn, [], 1100, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(1000)) + list(range(100)))
# Restore from checkpoint and produce the rest of the elements from the
# iterator.
t = self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)
outputs.extend(t)
self.assertSequenceEqual(
outputs,
list(range(1000)) + list(range(100)) + list(range(900)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(pending_snapshot_expiry_seconds=[None, 1])))
def testCheckpointAfterOneEpochThenRunFewSteps(
self, pending_snapshot_expiry_seconds):
ds_fn = self._build_snapshot_dataset(
repeat=True,
pending_snapshot_expiry_seconds=pending_snapshot_expiry_seconds)
    # Generate 1200 entries from the iterator but save a checkpoint after
    # producing 1100.
outputs = self.gen_outputs(
ds_fn, [1100],
1200,
verify_exhausted=False,
save_checkpoint_at_end=False)
self.assertSequenceEqual(
outputs,
list(range(1000)) + list(range(100)) + list(range(100)))
outputs = outputs[:1100]
t = self.gen_outputs(
ds_fn, [], 900, ckpt_saved=True, verify_exhausted=False)
outputs.extend(t)
self.assertSequenceEqual(
outputs, (list(range(1000)) + list(range(100)) + list(range(900))))
if __name__ == "__main__":
test.main()
|
|
import logging
import random
import bson
import modularodm.exceptions
from django.contrib.contenttypes.fields import (GenericForeignKey,
GenericRelation)
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import MultipleObjectsReturned
from django.core.exceptions import ValidationError as DjangoValidationError
from django.db import models
from django.db.models import ForeignKey
from django.db.models.signals import post_save
from django.dispatch import receiver
from include import IncludeQuerySet
from osf.utils.caching import cached_property
from osf.exceptions import ValidationError
from osf.modm_compat import to_django_query
from osf.utils.fields import LowercaseCharField, NonNaiveDateTimeField
ALPHABET = '23456789abcdefghjkmnpqrstuvwxyz'
logger = logging.getLogger(__name__)
def generate_guid(length=5):
while True:
guid_id = ''.join(random.sample(ALPHABET, length))
try:
# is the guid in the blacklist
BlackListGuid.objects.get(guid=guid_id)
except BlackListGuid.DoesNotExist:
# it's not, check and see if it's already in the database
try:
Guid.objects.get(_id=guid_id)
except Guid.DoesNotExist:
# valid and unique guid
return guid_id
def generate_object_id():
return str(bson.ObjectId())
class BaseModel(models.Model):
"""Base model that acts makes subclasses mostly compatible with the
modular-odm ``StoredObject`` interface.
"""
migration_page_size = 50000
objects = models.QuerySet.as_manager()
class Meta:
abstract = True
def __unicode__(self):
return '{}'.format(self.id)
def to_storage(self):
local_django_fields = set([x.name for x in self._meta.concrete_fields])
return {name: self.serializable_value(name) for name in local_django_fields}
@classmethod
def get_fk_field_names(cls):
return [field.name for field in cls._meta.get_fields() if
field.is_relation and not field.auto_created and (field.many_to_one or field.one_to_one) and not isinstance(field, GenericForeignKey)]
@classmethod
def get_m2m_field_names(cls):
return [field.attname or field.name for field in
cls._meta.get_fields() if
field.is_relation and field.many_to_many and not hasattr(field, 'field')]
@classmethod
def load(cls, data, select_for_update=False):
try:
if isinstance(data, basestring):
# Some models (CitationStyle) have an _id that is not a bson
# Looking up things by pk will never work with a basestring
return cls.objects.get(_id=data) if not select_for_update else cls.objects.filter(_id=data).select_for_update().get()
return cls.objects.get(pk=data) if not select_for_update else cls.objects.filter(pk=data).select_for_update().get()
except cls.DoesNotExist:
return None
@classmethod
def find_one(cls, query, select_for_update=False):
try:
if select_for_update:
return cls.objects.filter(to_django_query(query, model_cls=cls)).select_for_update().get()
return cls.objects.get(to_django_query(query, model_cls=cls))
except cls.DoesNotExist:
raise modularodm.exceptions.NoResultsFound()
except cls.MultipleObjectsReturned as e:
raise modularodm.exceptions.MultipleResultsFound(*e.args)
@classmethod
def find(cls, query=None):
if not query:
return cls.objects.all()
else:
return cls.objects.filter(to_django_query(query, model_cls=cls))
@classmethod
def remove(cls, query=None):
return cls.find(query).delete()
@classmethod
def remove_one(cls, obj):
if obj.pk:
return obj.delete()
@property
def _primary_name(self):
return '_id'
@property
def _is_loaded(self):
return bool(self.pk)
def reload(self):
return self.refresh_from_db()
def refresh_from_db(self):
super(BaseModel, self).refresh_from_db()
# Django's refresh_from_db does not uncache GFKs
for field in self._meta.virtual_fields:
if hasattr(field, 'cache_attr') and field.cache_attr in self.__dict__:
del self.__dict__[field.cache_attr]
def clone(self):
"""Create a new, unsaved copy of this object."""
copy = self.__class__.objects.get(pk=self.pk)
copy.id = None
# empty all the fks
fk_field_names = [f.name for f in self._meta.model._meta.get_fields() if isinstance(f, (ForeignKey, GenericForeignKey))]
for field_name in fk_field_names:
setattr(copy, field_name, None)
try:
copy._id = bson.ObjectId()
except AttributeError:
pass
return copy
def save(self, *args, **kwargs):
# Make Django validate on save (like modm)
if not kwargs.get('force_insert') and not kwargs.get('force_update'):
try:
self.full_clean()
except DjangoValidationError as err:
raise ValidationError(*err.args)
return super(BaseModel, self).save(*args, **kwargs)
# TODO: Rename to Identifier?
class Guid(BaseModel):
"""Stores either a short guid or long object_id for any model that inherits from BaseIDMixin.
Each ID field (e.g. 'guid', 'object_id') MUST have an accompanying method, named with
'initialize_<ID type>' (e.g. 'initialize_guid') that generates and sets the field.
"""
primary_identifier_name = '_id'
id = models.AutoField(primary_key=True)
_id = LowercaseCharField(max_length=255, null=False, blank=False, default=generate_guid, db_index=True,
unique=True)
referent = GenericForeignKey()
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
created = NonNaiveDateTimeField(db_index=True, auto_now_add=True)
def __repr__(self):
return '<id:{0}, referent:({1})>'.format(self._id, self.referent.__repr__())
# Override load in order to load by GUID
@classmethod
def load(cls, data, select_for_update=False):
try:
return cls.objects.get(_id=data) if not select_for_update else cls.objects.filter(_id=data).select_for_update().get()
except cls.DoesNotExist:
return None
class Meta:
ordering = ['-created']
get_latest_by = 'created'
index_together = (
('content_type', 'object_id', 'created'),
)
class BlackListGuid(BaseModel):
id = models.AutoField(primary_key=True)
guid = LowercaseCharField(max_length=255, unique=True, db_index=True)
@property
def _id(self):
return self.guid
def generate_guid_instance():
return Guid.objects.create().id
class PKIDStr(str):
    def __new__(cls, _id, pk):
        return str.__new__(cls, _id)
def __init__(self, _id, pk):
self.__pk = pk
def __int__(self):
return self.__pk
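# Illustrative check (not part of the original models): PKIDStr behaves like the
# guid string in string contexts while still exposing the integer primary key
# through int().
def _pkidstr_example():
    wrapped = PKIDStr('abcde', 42)
    assert wrapped == 'abcde'
    assert int(wrapped) == 42
    return wrapped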
class BaseIDMixin(models.Model):
class Meta:
abstract = True
class ObjectIDMixin(BaseIDMixin):
primary_identifier_name = '_id'
_id = models.CharField(max_length=24, default=generate_object_id, unique=True, db_index=True)
def __unicode__(self):
return '_id: {}'.format(self._id)
@classmethod
def load(cls, q, select_for_update=False):
try:
return cls.objects.get(_id=q) if not select_for_update else cls.objects.filter(_id=q).select_for_update().get()
except cls.DoesNotExist:
# modm doesn't throw exceptions when loading things that don't exist
return None
class Meta:
abstract = True
class InvalidGuid(Exception):
pass
class OptionalGuidMixin(BaseIDMixin):
"""
This makes it so that things can **optionally** have guids. Think files.
Things that inherit from this must also inherit from ObjectIDMixin ... probably
"""
__guid_min_length__ = 5
guids = GenericRelation(Guid, related_name='referent', related_query_name='referents')
content_type_pk = models.PositiveIntegerField(null=True, blank=True)
def __unicode__(self):
return '{}'.format(self.get_guid() or self.id)
def get_guid(self, create=False):
if not self.pk:
logger.warn('Implicitly saving object before creating guid')
self.save()
if create:
try:
guid, created = Guid.objects.get_or_create(
object_id=self.pk,
content_type_id=ContentType.objects.get_for_model(self).pk
)
except MultipleObjectsReturned:
# lol, hacks
pass
else:
return guid
return self.guids.first()
class Meta:
abstract = True
class GuidMixinQuerySet(IncludeQuerySet):
def _filter_or_exclude(self, negate, *args, **kwargs):
return super(GuidMixinQuerySet, self)._filter_or_exclude(negate, *args, **kwargs).include('guids')
def all(self):
return super(GuidMixinQuerySet, self).all().include('guids')
def count(self):
return super(GuidMixinQuerySet, self.include(None)).count()
class GuidMixin(BaseIDMixin):
__guid_min_length__ = 5
guids = GenericRelation(Guid, related_name='referent', related_query_name='referents')
content_type_pk = models.PositiveIntegerField(null=True, blank=True)
objects = GuidMixinQuerySet.as_manager()
# TODO: use pre-delete signal to disable delete cascade
def __unicode__(self):
return '{}'.format(self._id)
@cached_property
def _id(self):
try:
guid = self.guids.all()[0]
except IndexError:
return None
if guid:
return guid._id
return None
@_id.setter
def _id(self, value):
# TODO do we really want to allow this?
guid, created = Guid.objects.get_or_create(_id=value)
if created:
guid.object_id = self.pk
guid.content_type = ContentType.objects.get_for_model(self)
guid.save()
elif guid.content_type == ContentType.objects.get_for_model(self) and guid.object_id == self.pk:
# TODO should this up the created for the guid until now so that it appears as the first guid
# for this object?
return
else:
raise InvalidGuid('Cannot indirectly repoint an existing guid, please use the Guid model')
_primary_key = _id
@classmethod
def load(cls, q, select_for_update=False):
# Minor optimization--no need to query if q is None or ''
if not q:
return None
try:
# guids___id__isnull=False forces an INNER JOIN
if select_for_update:
return cls.objects.filter(guids___id__isnull=False, guids___id=q).select_for_update().first()
return cls.objects.filter(guids___id__isnull=False, guids___id=q).first()
except cls.DoesNotExist:
return None
@property
def deep_url(self):
return None
class Meta:
abstract = True
@receiver(post_save)
def ensure_guid(sender, instance, created, **kwargs):
if not issubclass(sender, GuidMixin):
return False
existing_guids = Guid.objects.filter(object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance))
has_cached_guids = hasattr(instance, '_prefetched_objects_cache') and 'guids' in instance._prefetched_objects_cache
if not existing_guids.exists():
# Clear query cache of instance.guids
if has_cached_guids:
del instance._prefetched_objects_cache['guids']
Guid.objects.create(object_id=instance.pk, content_type=ContentType.objects.get_for_model(instance),
_id=generate_guid(instance.__guid_min_length__))
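# Hypothetical sketch: for GuidMixin subclasses nothing needs to be called
# explicitly -- the ensure_guid post_save handler above creates the Guid on
# first save, after which the cached _id property resolves to it.
def _guid_for(instance):
    if instance.pk is None:
        instance.save()  # triggers ensure_guid via the post_save signal
    return instance._id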
|
|
# Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import hashlib
import hmac
import random
import time
import six
from six.moves.urllib import parse as urlparse
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class OAUTHTokenClient(rest_client.RestClient):
api_version = "v3"
def _escape(self, s):
"""Escape a unicode string in an OAuth-compatible fashion."""
safe = b'~'
s = s.encode('utf-8') if isinstance(s, six.text_type) else s
s = urlparse.quote(s, safe)
if isinstance(s, six.binary_type):
s = s.decode('utf-8')
return s
def _generate_params_with_signature(self, client_key, uri,
client_secret=None,
resource_owner_key=None,
resource_owner_secret=None,
callback_uri=None,
verifier=None,
http_method='GET'):
"""Generate OAUTH params along with signature."""
timestamp = six.text_type(int(time.time()))
nonce = six.text_type(random.getrandbits(64)) + timestamp
oauth_params = [
('oauth_nonce', nonce),
('oauth_timestamp', timestamp),
('oauth_version', '1.0'),
('oauth_signature_method', 'HMAC-SHA1'),
('oauth_consumer_key', client_key),
]
if resource_owner_key:
oauth_params.append(('oauth_token', resource_owner_key))
if callback_uri:
oauth_params.append(('oauth_callback', callback_uri))
if verifier:
oauth_params.append(('oauth_verifier', verifier))
# normalize_params
key_values = [(self._escape(k), self._escape(v))
for k, v in oauth_params]
key_values.sort()
parameter_parts = ['{0}={1}'.format(k, v) for k, v in key_values]
normalized_params = '&'.join(parameter_parts)
# normalize_uri
scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
scheme = scheme.lower()
netloc = netloc.lower()
normalized_uri = urlparse.urlunparse((scheme, netloc, path,
params, '', ''))
# construct base string
base_string = self._escape(http_method.upper())
base_string += '&'
base_string += self._escape(normalized_uri)
base_string += '&'
base_string += self._escape(normalized_params)
# sign using hmac-sha1
key = self._escape(client_secret or '')
key += '&'
key += self._escape(resource_owner_secret or '')
key_utf8 = key.encode('utf-8')
text_utf8 = base_string.encode('utf-8')
signature = hmac.new(key_utf8, text_utf8, hashlib.sha1)
sig = binascii.b2a_base64(signature.digest())[:-1].decode('utf-8')
oauth_params.append(('oauth_signature', sig))
return oauth_params
def _generate_oauth_header(self, oauth_params):
authorization_header = {}
authorization_header_parameters_parts = []
for oauth_parameter_name, value in oauth_params:
escaped_name = self._escape(oauth_parameter_name)
escaped_value = self._escape(value)
part = '{0}="{1}"'.format(escaped_name, escaped_value)
authorization_header_parameters_parts.append(part)
authorization_header_parameters = ', '.join(
authorization_header_parameters_parts)
oauth_string = 'OAuth %s' % authorization_header_parameters
authorization_header['Authorization'] = oauth_string
return authorization_header
def create_request_token(self, consumer_key, consumer_secret, project_id):
"""Create request token.
For more information, please refer to the official API reference:
http://developer.openstack.org/api-ref/identity/v3-ext/#create-request-token
"""
endpoint = 'OS-OAUTH1/request_token'
headers = {'Requested-Project-Id': project_id}
oauth_params = self._generate_params_with_signature(
consumer_key,
self.base_url + '/' + endpoint,
client_secret=consumer_secret,
callback_uri='oob',
http_method='POST')
oauth_header = self._generate_oauth_header(oauth_params)
headers.update(oauth_header)
resp, body = self.post(endpoint,
body=None,
headers=headers)
self.expected_success(201, resp.status)
if not isinstance(body, str):
body = body.decode('utf-8')
body = dict(item.split("=") for item in body.split("&"))
return rest_client.ResponseBody(resp, body)
def authorize_request_token(self, request_token_id, role_ids):
"""Authorize request token.
For more information, please refer to the official API reference:
http://developer.openstack.org/api-ref/identity/v3-ext/#authorize-request-token
"""
roles = [{'id': role_id} for role_id in role_ids]
body = {'roles': roles}
post_body = json.dumps(body)
resp, body = self.put("OS-OAUTH1/authorize/%s" % request_token_id,
post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def create_access_token(self, consumer_key, consumer_secret, request_key,
request_secret, oauth_verifier):
"""Create access token.
For more information, please refer to the official API reference:
http://developer.openstack.org/api-ref/identity/v3-ext/#create-access-token
"""
endpoint = 'OS-OAUTH1/access_token'
oauth_params = self._generate_params_with_signature(
consumer_key,
self.base_url + '/' + endpoint,
client_secret=consumer_secret,
resource_owner_key=request_key,
resource_owner_secret=request_secret,
verifier=oauth_verifier,
http_method='POST')
headers = self._generate_oauth_header(oauth_params)
resp, body = self.post(endpoint, body=None, headers=headers)
self.expected_success(201, resp.status)
if not isinstance(body, str):
body = body.decode('utf-8')
body = dict(item.split("=") for item in body.split("&"))
return rest_client.ResponseBody(resp, body)
def get_access_token(self, user_id, access_token_id):
"""Get access token.
For more information, please refer to the official API reference:
http://developer.openstack.org/api-ref/identity/v3-ext/#get-access-token
"""
resp, body = self.get("users/%s/OS-OAUTH1/access_tokens/%s"
% (user_id, access_token_id))
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def revoke_access_token(self, user_id, access_token_id):
"""Revoke access token.
For more information, please refer to the official API reference:
http://developer.openstack.org/api-ref/identity/v3-ext/#revoke-access-token
"""
resp, body = self.delete("users/%s/OS-OAUTH1/access_tokens/%s"
% (user_id, access_token_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def list_access_tokens(self, user_id):
"""List access tokens.
For more information, please refer to the official API reference:
http://developer.openstack.org/api-ref/identity/v3-ext/#list-access-tokens
"""
resp, body = self.get("users/%s/OS-OAUTH1/access_tokens"
% (user_id))
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_access_token_roles(self, user_id, access_token_id):
"""List roles for an access token.
For more information, please refer to the official API reference:
http://developer.openstack.org/api-ref/identity/v3-ext/#list-roles-for-an-access-token
"""
resp, body = self.get("users/%s/OS-OAUTH1/access_tokens/%s/roles"
% (user_id, access_token_id))
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def get_access_token_role(self, user_id, access_token_id, role_id):
"""Show role details for an access token.
For more information, please refer to the official API reference:
http://developer.openstack.org/api-ref/identity/v3-ext/#show-role-details-for-an-access-token
"""
resp, body = self.get("users/%s/OS-OAUTH1/access_tokens/%s/roles/%s"
% (user_id, access_token_id, role_id))
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
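# A standalone sketch (not part of the tempest client above) of the RFC 5849
# HMAC-SHA1 signing that _generate_params_with_signature() performs: the HTTP
# method, the normalized URI and the sorted, percent-encoded parameters are
# joined with '&' and signed with "<consumer_secret>&<token_secret>". All of
# the values below are made up for illustration.
def _example_hmac_sha1_signature():
    method = 'POST'
    uri = 'http://keystone.example.com/v3/OS-OAUTH1/request_token'
    normalized_params = ('oauth_consumer_key=abc&oauth_nonce=xyz'
                         '&oauth_signature_method=HMAC-SHA1'
                         '&oauth_timestamp=1500000000&oauth_version=1.0')
    base_string = '&'.join(urlparse.quote(part, safe='~')
                           for part in (method, uri, normalized_params))
    key = 'consumer-secret' + '&' + ''  # the token secret is empty here
    digest = hmac.new(key.encode('utf-8'), base_string.encode('utf-8'),
                      hashlib.sha1)
    return binascii.b2a_base64(digest.digest())[:-1].decode('utf-8')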
|
|
import cv2
import time
import numpy as np
import math
import os, sys
import pdb
#width = 1920
#height = 1080
width = 512
height = 428
blackImg = np.zeros((height,width,3), np.uint8)
class FingerDetector:
def __init__(self, bottomLine, blurPixelSize, threshVal, bothHands=True, kinect=None):
self.vidSrc = cv2.VideoCapture(0)
self.background = cv2.createBackgroundSubtractorMOG2()
self.buildBackgroundModel(kinect)
self.blurPixelSize = blurPixelSize
self.bothHands = bothHands
self.bottomLine = bottomLine
self.threshVal = threshVal
    def adjustParams(self, k):
        if k == ord('q') and self.blurPixelSize < 30:
            self.blurPixelSize += 2
            print("blur size += 2 ", self.blurPixelSize)
        elif k == ord('w') and self.blurPixelSize > 2:
            self.blurPixelSize -= 2
            print("blur size -= 2 ", self.blurPixelSize)
        elif k == ord('a') and self.threshVal < 251:
            self.threshVal += 5
            print("threshold += 5 ", self.threshVal)
        elif k == ord('s') and self.threshVal > 6:
            self.threshVal -= 5
            print("threshold -= 5 ", self.threshVal)
def continuousFingers(self):
while True:
#frame = self.getFrame()
fingerPoints, fingerImage = self.getFingerPositions()
k = cv2.waitKey(10)
if k == 27:
break
else:
self.adjustParams(k)
cv2.imshow('a', fingerImage)
def getFingerPositions(self, frame=None):
if frame is None:
frame = self.getFrame()
diff = self.background.apply(frame)
diff = self.filterBottom(diff, self.bottomLine)
blackImgCopy = self.getBackgroundCopy()
self.drawBottomLine(blackImgCopy, self.bottomLine)
blur = self.blurFrame(diff, self.blurPixelSize)
thresh = self.thresholdFrame(blur, self.threshVal)
leftHand, rightHand = self.getLargestShapes(thresh, self.bothHands)
numHands = 1
if self.bothHands:
numHands = 2
leftHand, rightHand = self.getHandSides(leftHand, rightHand)
hand = leftHand
isLeftHand = False
fingerPoints = []
if hand is not None:
for i in range(numHands):
self.drawShape(blackImgCopy, hand)
centerOfHand = self.getCenterOfHand(hand)
self.drawCenterOfHand(blackImgCopy, centerOfHand)
hullWithPoints, hullWithoutPoints = self.getConvexHull(hand)
#self.drawConvexHull(blackImgCopy, hullWithoutPoints)
topFingers = self.getFingerPointsFromHull(hullWithoutPoints, centerOfHand)
if topFingers is not None:
fingerPoints.extend(topFingers)
defects = self.getConvexDefects(hand, hullWithPoints)
fingerDefects = self.getFingerConvexDefects(blackImgCopy, defects, hand, centerOfHand)
self.drawDefects(blackImgCopy, centerOfHand, defects, hand)
thumbPoint = self.getThumbPoint(hand, defects, centerOfHand, isLeftHand)
if fingerPoints is not None and thumbPoint is not None:
fingerPoints.append(thumbPoint)
fingerPoints = self.checkForOverlappingPoints(fingerPoints)
self.drawFingerPoints(blackImgCopy, fingerPoints)
#second iteration
hand = rightHand
isLeftHand = True
#return (fingerPoints, diff)
return (fingerPoints, blackImgCopy)
'''frame/ diffing functions'''
def getFrame(self):
ret, frame = self.vidSrc.read()
return frame
def buildBackgroundModel(self, kinect=None):
        print("Hit esc to exit background mode")
cv2.ocl.setUseOpenCL(False)
while True:
frame = None
if kinect is None:
frame = self.getFrame()
else:
frame = kinect.getFrame(kinect.rgbSharedMem)
fgmask = self.background.apply(frame, learningRate=0.1)
cv2.imshow('Foreground', fgmask)
cv2.imshow('Original', frame)
if cv2.waitKey(10) == 27:
break
def getBackgroundCopy(self):
return blackImg.copy()
'''bluring / thresholding functions'''
def blurFrame(self, frame, blurPixelSize):
blur = cv2.medianBlur(frame, blurPixelSize)
return blur
def thresholdFrame(self, frame, threshVal):
maxVal = 255
ret, threshFrame = cv2.threshold(frame, threshVal, maxVal, cv2.THRESH_BINARY)
return threshFrame
'''shape functions'''
def getLargestShapes(self, frame, bothHands=False):
_, contours, contourHeirarchy = cv2.findContours(frame, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
maxContourSize = 0
largestContour = []
secondLargestContour = []
for contour in contours:
if len(contour) > maxContourSize:
maxContourSize = len(contour)
secondLargestContour = largestContour
largestContour = contour
if bothHands:
return (largestContour, secondLargestContour)
return (largestContour, None)
def getConvexHull(self, contour):
hull = None
hull1 = None
if contour is not None and len(contour) > 0:
hull = cv2.convexHull(contour, returnPoints=False)
hull1 = cv2.convexHull(contour)
return hull, hull1
    def filterBottom(self, diff, bottomLine):
        # Zero every row above the bottom line in one vectorized assignment
        # instead of looping over individual pixels.
        diff[:bottomLine] = 0
        return diff
def getConvexDefects(self, contour, hull):
defects = None
if hull is not None and len(hull) > 3 and contour is not None:
defects = cv2.convexityDefects(contour, hull)
return defects
def checkForOverlappingPoints(self, points):
if points is None:
return None
minDist = 5
hasChanged = True
while hasChanged:
hasChanged = False
for idx1 in range(len(points)):
for idx2 in range(len(points)):
if idx1 != idx2 and hasChanged is False:
dist = math.sqrt( math.pow(points[idx1][0] - points[idx2][0], 2) + math.pow(points[idx1][1] - points[idx2][1], 2))
if dist <= minDist:
del points[idx1]
hasChanged = True
return points
'''hand geometry functions'''
def getCenterOfHand(self, contour):
centerOfHand = None
if contour is not None and len(contour) > 0:
handMoments = cv2.moments(contour, binaryImage=1)
if handMoments['m00'] != 0:
centerX = int(handMoments['m10']/handMoments['m00'])
centerY = int(handMoments['m01']/handMoments['m00'])
centerY += centerY*0.1
centerOfHand = (centerX, int(centerY))
return centerOfHand
def getFingerPointsFromHull(self, hull, centerOfHand):
centers = None
if hull is not None and len(hull) > 3 and centerOfHand is not None:
#k means clustering
k = 4
filteredCenters = None
kmeansHull = []
for elem in hull:
if elem[0][1] >= centerOfHand[1]:
kmeansHull.append([np.float32(elem[0][0]), np.float32(elem[0][1])])
kmeansHull = np.asarray(kmeansHull)
if len(kmeansHull) >= k:
maxIters = 100
criteria = (cv2.TERM_CRITERIA_EPS, 10, 0.1)
retval, bestLabels, centers = cv2.kmeans(kmeansHull, k, None, criteria, maxIters, cv2.KMEANS_PP_CENTERS)
centers = centers.tolist()
centers = [[int(x), int(y)] for x,y in centers]
return centers
#not really that useful
def getFingerConvexDefects(self, img, defects, contour, center):
if defects is None:
return None
defects = self.getLongestDefects(defects, 4)
filteredDefects = []
for defect in defects:
s, e, f, d = defect[0]
start = tuple(contour[s][0])
farthest = tuple(contour[f][0])
end = tuple(contour[e][0])
if start is not None and center is not None and end is not None:
if start[1] < center[1] and farthest[1] < center[1] and end[1] < center[1]:
                    filteredDefects.append(defect)
        return filteredDefects
def getLongestDefects(self, defects, n, clockwise=True):
largestDefects = []
usedIdxs = set()
for i in range(n):
maxDist = float("-inf")
maxIdx = -1
for idx, defect in enumerate(defects):
distance = defect[0][3]
if distance > maxDist and idx not in usedIdxs:
maxDist = distance
maxIdx = idx
usedIdxs.add(maxIdx)
usedIdxs = sorted(list(usedIdxs), reverse=clockwise)
for idx in usedIdxs:
largestDefects.append(defects[idx])
return largestDefects
def getThumbPoint(self, contour, defects, centerOfHand, leftHand=True):
if defects is None or contour is None or centerOfHand is None:
return None
maxDistance = 0
longestDefect = None
for defect in defects:
s, e, f, distance = defect[0]
start = tuple(contour[s][0])
farthest = tuple(contour[f][0])
end = tuple(contour[e][0])
if distance > maxDistance:
if leftHand:
#if thumb is on right hand side
if start[0] > centerOfHand[0] and farthest[0] > centerOfHand[0] and end[0] > centerOfHand[0]:
#if start is above and end is below
if start[1] < centerOfHand[1] and end[1] > centerOfHand[1]:
maxDistance = distance
longestDefect = defect.copy()
if leftHand is False:
#if thumb on left hand side
if start[0] < centerOfHand[0] and farthest[0] < centerOfHand[0] and end[0] < centerOfHand[0]:
if end[1] < centerOfHand[1] and start[1] > centerOfHand[1]:
maxDistance = distance
longestDefect = defect.copy()
if longestDefect is None:
return None
s, e, f, d = longestDefect[0]
        if leftHand:
            thumbPoint = ((contour[s][0][0] + contour[f][0][0]) // 2, (contour[s][0][1] + contour[f][0][1]) // 2)
        else:
            thumbPoint = ((contour[e][0][0] + contour[f][0][0]) // 2, (contour[e][0][1] + contour[f][0][1]) // 2)
return thumbPoint
def getHandSides(self, hand1, hand2):
if hand1 is None and hand2 is None:
return (None, None)
        elif hand1 is None:
            return (hand2, None)
        elif hand2 is None:
            return (hand1, None)
hand1Center = self.getCenterOfHand(hand1)
hand2Center = self.getCenterOfHand(hand2)
leftHand = hand1
rightHand = hand2
if hand1Center is not None and hand2Center is not None and hand1Center[0] > hand2Center[0]:
leftHand = hand2
rightHand = hand1
return (leftHand, rightHand)
'''drawing functions'''
def drawShape(self, frame, contour):
if contour is not None and len(contour) >= 1:
cv2.drawContours(frame, contour, -1, (255,255,255), thickness=5)
cv2.fillPoly(frame, pts=[contour], color=(255,255,255))
def drawBottomLine(self, frame, bottomLine):
start = (0, bottomLine)
end = (width, bottomLine)
cv2.line(frame, start, end, (0,255,255), thickness=3)
def drawConvexHull(self, frame, contour):
convexHull = None
if contour is not None:
convexHull = cv2.convexHull(contour)
if convexHull is not None and len(convexHull) > 2:
for idx in range(len(convexHull) - 1):
cv2.line(frame, tuple(convexHull[idx][0]), tuple(convexHull[idx + 1][0]), (0,255,255), thickness=10)
cv2.line(frame, tuple(convexHull[0][0]), tuple(convexHull[-1][0]), (0, 255, 255), thickness= 10)
def drawCenterOfHand(self, frame, centerOfHand):
if centerOfHand is not None:
cv2.circle(frame, centerOfHand, 5, (255, 255, 0), thickness=5)
def drawFingerPoints(self, frame, fingerPoints):
if fingerPoints is not None:
for fingerCoord in fingerPoints:
if fingerCoord is not None:
if type(fingerCoord) is not tuple:
fingerCoord = tuple(fingerCoord)
cv2.circle(frame, fingerCoord, 5, (255, 0, 255), thickness=5)
def drawDefects(self, frame, centerOfHand, defects, contour):
if centerOfHand is not None and defects is not None and contour is not None:
for defect in defects:
s, e, f, d = defect[0]
start = tuple(contour[s][0])
farthest = tuple(contour[f][0])
end = tuple(contour[e][0])
#if start[0] > centerOfHand[0] and farthest[0] > centerOfHand[0] and end[0] > centerOfHand[0]:
cv2.line(frame, start, farthest, (0,255,0), thickness=5)
cv2.line(frame, farthest, end, (0,255,0), thickness=5)
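# A minimal, self-contained demo (not part of the detector above) of the
# (start, end, farthest, distance) tuples that cv2.convexityDefects() returns
# and that getThumbPoint()/drawDefects() unpack. The notched polygon is an
# arbitrary stand-in for a hand silhouette.
def convexityDefectDemo():
    canvas = np.zeros((200, 200), np.uint8)
    # A square with a deep notch cut into its top edge.
    polygon = np.array([[20, 180], [20, 20], [80, 20], [100, 120], [120, 20],
                        [180, 20], [180, 180]], np.int32)
    cv2.fillPoly(canvas, [polygon], 255)
    # [-2] keeps this working on both the OpenCV 3 and OpenCV 4 return styles.
    contours = cv2.findContours(canvas, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[-2]
    contour = max(contours, key=len)
    hullIndices = cv2.convexHull(contour, returnPoints=False)
    defects = cv2.convexityDefects(contour, hullIndices)
    if defects is not None:
        for defect in defects:
            s, e, f, d = defect[0]
            # d is a fixed-point distance; dividing by 256 gives pixels.
            print(tuple(contour[s][0]), tuple(contour[e][0]),
                  tuple(contour[f][0]), d / 256.0)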
if __name__ == '__main__':
    fingerDetector = FingerDetector(300, 27, 159, False)
    fingerDetector.continuousFingers()
|
|
import re
import json
from string import digits, ascii_uppercase, capwords
import datetime
from django import http
from django.conf import settings
from django.db import transaction
import pytest
from nomination import url_handler, models
from . import factories
pytestmark = pytest.mark.django_db
alnum_list = sorted(digits + ascii_uppercase)
class TestAlphabeticalBrowse():
def test_returns_browse_dict(self):
surts = {
'A': ('http://(org,alarm,)', '(org,a'),
'C': ('http://(org,charlie,)', '(org,c'),
'1': ('http://(org,123,)', '(org,1')
}
project = factories.ProjectFactory()
# Create the surts we're expecting to see represented in the returned dict.
[factories.SURTFactory(url_project=project, value=surts[key][0]) for key in surts]
expected = {
'org': [(char, surts[char][1] if surts.get(char) else None) for char in alnum_list]
}
# Create another unrelated SURT to make sure we aren't grabbing everything.
factories.SURTFactory()
results = url_handler.alphabetical_browse(project)
assert results == expected
@pytest.mark.parametrize('surt, expected', [
('http://(,)', {}),
('http://(org,)', {'org': [(char, None) for char in alnum_list]})
])
def test_no_valid_surts_found(self, surt, expected):
project = factories.ProjectFactory()
factories.SURTFactory(url_project=project, value=surt)
results = url_handler.alphabetical_browse(project)
assert results == expected
def test_project_not_found(self):
project = 'not a real project'
with pytest.raises(http.Http404):
url_handler.alphabetical_browse(project)
class TestGetMetadata():
def test_returns_metadata_list(self):
project = factories.ProjectFactory()
vals = [mdv.metadata for mdv in factories.MetadataValuesFactory.create_batch(3)]
proj_metadata = [
factories.ProjectMetadataFactory(project=project, metadata=val)
for val in vals
]
expected = [
(pm, [models.Metadata_Values.objects.get(metadata=pm.metadata).value])
for pm in proj_metadata
]
results = url_handler.get_metadata(project)
for each in results:
assert (each[0], list(each[1])) in expected
def test_metadata_list_includes_valueset_values(self):
project = factories.ProjectFactory()
valset = factories.ValuesetFactory()
vals = [factories.ValuesetValuesFactory(valueset=valset).value for i in range(3)]
metadata = factories.MetadataFactory(value_sets=[valset])
factories.ProjectMetadataFactory(project=project, metadata=metadata)
results = url_handler.get_metadata(project)
for _, value_iter in results:
value_list = list(value_iter)
assert all(value in vals for value in value_list)
assert len(value_list) == 3
@pytest.mark.parametrize('posted_data, processed_posted_data, expected', [
(
{'color': ['blue', 'other_specify']},
{'color': ['blue', 'other_specify'], 'color_other': 'magenta'},
{'color': ['blue', 'magenta'], 'color_other': 'magenta'}
),
(
{'color': ['blue', 'other_specify']},
{'color': ['blue', 'other_specify']},
{'color': ['blue']}
),
(
{'color': ['blue', 'green']},
{'color': ['blue', 'green']},
{'color': ['blue', 'green']}
),
(
{'color': ['other_specify']},
{'color': 'other_specify', 'color_other': 'magenta'},
{'color': 'magenta', 'color_other': 'magenta'}
),
pytest.param({'color': ['other_specify']}, {'color': 'other_specify'}, {},
marks=pytest.mark.xfail)
])
def test_handle_metadata(rf, posted_data, processed_posted_data, expected):
request = rf.post('/', posted_data)
assert url_handler.handle_metadata(request, processed_posted_data) == expected
class TestValidateDate():
@pytest.mark.parametrize('date_str', [
'2006-10-25',
'10/25/2006',
'10/25/06',
'Oct 25 2006',
'Oct 25, 2006',
'25 Oct 2006',
'25 Oct, 2006',
'October 25 2006',
'October 25, 2006',
'25 October 2006',
'25 October, 2006'
])
def test_returns_valid_date(self, date_str):
assert isinstance(url_handler.validate_date(date_str), datetime.date)
def test_returns_none_with_invalid_date(self):
date_str = '2006, Oct 25'
assert url_handler.validate_date(date_str) is None
class TestAddURL():
def test_returns_expected(self):
project = factories.ProjectWithMetadataFactory()
attribute_name = project.metadata.first().name
value = 'some_value'
form_data = {
'url_value': 'http://www.example.com',
'nominator_email': 'somebody@someplace.com',
'nominator_name': 'John Lambda',
'nominator_institution': 'UNT',
attribute_name: value
}
expected = [
'You have successfully nominated {0}'.format(form_data['url_value']),
'You have successfully added the {0} "{1}" for {2}'.format(
attribute_name, value, form_data['url_value'])
]
assert url_handler.add_url(project, form_data) == expected
def test_cannot_get_system_nominator(self):
form_data = {'url_value': 'http://www.example.com'}
project = factories.ProjectFactory()
models.Nominator.objects.get().delete()
with pytest.raises(http.Http404):
url_handler.add_url(project, form_data)
@pytest.mark.xfail(reason='Unreachable path')
def test_cannot_get_or_create_surt(self):
form_data = {'url_value': 'http://www.example.com'}
project = None
with pytest.raises(http.Http404):
url_handler.add_url(project, form_data)
def test_cannot_get_or_create_nominator(self):
form_data = {
'url_value': 'http://www.example.com',
'nominator_email': None,
'nominator_name': None,
'nominator_institution': None
}
project = factories.ProjectFactory()
with pytest.raises(http.Http404):
url_handler.add_url(project, form_data)
class TestAddMetadata():
def test_returns_expected(self):
project = factories.ProjectWithMetadataFactory()
nominator = factories.NominatorFactory()
attribute_name = project.metadata.first().name
value = 'some_value'
form_data = {
'nominator_email': nominator.nominator_email,
'nominator_institution': nominator.nominator_institution,
'nominator_name': nominator.nominator_name,
'scope': '1',
'url_value': 'http://www.example.com',
attribute_name: value
}
expected = [
'You have successfully nominated {0}'.format(form_data['url_value']),
'You have successfully added the {0} "{1}" for {2}'.format(
attribute_name, value, form_data['url_value'])
]
assert url_handler.add_metadata(project, form_data) == expected
def test_nominator_not_found(self):
project = factories.ProjectFactory()
form_data = {'nominator_email': 'someone@someplace.com'}
with pytest.raises(http.Http404):
url_handler.add_metadata(project, form_data)
@pytest.mark.parametrize('url, expected', [
('http://www.example.com', 'http://www.example.com'),
(' http://www.example.com ', 'http://www.example.com'),
('https://www.example.com', 'https://www.example.com'),
('http://www.example.com///', 'http://www.example.com')
])
def test_check_url(url, expected):
assert url_handler.check_url(url) == expected
class TestGetNominator():
def test_returns_nominator(self):
nominator = factories.NominatorFactory()
form_data = {
'nominator_email': nominator.nominator_email,
'nominator_name': nominator.nominator_name,
'nominator_institution': nominator.nominator_institution
}
assert url_handler.get_nominator(form_data) == nominator
def test_creates_and_returns_nominator(self):
form_data = {
'nominator_email': 'somebody@somewhere.com',
'nominator_name': 'John Smith',
'nominator_institution': 'UNT'
}
assert len(models.Nominator.objects.all()) == 1
new_nominator = url_handler.get_nominator(form_data)
assert len(models.Nominator.objects.all()) == 2
for key in form_data.keys():
assert getattr(new_nominator, key) == form_data[key]
def test_cannot_create_nominator(self):
form_data = {
'nominator_email': 'somebody@somewhere.com',
'nominator_name': None,
'nominator_institution': None
}
with pytest.raises(http.Http404):
url_handler.get_nominator(form_data)
class TestNominateURL():
@pytest.mark.parametrize('scope_value, scope', [
('1', 'In Scope'),
('0', 'Out of Scope')
])
def test_nomination_exists(self, scope_value, scope):
project = factories.ProjectFactory()
nominator = factories.NominatorFactory()
form_data = {'url_value': 'http://www.example.com'}
factories.NominatedURLFactory(
url_nominator=nominator,
url_project=project,
entity=form_data['url_value'],
value=scope_value
)
results = url_handler.nominate_url(project, nominator, form_data, scope_value)[0]
assert 'already' in results
assert scope in results
@pytest.mark.parametrize('scope_value, scope', [
('1', 'In Scope'),
('0', 'Out of Scope')
])
def test_nomination_gets_modified(self, scope_value, scope):
project = factories.ProjectFactory()
nominator = factories.NominatorFactory()
form_data = {'url_value': 'http://www.example.com'}
factories.NominatedURLFactory(
url_nominator=nominator,
url_project=project,
entity=form_data['url_value'],
value='1' if scope_value == '0' else '0'
)
results = url_handler.nominate_url(project, nominator, form_data, scope_value)[0]
assert 'successfully' in results
assert scope in results
def test_creates_new_nomination(self):
project = factories.ProjectFactory()
nominator = factories.NominatorFactory()
form_data = {'url_value': 'http://www.example.com'}
scope_value = 1
results = url_handler.nominate_url(project, nominator, form_data, scope_value)[0]
expected = 'You have successfully nominated {0}'.format(form_data['url_value'])
assert results == expected
def test_cannot_create_nomination(self):
project = nominator = scope_value = None
form_data = {}
with pytest.raises(http.Http404):
url_handler.nominate_url(project, nominator, form_data, scope_value)
class TestAddOtherAttribute():
@pytest.fixture
def setup(self):
nominator = factories.NominatorFactory()
project = factories.ProjectWithMetadataFactory()
metadata_names = [md.name for md in project.metadata.all()]
return project, metadata_names, nominator
def test_returns_expected(self, setup):
project, metadata_names, nominator = setup
entity = 'http://www.example.com'
value = 'some_value'
form_data = {'url_value': entity}
for metadata in metadata_names:
form_data[metadata] = value
results = url_handler.add_other_attribute(project, nominator, form_data, [])
expected = [
'You have successfully added the {0} "{1}" for {2}'.format(met_name, value, entity)
for met_name in metadata_names
]
assert sorted(results) == sorted(expected)
def test_returns_expected_with_multiple_attribute_values(self, setup):
project, metadata_names, nominator = setup
entity = 'http://www.example.com'
values = ['some_value', 'some_other_value']
form_data = {'url_value': entity}
for metadata in metadata_names:
form_data[metadata] = values
results = url_handler.add_other_attribute(project, nominator, form_data, [])
expected = [
'You have successfully added the {0} "{1}" for {2}'.format(met_name, value, entity)
for met_name in metadata_names
for value in values
]
assert sorted(results) == sorted(expected)
class TestSaveAttribute():
def test_creates_url(self):
results = url_handler.save_attribute(
factories.ProjectFactory(),
factories.NominatorFactory(),
{'url_value': 'http://www.example.com'},
[],
'Language',
'English'
)
assert 'You have successfully added' in results[0]
assert models.URL.objects.all().count() == 1
def test_does_not_create_url_if_it_exists_already(self):
url = factories.URLFactory()
results = url_handler.save_attribute(
url.url_project,
url.url_nominator,
{'url_value': url.entity},
[],
url.attribute,
url.value
)
assert 'You have already added' in results[0]
assert models.URL.objects.all().count() == 1
def test_url_cannot_be_saved(self):
with pytest.raises(http.Http404):
with transaction.atomic():
url_handler.save_attribute(None, None, {'url_value': ''}, [], '', '',)
assert models.URL.objects.all().count() == 0
class TestSurtExists():
def test_returns_true_with_existing_surt(self):
system_nominator = models.Nominator.objects.get(id=settings.SYSTEM_NOMINATOR_ID)
url = factories.SURTFactory()
assert url_handler.surt_exists(url.url_project, system_nominator, url.entity) is True
def test_creates_surt_when_surt_does_not_exist(self):
system_nominator = models.Nominator.objects.get(id=settings.SYSTEM_NOMINATOR_ID)
project = factories.ProjectFactory()
url = 'http://example.com'
assert len(models.URL.objects.all()) == 0
assert url_handler.surt_exists(project, system_nominator, url) is True
assert len(models.URL.objects.all()) == 1
def test_surt_cannot_be_created(self):
system_nominator = models.Nominator.objects.get(id=settings.SYSTEM_NOMINATOR_ID)
project = factories.ProjectFactory()
url = None
with pytest.raises(http.Http404):
url_handler.surt_exists(project, system_nominator, url)
@pytest.mark.parametrize('url, expected', [
('www.example.com', 'http://www.example.com'),
(' http://www.example.com ', 'http://www.example.com')
])
def test_url_formatter(url, expected):
assert url_handler.url_formatter(url) == expected
@pytest.mark.parametrize('url, preserveCase, expected', [
# Documentation on SURTs is inconsistent about whether a comma
# should come before the port or not. The assumption here is
# that it should.
('http://userinfo@domain.tld:80/path?query#fragment', False,
'http://(tld,domain,:80@userinfo)/path?query#fragment'),
('http://www.example.com', False, 'http://(com,example,www,)'),
('ftp://www.example.com', False, 'ftp://(com,example,www,)'),
('ftps://www.example.com', False, 'ftp://(com,example,www,)'),
('https://www.example.com', False, 'http://(com,example,www,)'),
('www.example.com', False, 'http://(com,example,www,)'),
('http://www.eXaMple.cOm', True, 'http://(cOm,eXaMple,www,)'),
('Not a URL.', False, ''),
('1.2.3.4:80/examples', False, 'http://(1.2.3.4:80)/examples'),
])
def test_surtize(url, preserveCase, expected):
assert url_handler.surtize(url, preserveCase=preserveCase) == expected
def test_appendToSurt():
match_obj = re.search(r'(World!)', 'The End of The World!')
groupnum = 1
surt = 'Hello, '
expected = 'Hello, World!'
assert url_handler.appendToSurt(match_obj, groupnum, surt) == expected
@pytest.mark.parametrize('uri, expected', [
('http://www.example.com', 'http://www.example.com'),
('www.example.com', 'http://www.example.com'),
(':.', ':.'),
('.:', 'http://.:')
])
def test_addImpliedHttpIfNecessary(uri, expected):
assert url_handler.addImpliedHttpIfNecessary(uri) == expected
class TestCreateJsonBrowse():
@pytest.mark.parametrize('root, text, id_group', [
('com,', '<a href="surt/(com,example">com,example</a>', 'com,example,'),
('', 'com', 'com,')
])
def test_returns_expected(self, root, text, id_group):
project = factories.ProjectFactory()
factories.SURTFactory(
url_project=project,
entity='http://www.example.com',
value='http://(com,example,www)'
)
expected = [{
'hasChildren': True,
'id': id_group,
'text': text
}]
results = url_handler.create_json_browse(project.project_slug, None, root)
assert json.loads(results) == expected
@pytest.mark.parametrize('root, text, id_group', [
('com,', '<a href="surt/(com,example">com,example</a>', 'com,example,'),
('', 'com', 'com,')
])
def test_handles_non_http(self, root, text, id_group):
project = factories.ProjectFactory()
factories.SURTFactory(
url_project=project,
entity='ftp://www.example.com',
value='ftp://(com,example,www)'
)
expected = [{
'hasChildren': True,
'id': id_group,
'text': text
}]
results = url_handler.create_json_browse(project.project_slug, None, root)
assert json.loads(results) == expected
def test_returns_expected_with_no_children(self):
project = factories.ProjectFactory()
factories.SURTFactory(
url_project=project,
entity='http://www.example.com',
value='http://(com,example,www)'
)
root = 'com,example,'
expected = [{
'id': 'com,example,www,',
'text': '<a href="surt/(com,example,www">com,example,www</a>'
}]
results = url_handler.create_json_browse(project.project_slug, None, root)
assert json.loads(results) == expected
@pytest.mark.parametrize('root, text, id_group', [
('com,', '<a href="surt/(com,example">com,example</a>', 'com,example,'),
('', 'com', 'com,'),
])
def test_does_not_show_duplicates(self, root, text, id_group):
project = factories.ProjectFactory()
factories.SURTFactory.create_batch(
2,
url_project=project,
entity='http://www.example.com',
value='http://(com,example,www)'
)
factories.SURTFactory(
url_project=project,
entity='ftp://www.example.com',
value='ftp://(com,example,www)'
)
expected = [{
'hasChildren': True,
'id': id_group,
'text': text
}]
results = url_handler.create_json_browse(project.project_slug, None, root)
assert json.loads(results) == expected
def test_groups_by_prefix_when_many_urls_exist(self):
project = factories.ProjectFactory()
urls = factories.SURTFactory.create_batch(101, url_project=project)
root = 'com,'
expected = []
results = url_handler.create_json_browse(project.project_slug, None, root)
for url in urls:
surt_dict = {
'hasChildren': True,
'id': root + url.value[url.value.find(root) + 4],
'text': url.value[url.value.find(root) + 4]
}
if surt_dict not in expected:
expected.append(surt_dict)
assert sorted(json.loads(results), key=lambda x: x['id']) == \
sorted(expected, key=lambda x: x['id'])
def test_cannot_find_project(self):
slug = 'blah'
with pytest.raises(http.Http404):
url_handler.create_json_browse(slug, None, None)
def test_cannot_find_matching_surts(self):
project = factories.ProjectFactory()
root = 'example'
assert url_handler.create_json_browse(project.project_slug, None, root) == '[]'
def test_empty_root(self):
project = factories.ProjectFactory()
root = ''
assert url_handler.create_json_browse(project.project_slug, None, root) == '[]'
class TestCreateJsonSearch():
def test_returns_expected(self):
project = factories.ProjectFactory()
expected_urls = factories.URLFactory.create_batch(10, url_project=project)
other_urls = factories.URLFactory.create_batch(10)
json_url_list = url_handler.create_json_search(project.project_slug)
for url in expected_urls:
assert url.entity in json_url_list
for url in other_urls:
assert url.entity not in json_url_list
def test_project_not_found(self):
with pytest.raises(http.Http404):
url_handler.create_json_search('fake_slug')
class TestCreateURLList():
def test_returns_expected(self):
project = factories.ProjectFactory()
entity = 'www.example.com'
surt = 'http://(com,example,www,)'
domain_surt = 'http://(com,example,'
urls = factories.URLFactory.create_batch(5, url_project=project, entity=entity)
nominations = factories.NominatedURLFactory.create_batch(
5,
url_project=project,
entity=entity
)
score = 0
for nomination in nominations:
score += int(nomination.value)
factories.SURTFactory(url_project=project, entity=entity, value=surt)
results = url_handler.create_url_list(
project,
models.URL.objects.filter(entity__iexact=entity)
)
assert results['entity'] == entity
for nomination in nominations:
name = nomination.url_nominator.nominator_name
institution = nomination.url_nominator.nominator_institution
assert '{0} - {1}'.format(name, institution) in results['nomination_list']
assert results['nomination_count'] == 5
assert results['nomination_score'] == score
assert results['surt'] == domain_surt
for url in urls:
attribute = capwords(url.attribute.replace('_', ' '))
assert url.value in results['attribute_dict'][attribute]
def test_returns_expected_with_project_metadata_values(self):
project = factories.ProjectFactory()
metadata = factories.MetadataFactory()
factories.ProjectMetadataFactory(project=project, metadata=metadata)
value = 'one'
met_value = factories.MetadataValuesFactory(metadata=metadata, value__key=value).value
url = factories.URLFactory(url_project=project, attribute=metadata.name, value=value)
results = url_handler.create_url_list(project, [url])
attribute = capwords(url.attribute.replace('_', ' '))
assert results['attribute_dict'][attribute] == [met_value]
class TestCreateURLDump():
def test_returns_expected_with_nomination(self):
project = factories.ProjectFactory()
url = factories.NominatedURLFactory(url_project=project)
nominator = url.url_nominator
results = url_handler.create_url_dump(project)
assert results == {
url.entity: {
'nominators': ['{0} - {1}'.format(
nominator.nominator_name,
nominator.nominator_institution
)],
'nomination_count': 1,
'nomination_score': int(url.value),
'attributes': {}
}
}
def test_returns_expected_with_surt(self):
project = factories.ProjectFactory()
surt = 'http://(com,example,www)'
domain_surt = 'http://(com,example,'
url = factories.SURTFactory(url_project=project, value=surt)
results = url_handler.create_url_dump(project)
assert results == {
url.entity: {
'nominators': [],
'nomination_count': 0,
'nomination_score': 0,
'attributes': {},
'surt': surt,
'domain_surt': domain_surt
}
}
def test_returns_correct_attribute(self):
project = factories.ProjectWithMetadataFactory(metadata2=None)
attribute = project.metadata.all()[0].name
value = models.Metadata_Values.objects.filter(metadata__name__iexact=attribute)[0].value
url = factories.URLFactory(url_project=project, attribute=attribute, value=value.key)
results = url_handler.create_url_dump(project)
assert results == {
url.entity: {
'nominators': [],
'nomination_count': 0,
'nomination_score': 0,
'attributes': {attribute: [value.value]},
}
}
def test_returns_correct_attribute_with_new_value(self):
project = factories.ProjectWithMetadataFactory(metadata2=None)
attribute = project.metadata.all()[0].name
url = factories.URLFactory(url_project=project, attribute=attribute)
results = url_handler.create_url_dump(project)
assert results == {
url.entity: {
'nominators': [],
'nomination_count': 0,
'nomination_score': 0,
'attributes': {attribute: [url.value]},
}
}
class TestCreateSurtDict():
@pytest.mark.parametrize('surt_root, expected_letter', [
('http://(com,example,www,)', False),
('http://(com,a,', 'a')
])
def test_returns_expected(self, surt_root, expected_letter):
project = factories.ProjectFactory()
surts = [
surt_root + '/some/stuff',
surt_root + '/other/stuff',
surt_root + '/nothing',
surt_root
]
urls = [factories.SURTFactory(url_project=project, value=surt) for surt in surts]
surt_dict = url_handler.create_surt_dict(project, surt_root)
assert len(surt_dict['url_list']) == len(surts)
for url in surt_dict['url_list']:
assert url in urls
assert surt_dict['letter'] == expected_letter
def test_returns_none_when_no_surts_found(self):
surt_dict = url_handler.create_surt_dict('', 'http://(com,example,)')
assert surt_dict['url_list'] is None
@pytest.mark.parametrize('surt, expected', [
('http://(com,example,www,)', 'http://(com,example,'),
('http://(uk,gov,nationalarchives,www,)', 'http://(uk,gov,'),
('http://not-a-surt.com', 'http://not-a-surt.com')
])
def test_get_domain_surt(surt, expected):
assert url_handler.get_domain_surt(surt) == expected
def test_fix_scheme_double_slash():
url = 'http:/www.example.com'
expected = 'http://www.example.com'
assert url_handler.fix_scheme_double_slash(url) == expected
def test_fix_scheme_double_slash_ftp():
url = 'ftp:/www.example.com/clvl37.idx'
expected = 'ftp://www.example.com/clvl37.idx'
assert url_handler.fix_scheme_double_slash(url) == expected
def test_strip_scheme():
url = 'https://example.com'
expected = 'example.com'
assert url_handler.strip_scheme(url) == expected
|
|
# Copyright 2012-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import mlog, mparser
import pickle, os, uuid
import sys
from itertools import chain
from pathlib import PurePath
from collections import OrderedDict, defaultdict
from .mesonlib import (
MesonException, MachineChoice, PerMachine, OrderedSet,
default_libdir, default_libexecdir, default_prefix, split_args
)
from .envconfig import get_env_var_pair
from .wrap import WrapMode
import ast
import argparse
import configparser
import enum
import shlex
import typing as T
if T.TYPE_CHECKING:
from . import dependencies
from .compilers import Compiler # noqa: F401
from .environment import Environment
OptionDictType = T.Dict[str, 'UserOption[T.Any]']
version = '0.55.999'
backendlist = ['ninja', 'vs', 'vs2010', 'vs2015', 'vs2017', 'vs2019', 'xcode']
default_yielding = False
# Can't bind this near the class method it seems, sadly.
_T = T.TypeVar('_T')
class UserOption(T.Generic[_T]):
def __init__(self, description, choices, yielding):
super().__init__()
self.choices = choices
self.description = description
if yielding is None:
yielding = default_yielding
if not isinstance(yielding, bool):
raise MesonException('Value of "yielding" must be a boolean.')
self.yielding = yielding
def printable_value(self):
return self.value
# Check that the input is a valid value and return the
# "cleaned" or "native" version. For example the Boolean
# option could take the string "true" and return True.
def validate_value(self, value: T.Any) -> _T:
raise RuntimeError('Derived option class did not override validate_value.')
def set_value(self, newvalue):
self.value = self.validate_value(newvalue)
class UserStringOption(UserOption[str]):
def __init__(self, description, value, choices=None, yielding=None):
super().__init__(description, choices, yielding)
self.set_value(value)
def validate_value(self, value):
if not isinstance(value, str):
raise MesonException('Value "%s" for string option is not a string.' % str(value))
return value
class UserBooleanOption(UserOption[bool]):
def __init__(self, description, value, yielding=None):
super().__init__(description, [True, False], yielding)
self.set_value(value)
def __bool__(self) -> bool:
return self.value
def validate_value(self, value) -> bool:
if isinstance(value, bool):
return value
if value.lower() == 'true':
return True
if value.lower() == 'false':
return False
raise MesonException('Value %s is not boolean (true or false).' % value)
class UserIntegerOption(UserOption[int]):
def __init__(self, description, value, yielding=None):
min_value, max_value, default_value = value
self.min_value = min_value
self.max_value = max_value
c = []
if min_value is not None:
c.append('>=' + str(min_value))
if max_value is not None:
c.append('<=' + str(max_value))
choices = ', '.join(c)
super().__init__(description, choices, yielding)
self.set_value(default_value)
def validate_value(self, value) -> int:
if isinstance(value, str):
value = self.toint(value)
if not isinstance(value, int):
raise MesonException('New value for integer option is not an integer.')
if self.min_value is not None and value < self.min_value:
raise MesonException('New value %d is less than minimum value %d.' % (value, self.min_value))
if self.max_value is not None and value > self.max_value:
raise MesonException('New value %d is more than maximum value %d.' % (value, self.max_value))
return value
def toint(self, valuestring) -> int:
try:
return int(valuestring)
except ValueError:
raise MesonException('Value string "%s" is not convertible to an integer.' % valuestring)
class UserUmaskOption(UserIntegerOption, UserOption[T.Union[str, int]]):
def __init__(self, description, value, yielding=None):
super().__init__(description, (0, 0o777, value), yielding)
self.choices = ['preserve', '0000-0777']
def printable_value(self):
if self.value == 'preserve':
return self.value
return format(self.value, '04o')
def validate_value(self, value):
if value is None or value == 'preserve':
return 'preserve'
return super().validate_value(value)
def toint(self, valuestring):
try:
return int(valuestring, 8)
except ValueError as e:
raise MesonException('Invalid mode: {}'.format(e))
class UserComboOption(UserOption[str]):
def __init__(self, description, choices: T.List[str], value, yielding=None):
super().__init__(description, choices, yielding)
if not isinstance(self.choices, list):
raise MesonException('Combo choices must be an array.')
for i in self.choices:
if not isinstance(i, str):
raise MesonException('Combo choice elements must be strings.')
self.set_value(value)
def validate_value(self, value):
if value not in self.choices:
optionsstring = ', '.join(['"%s"' % (item,) for item in self.choices])
raise MesonException('Value "{}" for combo option "{}" is not one of the choices.'
' Possible choices are: {}.'.format(
value, self.description, optionsstring))
return value
class UserArrayOption(UserOption[T.List[str]]):
def __init__(self, description, value, split_args=False, user_input=False, allow_dups=False, **kwargs):
super().__init__(description, kwargs.get('choices', []), yielding=kwargs.get('yielding', None))
self.split_args = split_args
self.allow_dups = allow_dups
self.value = self.validate_value(value, user_input=user_input)
def validate_value(self, value, user_input: bool = True) -> T.List[str]:
# User input is for options defined on the command line (via -D
# options). Users can put their input in as a comma separated
# string, but for defining options in meson_options.txt the format
# should match that of a combo
if not user_input and isinstance(value, str) and not value.startswith('['):
raise MesonException('Value does not define an array: ' + value)
if isinstance(value, str):
if value.startswith('['):
try:
newvalue = ast.literal_eval(value)
except ValueError:
raise MesonException('malformed option {}'.format(value))
elif value == '':
newvalue = []
else:
if self.split_args:
newvalue = split_args(value)
else:
newvalue = [v.strip() for v in value.split(',')]
elif isinstance(value, list):
newvalue = value
else:
            raise MesonException('"{}" should be a string array, but it is not'.format(value))
if not self.allow_dups and len(set(newvalue)) != len(newvalue):
msg = 'Duplicated values in array option is deprecated. ' \
'This will become a hard error in the future.'
mlog.deprecation(msg)
for i in newvalue:
if not isinstance(i, str):
                raise MesonException('String array element "{0}" is not a string.'.format(str(i)))
if self.choices:
bad = [x for x in newvalue if x not in self.choices]
if bad:
raise MesonException('Options "{}" are not in allowed choices: "{}"'.format(
', '.join(bad), ', '.join(self.choices)))
return newvalue
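# Illustrative sketch only (not part of Meson): the two accepted spellings for
# array option values handled by UserArrayOption.validate_value() above --
# comma-separated strings from the command line (user_input=True) versus
# list literals as written in meson_options.txt.
def _array_option_example() -> None:
    opt = UserArrayOption('demo option', [])
    assert opt.validate_value('a,b', user_input=True) == ['a', 'b']
    assert opt.validate_value("['a', 'b']", user_input=False) == ['a', 'b']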
class UserFeatureOption(UserComboOption):
static_choices = ['enabled', 'disabled', 'auto']
def __init__(self, description, value, yielding=None):
super().__init__(description, self.static_choices, value, yielding)
def is_enabled(self):
return self.value == 'enabled'
def is_disabled(self):
return self.value == 'disabled'
def is_auto(self):
return self.value == 'auto'
if T.TYPE_CHECKING:
CacheKeyType = T.Tuple[T.Tuple[T.Any, ...], ...]
SubCacheKeyType = T.Tuple[T.Any, ...]
class DependencyCacheType(enum.Enum):
OTHER = 0
PKG_CONFIG = 1
CMAKE = 2
@classmethod
def from_type(cls, dep: 'dependencies.Dependency') -> 'DependencyCacheType':
from . import dependencies
# As more types gain search overrides they'll need to be added here
if isinstance(dep, dependencies.PkgConfigDependency):
return cls.PKG_CONFIG
if isinstance(dep, dependencies.CMakeDependency):
return cls.CMAKE
return cls.OTHER
class DependencySubCache:
def __init__(self, type_: DependencyCacheType):
self.types = [type_]
self.__cache = {} # type: T.Dict[SubCacheKeyType, dependencies.Dependency]
def __getitem__(self, key: 'SubCacheKeyType') -> 'dependencies.Dependency':
return self.__cache[key]
def __setitem__(self, key: 'SubCacheKeyType', value: 'dependencies.Dependency') -> None:
self.__cache[key] = value
def __contains__(self, key: 'SubCacheKeyType') -> bool:
return key in self.__cache
def values(self) -> T.Iterable['dependencies.Dependency']:
return self.__cache.values()
class DependencyCache:
"""Class that stores a cache of dependencies.
This class is meant to encapsulate the fact that we need multiple keys to
successfully lookup by providing a simple get/put interface.
"""
def __init__(self, builtins_per_machine: PerMachine[T.Dict[str, UserOption[T.Any]]], for_machine: MachineChoice):
self.__cache = OrderedDict() # type: T.MutableMapping[CacheKeyType, DependencySubCache]
self.__builtins_per_machine = builtins_per_machine
self.__for_machine = for_machine
def __calculate_subkey(self, type_: DependencyCacheType) -> T.Tuple[T.Any, ...]:
if type_ is DependencyCacheType.PKG_CONFIG:
return tuple(self.__builtins_per_machine[self.__for_machine]['pkg_config_path'].value)
elif type_ is DependencyCacheType.CMAKE:
return tuple(self.__builtins_per_machine[self.__for_machine]['cmake_prefix_path'].value)
assert type_ is DependencyCacheType.OTHER, 'Someone forgot to update subkey calculations for a new type'
return tuple()
def __iter__(self) -> T.Iterator['CacheKeyType']:
return self.keys()
def put(self, key: 'CacheKeyType', dep: 'dependencies.Dependency') -> None:
t = DependencyCacheType.from_type(dep)
if key not in self.__cache:
self.__cache[key] = DependencySubCache(t)
subkey = self.__calculate_subkey(t)
self.__cache[key][subkey] = dep
def get(self, key: 'CacheKeyType') -> T.Optional['dependencies.Dependency']:
"""Get a value from the cache.
If there is no cache entry then None will be returned.
"""
try:
val = self.__cache[key]
except KeyError:
return None
for t in val.types:
subkey = self.__calculate_subkey(t)
try:
return val[subkey]
except KeyError:
pass
return None
def values(self) -> T.Iterator['dependencies.Dependency']:
for c in self.__cache.values():
yield from c.values()
def keys(self) -> T.Iterator['CacheKeyType']:
return iter(self.__cache.keys())
def items(self) -> T.Iterator[T.Tuple['CacheKeyType', T.List['dependencies.Dependency']]]:
for k, v in self.__cache.items():
vs = []
for t in v.types:
subkey = self.__calculate_subkey(t)
if subkey in v:
vs.append(v[subkey])
yield k, vs
def clear(self) -> None:
self.__cache.clear()
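# Illustrative sketch only (not used by Meson itself): the caller supplies a
# hashable primary key, while the subkey is derived from the per-machine
# pkg_config_path / cmake_prefix_path options shown above, so changing those
# options misses the now-stale entries. The key tuple and option values here
# are arbitrary.
def _dependency_cache_example(dep) -> None:
    per_machine_builtins = PerMachine(
        {'pkg_config_path': UserArrayOption('pkg_config_path', []),
         'cmake_prefix_path': UserArrayOption('cmake_prefix_path', [])},
        {'pkg_config_path': UserArrayOption('pkg_config_path', []),
         'cmake_prefix_path': UserArrayOption('cmake_prefix_path', [])})
    cache = DependencyCache(per_machine_builtins, MachineChoice.HOST)
    key = ('zlib', ('>=1.2.11',))
    cache.put(key, dep)
    assert cache.get(key) is dep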
# Can't bind this near the class method it seems, sadly.
_V = T.TypeVar('_V')
# This class contains all data that must persist over multiple
# invocations of Meson. It is roughly the same thing as
# cmakecache.
class CoreData:
def __init__(self, options: argparse.Namespace, scratch_dir: str):
self.lang_guids = {
'default': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
'c': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
'cpp': '8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942',
'test': '3AC096D0-A1C2-E12C-1390-A8335801FDAB',
'directory': '2150E333-8FDC-42A3-9474-1A3956D46DE8',
}
self.test_guid = str(uuid.uuid4()).upper()
self.regen_guid = str(uuid.uuid4()).upper()
self.install_guid = str(uuid.uuid4()).upper()
self.target_guids = {}
self.version = version
self.builtins = {} # : OptionDictType
self.builtins_per_machine = PerMachine({}, {})
self.backend_options = {} # : OptionDictType
self.user_options = {} # : OptionDictType
self.compiler_options = PerMachine(
defaultdict(dict),
defaultdict(dict),
) # : PerMachine[T.defaultdict[str, OptionDictType]]
self.base_options = {} # : OptionDictType
self.cross_files = self.__load_config_files(options, scratch_dir, 'cross')
self.compilers = PerMachine(OrderedDict(), OrderedDict())
build_cache = DependencyCache(self.builtins_per_machine, MachineChoice.BUILD)
        host_cache = DependencyCache(self.builtins_per_machine, MachineChoice.HOST)
self.deps = PerMachine(build_cache, host_cache) # type: PerMachine[DependencyCache]
self.compiler_check_cache = OrderedDict()
# Only to print a warning if it changes between Meson invocations.
self.config_files = self.__load_config_files(options, scratch_dir, 'native')
self.builtin_options_libdir_cross_fixup()
self.init_builtins('')
@staticmethod
def __load_config_files(options: argparse.Namespace, scratch_dir: str, ftype: str) -> T.List[str]:
# Need to try and make the passed filenames absolute because when the
# files are parsed later we'll have chdir()d.
if ftype == 'cross':
filenames = options.cross_file
else:
filenames = options.native_file
if not filenames:
return []
found_invalid = [] # type: T.List[str]
missing = [] # type: T.List[str]
real = [] # type: T.List[str]
for i, f in enumerate(filenames):
f = os.path.expanduser(os.path.expandvars(f))
if os.path.exists(f):
if os.path.isfile(f):
real.append(os.path.abspath(f))
continue
elif os.path.isdir(f):
found_invalid.append(os.path.abspath(f))
else:
# in this case we've been passed some kind of pipe, copy
# the contents of that file into the meson private (scratch)
# directory so that it can be re-read when wiping/reconfiguring
copy = os.path.join(scratch_dir, '{}.{}.ini'.format(uuid.uuid4(), ftype))
with open(f, 'r') as rf:
with open(copy, 'w') as wf:
wf.write(rf.read())
real.append(copy)
# Also replace the command line argument, as the pipe
# probably won't exist on reconfigure
filenames[i] = copy
continue
if sys.platform != 'win32':
paths = [
os.environ.get('XDG_DATA_HOME', os.path.expanduser('~/.local/share')),
] + os.environ.get('XDG_DATA_DIRS', '/usr/local/share:/usr/share').split(':')
for path in paths:
path_to_try = os.path.join(path, 'meson', ftype, f)
if os.path.isfile(path_to_try):
real.append(path_to_try)
break
else:
missing.append(f)
else:
missing.append(f)
if missing:
if found_invalid:
mlog.log('Found invalid candidates for', ftype, 'file:', *found_invalid)
mlog.log('Could not find any valid candidate for', ftype, 'files:', *missing)
raise MesonException('Cannot find specified {} file: {}'.format(ftype, f))
return real
def builtin_options_libdir_cross_fixup(self):
# By default set libdir to "lib" when cross compiling since
# getting the "system default" is always wrong on multiarch
# platforms as it gets a value like lib/x86_64-linux-gnu.
if self.cross_files:
builtin_options['libdir'].default = 'lib'
def sanitize_prefix(self, prefix):
prefix = os.path.expanduser(prefix)
if not os.path.isabs(prefix):
raise MesonException('prefix value {!r} must be an absolute path'
''.format(prefix))
if prefix.endswith('/') or prefix.endswith('\\'):
# On Windows we need to preserve the trailing slash if the
# string is of type 'C:\' because 'C:' is not an absolute path.
if len(prefix) == 3 and prefix[1] == ':':
pass
# If prefix is a single character, preserve it since it is
# the root directory.
elif len(prefix) == 1:
pass
else:
prefix = prefix[:-1]
return prefix
def sanitize_dir_option_value(self, prefix: str, option: str, value: T.Any) -> T.Any:
'''
If the option is an installation directory option and the value is an
absolute path, check that it resides within prefix and return the value
as a path relative to the prefix.
This way everyone can do f.ex, get_option('libdir') and be sure to get
the library directory relative to prefix.
        .as_posix() keeps the POSIX-style path separators Meson uses.
'''
try:
value = PurePath(value)
except TypeError:
return value
if option.endswith('dir') and value.is_absolute() and \
option not in builtin_dir_noprefix_options:
# Value must be a subdir of the prefix
# commonpath will always return a path in the native format, so we
# must use pathlib.PurePath to do the same conversion before
# comparing.
msg = ('The value of the {!r} option is \'{!s}\' which must be a '
'subdir of the prefix {!r}.\nNote that if you pass a '
'relative path, it is assumed to be a subdir of prefix.')
# os.path.commonpath doesn't understand case-insensitive filesystems,
# but PurePath().relative_to() does.
try:
value = value.relative_to(prefix)
except ValueError:
raise MesonException(msg.format(option, value, prefix))
if '..' in str(value):
raise MesonException(msg.format(option, value, prefix))
return value.as_posix()
def init_builtins(self, subproject: str):
# Create builtin options with default values
for key, opt in builtin_options.items():
self.add_builtin_option(self.builtins, key, opt, subproject)
for for_machine in iter(MachineChoice):
for key, opt in builtin_options_per_machine.items():
self.add_builtin_option(self.builtins_per_machine[for_machine], key, opt, subproject)
def add_builtin_option(self, opts_map, key, opt, subproject):
if subproject:
if opt.yielding:
# This option is global and not per-subproject
return
optname = subproject + ':' + key
value = opts_map[key].value
else:
optname = key
value = None
opts_map[optname] = opt.init_option(key, value, default_prefix())
def init_backend_options(self, backend_name):
if backend_name == 'ninja':
self.backend_options['backend_max_links'] = \
UserIntegerOption(
'Maximum number of linker processes to run or 0 for no '
'limit',
(0, None, 0))
elif backend_name.startswith('vs'):
self.backend_options['backend_startup_project'] = \
UserStringOption(
'Default project to execute in Visual Studio',
'')
def get_builtin_option(self, optname, subproject=''):
raw_optname = optname
if subproject:
optname = subproject + ':' + optname
for opts in self._get_all_builtin_options():
v = opts.get(optname)
if v is None or v.yielding:
v = opts.get(raw_optname)
if v is None:
continue
if raw_optname == 'wrap_mode':
return WrapMode.from_string(v.value)
return v.value
raise RuntimeError('Tried to get unknown builtin option %s.' % raw_optname)
def _try_set_builtin_option(self, optname, value):
for opts in self._get_all_builtin_options():
opt = opts.get(optname)
if opt is None:
continue
if optname == 'prefix':
value = self.sanitize_prefix(value)
else:
prefix = self.builtins['prefix'].value
value = self.sanitize_dir_option_value(prefix, optname, value)
break
else:
return False
opt.set_value(value)
# Make sure that buildtype matches other settings.
if optname == 'buildtype':
self.set_others_from_buildtype(value)
else:
self.set_buildtype_from_others()
return True
def set_builtin_option(self, optname, value):
res = self._try_set_builtin_option(optname, value)
if not res:
raise RuntimeError('Tried to set unknown builtin option %s.' % optname)
def set_others_from_buildtype(self, value):
if value == 'plain':
opt = '0'
debug = False
elif value == 'debug':
opt = '0'
debug = True
elif value == 'debugoptimized':
opt = '2'
debug = True
elif value == 'release':
opt = '3'
debug = False
elif value == 'minsize':
opt = 's'
debug = True
else:
assert(value == 'custom')
return
self.builtins['optimization'].set_value(opt)
self.builtins['debug'].set_value(debug)
def set_buildtype_from_others(self):
opt = self.builtins['optimization'].value
debug = self.builtins['debug'].value
if opt == '0' and not debug:
mode = 'plain'
elif opt == '0' and debug:
mode = 'debug'
elif opt == '2' and debug:
mode = 'debugoptimized'
elif opt == '3' and not debug:
mode = 'release'
elif opt == 's' and debug:
mode = 'minsize'
else:
mode = 'custom'
self.builtins['buildtype'].set_value(mode)
@classmethod
def get_prefixed_options_per_machine(
cls,
            options_per_machine # : PerMachine[T.Dict[str, _V]]
) -> T.Iterable[T.Tuple[str, _V]]:
return cls._flatten_pair_iterator(
(for_machine.get_prefix(), options_per_machine[for_machine])
for for_machine in iter(MachineChoice)
)
@classmethod
def flatten_lang_iterator(
cls,
outer # : T.Iterable[T.Tuple[str, T.Dict[str, _V]]]
) -> T.Iterable[T.Tuple[str, _V]]:
return cls._flatten_pair_iterator((lang + '_', opts) for lang, opts in outer)
@staticmethod
def _flatten_pair_iterator(
outer # : T.Iterable[T.Tuple[str, T.Dict[str, _V]]]
) -> T.Iterable[T.Tuple[str, _V]]:
for k0, v0 in outer:
for k1, v1 in v0.items():
yield (k0 + k1, v1)
def _get_all_nonbuiltin_options(self) -> T.Iterable[T.Dict[str, UserOption]]:
yield self.backend_options
yield self.user_options
yield dict(self.flatten_lang_iterator(self.get_prefixed_options_per_machine(self.compiler_options)))
yield self.base_options
def _get_all_builtin_options(self) -> T.Iterable[T.Dict[str, UserOption]]:
yield dict(self.get_prefixed_options_per_machine(self.builtins_per_machine))
yield self.builtins
def get_all_options(self) -> T.Iterable[T.Dict[str, UserOption]]:
yield from self._get_all_nonbuiltin_options()
yield from self._get_all_builtin_options()
def validate_option_value(self, option_name, override_value):
for opts in self.get_all_options():
opt = opts.get(option_name)
if opt is not None:
try:
return opt.validate_value(override_value)
except MesonException as e:
raise type(e)(('Validation failed for option %s: ' % option_name) + str(e)) \
.with_traceback(sys.exc_info()[2])
raise MesonException('Tried to validate unknown option %s.' % option_name)
def get_external_args(self, for_machine: MachineChoice, lang):
return self.compiler_options[for_machine][lang]['args'].value
def get_external_link_args(self, for_machine: MachineChoice, lang):
return self.compiler_options[for_machine][lang]['link_args'].value
def merge_user_options(self, options):
for (name, value) in options.items():
if name not in self.user_options:
self.user_options[name] = value
else:
oldval = self.user_options[name]
if type(oldval) != type(value):
self.user_options[name] = value
def is_cross_build(self, when_building_for: MachineChoice = MachineChoice.HOST) -> bool:
if when_building_for == MachineChoice.BUILD:
return False
return len(self.cross_files) > 0
def strip_build_option_names(self, options):
res = OrderedDict()
for k, v in options.items():
if k.startswith('build.'):
k = k.split('.', 1)[1]
res[k] = v
return res
def copy_build_options_from_regular_ones(self):
assert(not self.is_cross_build())
for k, o in self.builtins_per_machine.host.items():
self.builtins_per_machine.build[k].set_value(o.value)
for lang, host_opts in self.compiler_options.host.items():
build_opts = self.compiler_options.build[lang]
for k, o in host_opts.items():
if k in build_opts:
build_opts[k].set_value(o.value)
def set_options(self, options, *, subproject='', warn_unknown=True):
if not self.is_cross_build():
options = self.strip_build_option_names(options)
# Set prefix first because it's needed to sanitize other options
if 'prefix' in options:
prefix = self.sanitize_prefix(options['prefix'])
self.builtins['prefix'].set_value(prefix)
for key in builtin_dir_noprefix_options:
if key not in options:
self.builtins[key].set_value(builtin_options[key].prefixed_default(key, prefix))
unknown_options = []
for k, v in options.items():
if k == 'prefix':
continue
if self._try_set_builtin_option(k, v):
continue
for opts in self._get_all_nonbuiltin_options():
tgt = opts.get(k)
if tgt is None:
continue
tgt.set_value(v)
break
else:
unknown_options.append(k)
if unknown_options and warn_unknown:
unknown_options = ', '.join(sorted(unknown_options))
sub = 'In subproject {}: '.format(subproject) if subproject else ''
mlog.warning('{}Unknown options: "{}"'.format(sub, unknown_options))
mlog.log('The value of new options can be set with:')
mlog.log(mlog.bold('meson setup <builddir> --reconfigure -Dnew_option=new_value ...'))
if not self.is_cross_build():
self.copy_build_options_from_regular_ones()
def set_default_options(self, default_options, subproject, env):
# Warn if the user is using two different ways of setting build-type
# options that override each other
if 'buildtype' in env.cmd_line_options and \
('optimization' in env.cmd_line_options or 'debug' in env.cmd_line_options):
mlog.warning('Recommend using either -Dbuildtype or -Doptimization + -Ddebug. '
'Using both is redundant since they override each other. '
'See: https://mesonbuild.com/Builtin-options.html#build-type-options')
cmd_line_options = OrderedDict()
# Set project default_options as if they were passed to the cmdline.
        # Subprojects can only define defaults for user options, not for
        # yielding builtin options.
from . import optinterpreter
for k, v in default_options.items():
if subproject:
if (k not in builtin_options or builtin_options[k].yielding) \
and optinterpreter.is_invalid_name(k, log=False):
continue
k = subproject + ':' + k
cmd_line_options[k] = v
# Override project default_options using conf files (cross or native)
for k, v in env.paths.host:
if v is not None:
cmd_line_options[k] = v
# Override all the above defaults using the command-line arguments
# actually passed to us
cmd_line_options.update(env.cmd_line_options)
env.cmd_line_options = cmd_line_options
# Create a subset of cmd_line_options, keeping only options for this
# subproject. Also take builtin options if it's the main project.
# Language and backend specific options will be set later when adding
# languages and setting the backend (builtin options must be set first
# to know which backend we'll use).
options = OrderedDict()
        # Some options default to environment variables when they are unset;
        # seed those now. They may still be overwritten by the explicitly
        # passed options handled below, and should only be set on the first run.
for for_machine in MachineChoice:
p_env_pair = get_env_var_pair(for_machine, self.is_cross_build(), 'PKG_CONFIG_PATH')
if p_env_pair is not None:
p_env_var, p_env = p_env_pair
# PKG_CONFIG_PATH may contain duplicates, which must be
# removed, else a duplicates-in-array-option warning arises.
p_list = list(OrderedSet(p_env.split(':')))
key = 'pkg_config_path'
if for_machine == MachineChoice.BUILD:
key = 'build.' + key
if env.first_invocation:
options[key] = p_list
elif options.get(key, []) != p_list:
mlog.warning(
p_env_var +
' environment variable has changed '
'between configurations, meson ignores this. '
'Use -Dpkg_config_path to change pkg-config search '
'path instead.'
)
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
for k, v in env.cmd_line_options.items():
if subproject:
if not k.startswith(subproject + ':'):
continue
elif k not in builtin_options.keys() \
and remove_prefix(k, 'build.') not in builtin_options_per_machine.keys():
if ':' in k:
continue
if optinterpreter.is_invalid_name(k, log=False):
continue
options[k] = v
self.set_options(options, subproject=subproject)
def add_lang_args(self, lang: str, comp: T.Type['Compiler'],
for_machine: MachineChoice, env: 'Environment') -> None:
"""Add global language arguments that are needed before compiler/linker detection."""
from .compilers import compilers
for k, o in compilers.get_global_options(
lang,
comp,
for_machine,
env.is_cross_build(),
env.properties[for_machine]).items():
# prefixed compiler options affect just this machine
opt_prefix = for_machine.get_prefix()
user_k = opt_prefix + lang + '_' + k
if user_k in env.cmd_line_options:
o.set_value(env.cmd_line_options[user_k])
self.compiler_options[for_machine][lang].setdefault(k, o)
def process_new_compiler(self, lang: str, comp: T.Type['Compiler'], env: 'Environment') -> None:
from . import compilers
self.compilers[comp.for_machine][lang] = comp
for k, o in comp.get_options().items():
# prefixed compiler options affect just this machine
opt_prefix = comp.for_machine.get_prefix()
user_k = opt_prefix + lang + '_' + k
if user_k in env.cmd_line_options:
o.set_value(env.cmd_line_options[user_k])
self.compiler_options[comp.for_machine][lang].setdefault(k, o)
enabled_opts = []
for optname in comp.base_options:
if optname in self.base_options:
continue
oobj = compilers.base_options[optname]
if optname in env.cmd_line_options:
oobj.set_value(env.cmd_line_options[optname])
enabled_opts.append(optname)
self.base_options[optname] = oobj
self.emit_base_options_warnings(enabled_opts)
def emit_base_options_warnings(self, enabled_opts: list):
if 'b_bitcode' in enabled_opts:
mlog.warning('Base option \'b_bitcode\' is enabled, which is incompatible with many linker options. Incompatible options such as \'b_asneeded\' have been disabled.', fatal=False)
mlog.warning('Please see https://mesonbuild.com/Builtin-options.html#Notes_about_Apple_Bitcode_support for more details.', fatal=False)
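# Illustrative sketch (not used by Meson itself) of the buildtype <-> (optimization, debug)
# correspondence that set_others_from_buildtype() and set_buildtype_from_others() above keep
# in sync; any combination outside this table corresponds to the 'custom' buildtype.
_EXAMPLE_BUILDTYPE_TABLE = {
    'plain': ('0', False),
    'debug': ('0', True),
    'debugoptimized': ('2', True),
    'release': ('3', False),
    'minsize': ('s', True),
}

def _example_buildtype_from_opts(optimization: str, debug: bool) -> str:
    # Mirror of CoreData.set_buildtype_from_others(), written as a free function
    # so the mapping is easy to inspect in isolation.
    for buildtype, pair in _EXAMPLE_BUILDTYPE_TABLE.items():
        if pair == (optimization, debug):
            return buildtype
    return 'custom'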
class CmdLineFileParser(configparser.ConfigParser):
def __init__(self):
# We don't want ':' as key delimiter, otherwise it would break when
# storing subproject options like "subproject:option=value"
super().__init__(delimiters=['='], interpolation=None)
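def _example_cmd_line_file_round_trip() -> None:
    # Minimal sketch (not called by Meson) of why the delimiter is pinned to '=':
    # with configparser's default delimiters ('=' and ':'), a subproject-scoped key
    # such as "mysub:werror" would be split at the colon instead of surviving as a
    # single option name.  "mysub" is a hypothetical subproject name.
    cp = CmdLineFileParser()
    cp.read_string('[options]\nmysub:werror = true\n')
    assert cp['options']['mysub:werror'] == 'true'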
class MachineFileParser():
def __init__(self, filenames: T.List[str]):
self.parser = CmdLineFileParser()
self.constants = {'True': True, 'False': False}
self.sections = {}
self.parser.read(filenames)
# Parse [constants] first so they can be used in other sections
if self.parser.has_section('constants'):
self.constants.update(self._parse_section('constants'))
for s in self.parser.sections():
if s == 'constants':
continue
self.sections[s] = self._parse_section(s)
def _parse_section(self, s):
self.scope = self.constants.copy()
section = {}
for entry, value in self.parser.items(s):
if ' ' in entry or '\t' in entry or "'" in entry or '"' in entry:
raise EnvironmentException('Malformed variable name {!r} in machine file.'.format(entry))
# Windows paths...
value = value.replace('\\', '\\\\')
try:
ast = mparser.Parser(value, 'machinefile').parse()
res = self._evaluate_statement(ast.lines[0])
except MesonException:
raise EnvironmentException('Malformed value in machine file variable {!r}.'.format(entry))
except KeyError as e:
raise EnvironmentException('Undefined constant {!r} in machine file variable {!r}.'.format(e.args[0], entry))
section[entry] = res
self.scope[entry] = res
return section
def _evaluate_statement(self, node):
if isinstance(node, (mparser.StringNode)):
return node.value
elif isinstance(node, mparser.BooleanNode):
return node.value
elif isinstance(node, mparser.NumberNode):
return node.value
elif isinstance(node, mparser.ArrayNode):
return [self._evaluate_statement(arg) for arg in node.args.arguments]
elif isinstance(node, mparser.IdNode):
return self.scope[node.value]
elif isinstance(node, mparser.ArithmeticNode):
l = self._evaluate_statement(node.left)
r = self._evaluate_statement(node.right)
if node.operation == 'add':
if (isinstance(l, str) and isinstance(r, str)) or \
(isinstance(l, list) and isinstance(r, list)):
return l + r
elif node.operation == 'div':
if isinstance(l, str) and isinstance(r, str):
return os.path.join(l, r)
raise EnvironmentException('Unsupported node type')
def parse_machine_files(filenames):
parser = MachineFileParser(filenames)
return parser.sections
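def _example_machine_file() -> str:
    # Illustrative machine-file snippet (hypothetical paths) showing what
    # MachineFileParser accepts: the [constants] section is resolved first, so later
    # sections can refer to the constants by name and combine them with '/' (joined
    # as a path via os.path.join) or '+' (string/list concatenation).
    return """
[constants]
toolchain = '/opt/toolchain'

[binaries]
c = toolchain / 'bin' / 'gcc'
"""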
def get_cmd_line_file(build_dir):
return os.path.join(build_dir, 'meson-private', 'cmd_line.txt')
def read_cmd_line_file(build_dir, options):
filename = get_cmd_line_file(build_dir)
if not os.path.isfile(filename):
return
config = CmdLineFileParser()
config.read(filename)
# Do a copy because config is not really a dict. options.cmd_line_options
# overrides values from the file.
d = dict(config['options'])
d.update(options.cmd_line_options)
options.cmd_line_options = d
properties = config['properties']
if not options.cross_file:
options.cross_file = ast.literal_eval(properties.get('cross_file', '[]'))
if not options.native_file:
# This will be a string in the form: "['first', 'second', ...]", use
# literal_eval to get it into the list of strings.
options.native_file = ast.literal_eval(properties.get('native_file', '[]'))
def cmd_line_options_to_string(options):
return {k: str(v) for k, v in options.cmd_line_options.items()}
def write_cmd_line_file(build_dir, options):
filename = get_cmd_line_file(build_dir)
config = CmdLineFileParser()
properties = OrderedDict()
if options.cross_file:
properties['cross_file'] = options.cross_file
if options.native_file:
properties['native_file'] = options.native_file
config['options'] = cmd_line_options_to_string(options)
config['properties'] = properties
with open(filename, 'w') as f:
config.write(f)
def update_cmd_line_file(build_dir, options):
filename = get_cmd_line_file(build_dir)
config = CmdLineFileParser()
config.read(filename)
config['options'].update(cmd_line_options_to_string(options))
with open(filename, 'w') as f:
config.write(f)
def get_cmd_line_options(build_dir, options):
copy = argparse.Namespace(**vars(options))
read_cmd_line_file(build_dir, copy)
cmdline = ['-D{}={}'.format(k, v) for k, v in copy.cmd_line_options.items()]
if options.cross_file:
cmdline += ['--cross-file {}'.format(f) for f in options.cross_file]
if options.native_file:
cmdline += ['--native-file {}'.format(f) for f in options.native_file]
return ' '.join([shlex.quote(x) for x in cmdline])
def major_versions_differ(v1, v2):
return v1.split('.')[0:2] != v2.split('.')[0:2]
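def _example_major_versions_differ() -> None:
    # Quick illustration (not called anywhere): only the first two version components
    # are compared, so patch releases never invalidate an existing build directory.
    assert not major_versions_differ('0.55.3', '0.55.0')
    assert major_versions_differ('0.55.3', '0.56.0')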
def load(build_dir):
filename = os.path.join(build_dir, 'meson-private', 'coredata.dat')
load_fail_msg = 'Coredata file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
try:
with open(filename, 'rb') as f:
obj = pickle.load(f)
except (pickle.UnpicklingError, EOFError):
raise MesonException(load_fail_msg)
except AttributeError:
raise MesonException(
"Coredata file {!r} references functions or classes that don't "
"exist. This probably means that it was generated with an old "
"version of meson.".format(filename))
if not isinstance(obj, CoreData):
raise MesonException(load_fail_msg)
if major_versions_differ(obj.version, version):
raise MesonException('Build directory has been generated with Meson version %s, '
'which is incompatible with current version %s.\n' %
(obj.version, version))
return obj
def save(obj, build_dir):
filename = os.path.join(build_dir, 'meson-private', 'coredata.dat')
prev_filename = filename + '.prev'
tempfilename = filename + '~'
if major_versions_differ(obj.version, version):
raise MesonException('Fatal version mismatch corruption.')
if os.path.exists(filename):
import shutil
shutil.copyfile(filename, prev_filename)
with open(tempfilename, 'wb') as f:
pickle.dump(obj, f)
f.flush()
os.fsync(f.fileno())
os.replace(tempfilename, filename)
return filename
def register_builtin_arguments(parser):
for n, b in builtin_options.items():
b.add_to_argparse(n, parser, '', '')
for n, b in builtin_options_per_machine.items():
b.add_to_argparse(n, parser, '', ' (just for host machine)')
b.add_to_argparse(n, parser, 'build.', ' (just for build machine)')
parser.add_argument('-D', action='append', dest='projectoptions', default=[], metavar="option",
help='Set the value of an option, can be used several times to set multiple options.')
def create_options_dict(options):
result = OrderedDict()
for o in options:
try:
(key, value) = o.split('=', 1)
except ValueError:
raise MesonException('Option {!r} must have a value separated by equals sign.'.format(o))
result[key] = value
return result
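def _example_create_options_dict() -> None:
    # Quick illustration (not called anywhere): each -D argument is split on the
    # first '=' only, so values that themselves contain '=' are preserved intact.
    opts = create_options_dict(['buildtype=release', 'cpp_args=-DFOO=1'])
    assert opts == {'buildtype': 'release', 'cpp_args': '-DFOO=1'}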
def parse_cmd_line_options(args):
args.cmd_line_options = create_options_dict(args.projectoptions)
# Merge builtin options set with --option into the dict.
for name in chain(
builtin_options.keys(),
('build.' + k for k in builtin_options_per_machine.keys()),
builtin_options_per_machine.keys(),
):
value = getattr(args, name, None)
if value is not None:
if name in args.cmd_line_options:
cmdline_name = BuiltinOption.argparse_name_to_arg(name)
raise MesonException(
'Got argument {0} as both -D{0} and {1}. Pick one.'.format(name, cmdline_name))
args.cmd_line_options[name] = value
delattr(args, name)
_U = T.TypeVar('_U', bound=UserOption[_T])
class BuiltinOption(T.Generic[_T, _U]):
"""Class for a builtin option type.
There are some cases that are not fully supported yet.
"""
def __init__(self, opt_type: T.Type[_U], description: str, default: T.Any, yielding: bool = True, *,
choices: T.Any = None):
self.opt_type = opt_type
self.description = description
self.default = default
self.choices = choices
self.yielding = yielding
def init_option(self, name: str, value: T.Optional[T.Any], prefix: str) -> _U:
"""Create an instance of opt_type and return it."""
if value is None:
value = self.prefixed_default(name, prefix)
keywords = {'yielding': self.yielding, 'value': value}
if self.choices:
keywords['choices'] = self.choices
return self.opt_type(self.description, **keywords)
def _argparse_action(self) -> T.Optional[str]:
# If the type is a boolean, the presence of the argument in --foo form
# is to enable it. Disabling happens by using -Dfoo=false, which is
# parsed under `args.projectoptions` and does not hit this codepath.
if isinstance(self.default, bool):
return 'store_true'
return None
def _argparse_choices(self) -> T.Any:
if self.opt_type is UserBooleanOption:
return [True, False]
elif self.opt_type is UserFeatureOption:
return UserFeatureOption.static_choices
return self.choices
@staticmethod
def argparse_name_to_arg(name: str) -> str:
if name == 'warning_level':
return '--warnlevel'
else:
return '--' + name.replace('_', '-')
def prefixed_default(self, name: str, prefix: str = '') -> T.Any:
if self.opt_type in [UserComboOption, UserIntegerOption]:
return self.default
try:
return builtin_dir_noprefix_options[name][prefix]
except KeyError:
pass
return self.default
def add_to_argparse(self, name: str, parser: argparse.ArgumentParser, prefix: str, help_suffix: str) -> None:
kwargs = OrderedDict()
c = self._argparse_choices()
b = self._argparse_action()
h = self.description
if not b:
h = '{} (default: {}).'.format(h.rstrip('.'), self.prefixed_default(name))
else:
kwargs['action'] = b
if c and not b:
kwargs['choices'] = c
kwargs['default'] = argparse.SUPPRESS
kwargs['dest'] = prefix + name
cmdline_name = self.argparse_name_to_arg(prefix + name)
parser.add_argument(cmdline_name, help=h + help_suffix, **kwargs)
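def _example_argparse_names() -> None:
    # Illustration only: how builtin option names map onto command-line flags via
    # argparse_name_to_arg() -- underscores become dashes, with 'warning_level'
    # special-cased to the historical '--warnlevel' spelling.
    assert BuiltinOption.argparse_name_to_arg('default_library') == '--default-library'
    assert BuiltinOption.argparse_name_to_arg('warning_level') == '--warnlevel'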
# Update `docs/markdown/Builtin-options.md` after changing the options below
builtin_options = OrderedDict([
# Directories
('prefix', BuiltinOption(UserStringOption, 'Installation prefix', default_prefix())),
('bindir', BuiltinOption(UserStringOption, 'Executable directory', 'bin')),
('datadir', BuiltinOption(UserStringOption, 'Data file directory', 'share')),
('includedir', BuiltinOption(UserStringOption, 'Header file directory', 'include')),
('infodir', BuiltinOption(UserStringOption, 'Info page directory', 'share/info')),
('libdir', BuiltinOption(UserStringOption, 'Library directory', default_libdir())),
('libexecdir', BuiltinOption(UserStringOption, 'Library executable directory', default_libexecdir())),
('localedir', BuiltinOption(UserStringOption, 'Locale data directory', 'share/locale')),
('localstatedir', BuiltinOption(UserStringOption, 'Localstate data directory', 'var')),
('mandir', BuiltinOption(UserStringOption, 'Manual page directory', 'share/man')),
('sbindir', BuiltinOption(UserStringOption, 'System executable directory', 'sbin')),
('sharedstatedir', BuiltinOption(UserStringOption, 'Architecture-independent data directory', 'com')),
('sysconfdir', BuiltinOption(UserStringOption, 'Sysconf data directory', 'etc')),
# Core options
('auto_features', BuiltinOption(UserFeatureOption, "Override value of all 'auto' features", 'auto')),
('backend', BuiltinOption(UserComboOption, 'Backend to use', 'ninja', choices=backendlist)),
('buildtype', BuiltinOption(UserComboOption, 'Build type to use', 'debug',
choices=['plain', 'debug', 'debugoptimized', 'release', 'minsize', 'custom'])),
('debug', BuiltinOption(UserBooleanOption, 'Debug', True)),
('default_library', BuiltinOption(UserComboOption, 'Default library type', 'shared', choices=['shared', 'static', 'both'],
yielding=False)),
('errorlogs', BuiltinOption(UserBooleanOption, "Whether to print the logs from failing tests", True)),
('install_umask', BuiltinOption(UserUmaskOption, 'Default umask to apply on permissions of installed files', '022')),
('layout', BuiltinOption(UserComboOption, 'Build directory layout', 'mirror', choices=['mirror', 'flat'])),
('optimization', BuiltinOption(UserComboOption, 'Optimization level', '0', choices=['0', 'g', '1', '2', '3', 's'])),
('stdsplit', BuiltinOption(UserBooleanOption, 'Split stdout and stderr in test logs', True)),
('strip', BuiltinOption(UserBooleanOption, 'Strip targets on install', False)),
('unity', BuiltinOption(UserComboOption, 'Unity build', 'off', choices=['on', 'off', 'subprojects'])),
('unity_size', BuiltinOption(UserIntegerOption, 'Unity block size', (2, None, 4))),
('warning_level', BuiltinOption(UserComboOption, 'Compiler warning level to use', '1', choices=['0', '1', '2', '3'])),
('werror', BuiltinOption(UserBooleanOption, 'Treat warnings as errors', False, yielding=False)),
('wrap_mode', BuiltinOption(UserComboOption, 'Wrap mode', 'default', choices=['default', 'nofallback', 'nodownload', 'forcefallback'])),
('force_fallback_for', BuiltinOption(UserArrayOption, 'Force fallback for those subprojects', [])),
])
builtin_options_per_machine = OrderedDict([
('pkg_config_path', BuiltinOption(UserArrayOption, 'List of additional paths for pkg-config to search', [])),
('cmake_prefix_path', BuiltinOption(UserArrayOption, 'List of additional prefixes for cmake to search', [])),
])
# Special prefix-dependent defaults for installation directories that reside in
# a path outside of the prefix in FHS and common usage.
builtin_dir_noprefix_options = {
'sysconfdir': {'/usr': '/etc'},
'localstatedir': {'/usr': '/var', '/usr/local': '/var/local'},
'sharedstatedir': {'/usr': '/var/lib', '/usr/local': '/var/local/lib'},
}
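def _example_prefixed_default() -> None:
    # Illustration (not called anywhere) of the prefix-dependent defaults above:
    # with the common '/usr' prefix, sysconfdir defaults to the FHS '/etc' rather
    # than '/usr/etc', while an uncommon prefix falls back to the plain 'etc'.
    assert builtin_options['sysconfdir'].prefixed_default('sysconfdir', '/usr') == '/etc'
    assert builtin_options['sysconfdir'].prefixed_default('sysconfdir', '/opt/example') == 'etc'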
forbidden_target_names = {'clean': None,
'clean-ctlist': None,
'clean-gcno': None,
'clean-gcda': None,
'coverage': None,
'coverage-text': None,
'coverage-xml': None,
'coverage-html': None,
'phony': None,
'PHONY': None,
'all': None,
'test': None,
'benchmark': None,
'install': None,
'uninstall': None,
'build.ninja': None,
'scan-build': None,
'reconfigure': None,
'dist': None,
'distcheck': None,
}
|
|
# encoding: utf-8
from __future__ import unicode_literals
try:  # Python 3.3+ moved the ABCs into collections.abc.
    from collections.abc import Mapping
except ImportError:  # pragma: no cover - Python 2 fallback
    from collections import Mapping
from functools import reduce
from operator import and_
from pymongo.cursor import CursorType
from ... import F, Filter, P, S
from ...trait import Collection
from ....schema.compat import odict
from ....package.loader import traverse
class Queryable(Collection):
"""EXPERIMENTAL: Extend active collection behaviours to include querying."""
UNIVERSAL_OPTIONS = {
'collation',
'limit',
'projection',
'skip',
'sort',
}
FIND_OPTIONS = UNIVERSAL_OPTIONS | {
'allow_partial_results',
'batch_size',
'cursor_type',
'max_time_ms', # translated -> modifiers['$maxTimeMS']
'modifiers',
'no_cursor_timeout',
'oplog_replay',
'tail',
'wait',
'await', # Reserved in Python 3.7+
}
FIND_MAPPING = {
'allowPartialResults': 'allow_partial_results',
'batchSize': 'batch_size',
'cursorType': 'cursor_type',
'maxTimeMS': 'max_time_ms', # See above.
'maxTimeMs': 'max_time_ms', # Common typo.
'noCursorTimeout': 'no_cursor_timeout',
'oplogReplay': 'oplog_replay',
}
AGGREGATE_OPTIONS = UNIVERSAL_OPTIONS | {
'allowDiskUse',
'batchSize',
'maxTimeMS',
'useCursor',
}
AGGREGATE_MAPPING = {
'allow_disk_use': 'allowDiskUse',
'batch_size': 'batchSize',
'maxTimeMs': 'maxTimeMS', # Common typo.
'max_time_ms': 'maxTimeMS',
'use_cursor': 'useCursor',
}
@classmethod
def _prepare_query(cls, mapping, valid, *args, **kw):
"""Process arguments to query methods. For internal use only.
Positional arguments are treated as query components, combined using boolean AND reduction.
Keyword arguments are processed depending on the passed in mapping and set of valid options, with non-
option arguments treated as parametric query components, also ANDed with any positionally passed query
components.
Parametric querying with explicit `__eq` against these "reserved words" is possible to work around their
reserved-ness.
Querying options for find and aggregate may differ in use of under_score or camelCase formatting; this
helper removes the distinction and allows either.
"""
collection = cls.get_collection(kw.pop('source', None))
query = Filter(document=cls, collection=collection)
options = {}
if args:
query &= reduce(and_, args)
# Gather any valid options.
for key in tuple(kw):
name = mapping.get(key, key)
if name in valid:
options[name] = kw.pop(key)
# Support parametric projection via the use of iterables of strings in the form 'field' or '-field',
# with name resolution. See the documentation for P for details.
if 'projection' in options and not isinstance(options['projection'], Mapping):
options['projection'] = P(cls, *options['projection'])
# Support parametric sorting via the use of iterables of strings. See the documentation for S for details.
if 'sort' in options:
options['sort'] = S(cls, *options['sort'])
if kw: # Remainder are parametric query fragments.
query &= F(cls, **kw)
return cls, collection, query, options
@classmethod
def _prepare_find(cls, *args, **kw):
"""Execute a find and return the resulting queryset using combined plain and parametric query generation.
Additionally, performs argument case normalization, refer to the `_prepare_query` method's docstring.
"""
cls, collection, query, options = cls._prepare_query(
cls.FIND_MAPPING,
cls.FIND_OPTIONS,
*args,
**kw
)
if 'await' in options:
raise TypeError("Await is hard-deprecated as reserved keyword in Python 3.7, use wait instead.")
if 'cursor_type' in options and {'tail', 'wait'} & set(options):
raise TypeError("Can not combine cursor_type and tail/wait arguments.")
elif options.pop('tail', False):
options['cursor_type'] = CursorType.TAILABLE_AWAIT if options.pop('wait', True) else CursorType.TAILABLE
elif 'wait' in options:
raise TypeError("Wait option only applies to tailing cursors.")
modifiers = options.get('modifiers', dict())
if 'max_time_ms' in options:
modifiers['$maxTimeMS'] = options.pop('max_time_ms')
if modifiers:
options['modifiers'] = modifiers
return cls, collection, query, options
@classmethod
def _prepare_aggregate(cls, *args, **kw):
"""Generate and execute an aggregate query pipline using combined plain and parametric query generation.
Additionally, performs argument case normalization, refer to the `_prepare_query` method's docstring.
This provides a find-like interface for generating aggregate pipelines with a few shortcuts that make
aggregates behave more like "find, optionally with more steps". Positional arguments that are not Filter
instances are assumed to be aggregate pipeline stages.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.aggregate
"""
stages = []
stage_args = []
fragments = []
for arg in args: # Split the positional arguments into filter fragments and projection stages.
(fragments if isinstance(arg, Filter) else stage_args).append(arg)
cls, collection, query, options = cls._prepare_query(
cls.AGGREGATE_MAPPING,
cls.AGGREGATE_OPTIONS,
*fragments,
**kw
)
if query:
stages.append({'$match': query})
stages.extend(stage_args)
if 'sort' in options: # Convert the find-like option to a stage with the correct semantics.
stages.append({'$sort': odict(options.pop('sort'))})
if 'skip' in options: # Note: Sort + limit memory optimization invalidated when skipping.
stages.append({'$skip': options.pop('skip')})
if 'limit' in options:
stages.append({'$limit': options.pop('limit')})
if 'projection' in options:
stages.append({'$project': options.pop('projection')})
return cls, collection, stages, options
@classmethod
def find(cls, *args, **kw):
"""Query the collection this class is bound to.
Additional arguments are processed according to `_prepare_find` prior to passing to PyMongo, where positional
parameters are interpreted as query fragments, parametric keyword arguments combined, and other keyword
arguments passed along with minor transformation.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find
"""
Doc, collection, query, options = cls._prepare_find(*args, **kw)
return collection.find(query, **options)
@classmethod
def find_one(cls, *args, **kw):
"""Get a single document from the collection this class is bound to.
Additional arguments are processed according to `_prepare_find` prior to passing to PyMongo, where positional
parameters are interpreted as query fragments, parametric keyword arguments combined, and other keyword
arguments passed along with minor transformation.
Automatically calls `from_mongo` over the retrieved data to return an instance of the model.
For simple "by ID" lookups, instead of calling `Model.find_one(identifier)` use the short-hand notation that
treats your model as a Python-native collection as of Python 3.7: (most familiar as used in type annotation)
Model[identifier]
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find_one
https://www.python.org/dev/peps/pep-0560/#class-getitem
"""
if len(args) == 1 and not isinstance(args[0], Filter):
args = (getattr(cls, cls.__pk__) == args[0], )
Doc, collection, query, options = cls._prepare_find(*args, **kw)
result = Doc.from_mongo(collection.find_one(query, **options))
return result
# Alias this to conform to Python-native "Collection" API: https://www.python.org/dev/peps/pep-0560/#class-getitem
__class_getitem__ = find_one # Useful on Python 3.7 or above.
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find_one_and_delete
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find_one_and_replace
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find_one_and_update
@classmethod
def find_in_sequence(cls, field, order, *args, **kw):
"""Return a QuerySet iterating the results of a query in a defined order. Technically an aggregate.
		Requires MongoDB 3.4 or later; on older servers the requested document order will not be preserved.
Based on the technique described here: http://s.webcore.io/2O3i0N2E3h0r
See also: https://jira.mongodb.org/browse/SERVER-7528
"""
field = traverse(cls, field)
order = list(order) # We need to coalesce the value to prepare for multiple uses.
kw['sort'] = {'__order': 1}
kw.setdefault('projection', {'__order': 0})
cls, collection, stages, options = cls._prepare_aggregate(
field.any(order),
{'$addFields': {'__order': {'$indexOfArray': [order, '$' + ~field]}}},
*args,
**kw
)
if __debug__: # noqa
# This "foot shot avoidance" check requires a server round-trip, potentially, so we only do this in dev.
if tuple(collection.database.client.server_info()['versionArray'][:2]) < (3, 4): # pragma: no cover
raise RuntimeError("Queryable.find_in_sequence only works against MongoDB server versions 3.4 or newer.")
return collection.aggregate(stages, **options)
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.count
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.distinct
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.group
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.map_reduce
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.inline_map_reduce
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.parallel_scan
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.initialize_unordered_bulk_op
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.initialize_ordered_bulk_op
def reload(self, *fields, **kw):
"""Reload the entire document from the database, or refresh specific named top-level fields."""
Doc, collection, query, options = self._prepare_find(id=self.id, projection=fields, **kw)
result = collection.find_one(query, **options)
if fields: # Refresh only the requested data.
for k in result: # TODO: Better merge algorithm.
if k == ~Doc.id: continue
self.__data__[k] = result[k]
else:
self.__data__ = result
return self
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.insert_many
#def replace(self, *args, **kw):
# """Replace a single document matching the filter with this document, passing additional arguments to PyMongo.
#
# **Warning:** Be careful if the current document has only been partially projected, as the omitted fields will
# either be dropped or have their default values saved where `assign` is `True`.
#
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.replace_one
# """
# pass
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.replace_one
# update
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_many
# delete
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_one
# https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many
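# A minimal standalone sketch (independent of the class above, with made-up helper
# names) of the option normalisation performed by Queryable._prepare_query: keyword
# arguments whose camelCase or snake_case name resolves to a known driver option are
# collected as options, and everything left over would become parametric query
# fragments.
def _example_split_options(mapping, valid, **kw):
	options = {}
	for key in tuple(kw):
		name = mapping.get(key, key)
		if name in valid:
			options[name] = kw.pop(key)
	return options, kw

# e.g. _example_split_options(Queryable.FIND_MAPPING, Queryable.FIND_OPTIONS,
#      batchSize=10, age__gt=21) -> ({'batch_size': 10}, {'age__gt': 21})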
|
|
import subprocess
import sys
import os
try:
    from StringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, root_dir)
from fortls.jsonrpc import write_rpc_request, write_rpc_notification, \
read_rpc_messages, path_to_uri
run_command = os.path.join(root_dir, "fortls.py --incrmental_sync --use_signature_help")
test_dir = os.path.join(root_dir, "test", "test_source")
def run_request(request):
pid = subprocess.Popen(run_command, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
results = pid.communicate(input=request.encode())
tmp_file = StringIO(results[0].decode())
results = read_rpc_messages(tmp_file)
parsed_results = []
for result in results:
if "method" in result:
continue
parsed_results.append(result['result'])
errcode = pid.poll()
return errcode, parsed_results
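def _example_rpc_frame(method, params, msg_id=1):
    # Minimal sketch (not used by the tests above) of standard JSON-RPC/LSP framing:
    # a Content-Length header, a blank line, then the JSON body.  It only illustrates
    # the wire format the fortls.jsonrpc helpers speak; it is not a re-implementation
    # of write_rpc_request.
    import json
    body = json.dumps({"jsonrpc": "2.0", "id": msg_id, "method": method, "params": params})
    data = body.encode("utf-8")
    return "Content-Length: {}\r\n\r\n".format(len(data)).encode("utf-8") + data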
def test_init():
def check_return(result_dict):
# Expected capabilities
# {
# "completionProvider": {
# "resolveProvider": false,
# "triggerCharacters": ["%"]
# },
# "definitionProvider": true,
# "documentSymbolProvider": true,
# "referencesProvider": True,
# "hoverProvider": true,
# "textDocumentSync": 2
# }
#
assert "capabilities" in result_dict
assert result_dict["capabilities"]["textDocumentSync"] == 2
assert result_dict["capabilities"]["definitionProvider"] is True
assert result_dict["capabilities"]["documentSymbolProvider"] is True
assert result_dict["capabilities"]["hoverProvider"] is True
assert result_dict["capabilities"]["referencesProvider"] is True
assert result_dict["capabilities"]["completionProvider"]["resolveProvider"] is False
assert result_dict["capabilities"]["completionProvider"]["triggerCharacters"][0] == "%"
#
string = write_rpc_request(1, "initialize", {"rootPath": test_dir})
errcode, results = run_request(string)
#
assert errcode == 0
check_return(results[0])
def test_open():
string = write_rpc_request(1, "initialize", {"rootPath": test_dir})
file_path = os.path.join(test_dir, "subdir", "test_free.f90")
string += write_rpc_notification("textDocument/didOpen", {
"textDocument": {"uri": file_path}
})
errcode, results = run_request(string)
#
assert errcode == 0
assert len(results) == 1
def test_change():
string = write_rpc_request(1, "initialize", {"rootPath": test_dir})
file_path = os.path.join(test_dir, "subdir", "test_unknown.f90")
string += write_rpc_notification("textDocument/didOpen", {
"textDocument": {"uri": file_path}
})
string += write_rpc_notification("textDocument/didChange", {
"textDocument": {"uri": file_path},
"contentChanges": [{
"text": "module test_unkown\nend module test_unknown\n",
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 0, "character": 0}
}
}]
})
string += write_rpc_request(2, "textDocument/documentSymbol", {
"textDocument": {"uri": file_path}
})
file_path = os.path.join(test_dir, "subdir", "test_free.f90")
string += write_rpc_notification("textDocument/didChange", {
"textDocument": {"uri": file_path},
"contentChanges": [{
"text": " unicode test",
"range": {
"start": {"line": 3, "character": 3},
"end": {"line": 3, "character": 3}
}
},
{
"text": "",
"range": {
"start": {"line": 6, "character": 0},
"end": {"line": 31, "character": 0}
}
},
{
"text": "",
"range": {
"start": {"line": 7, "character": 0},
"end": {"line": 39, "character": 0}
}
}]
})
string += write_rpc_request(3, "textDocument/documentSymbol", {
"textDocument": {"uri": file_path}
})
errcode, results = run_request(string)
#
assert errcode == 0
assert len(results) == 3
assert len(results[1]) == 1
assert len(results[2]) == 5
def test_symbols():
def check_return(result_array):
# Expected objects
objs = (
["test_free", 2, 0, 79],
["scale_type", 5, 4, 6],
["val", 13, 5, 5],
["vector", 5, 8, 16],
["n", 13, 9, 9],
["v", 13, 10, 10],
["bound_nopass", 6, 11, 11],
["create", 6, 13, 13],
["norm", 6, 14, 14],
["bound_pass", 6, 15, 15],
["scaled_vector", 5, 18, 23],
["scale", 13, 19, 19],
["set_scale", 6, 21, 21],
["norm", 6, 22, 22],
["fort_wrap", 11, 26, 29],
["vector_create", 12, 35, 41],
["vector_norm", 12, 43, 47],
["scaled_vector_set", 12, 49, 53],
["scaled_vector_norm", 12, 55, 59],
["unscaled_norm", 12, 61, 65],
["test_sig_Sub", 12, 67, 70],
["bound_pass", 12, 72, 78]
)
assert len(result_array) == len(objs)
for i, obj in enumerate(objs):
assert result_array[i]["name"] == obj[0]
assert result_array[i]["kind"] == obj[1]
assert result_array[i]["location"]["range"]["start"]["line"] == obj[2]
assert result_array[i]["location"]["range"]["end"]["line"] == obj[3]
#
string = write_rpc_request(1, "initialize", {"rootPath": test_dir})
file_path = os.path.join(test_dir, "subdir", "test_free.f90")
string += write_rpc_request(2, "textDocument/documentSymbol", {
"textDocument": {"uri": file_path}
})
errcode, results = run_request(string)
#
assert errcode == 0
check_return(results[1])
def test_workspace_symbols():
def check_return(result_array):
# Expected objects
objs = (
["test", 6, 7],
["test_abstract", 2, 0],
["test_free", 2, 0],
["test_gen_type", 5, 1],
["test_generic", 2, 0],
["test_inherit", 2, 0],
["test_mod", 2, 0],
["test_program", 2, 0],
["test_rename_sub", 6, 9],
["test_select", 2, 0],
["test_select_sub", 6, 16],
["test_sig_Sub", 6, 67],
["test_str1", 13, 5],
["test_str2", 13, 5],
["test_sub", 6, 8],
["test_vis_mod", 2, 0]
)
assert len(result_array) == len(objs)
for i, obj in enumerate(objs):
assert result_array[i]["name"] == obj[0]
assert result_array[i]["kind"] == obj[1]
assert result_array[i]["location"]["range"]["start"]["line"] == obj[2]
#
string = write_rpc_request(1, "initialize", {"rootPath": test_dir})
string += write_rpc_request(2, "workspace/symbol", {
"query": "test"
})
errcode, results = run_request(string)
#
assert errcode == 0
check_return(results[1])
def test_comp():
def check_return(result_array, checks):
assert len(result_array) == checks[0]
if checks[0] > 0:
assert result_array[0]["label"] == checks[1]
assert result_array[0]["detail"] == checks[2]
def comp_request(file_path, line, char):
return write_rpc_request(1, "textDocument/completion", {
"textDocument": {"uri": file_path},
"position": {"line": line, "character": char}
})
#
string = write_rpc_request(1, "initialize", {"rootPath": test_dir})
file_path = os.path.join(test_dir, "test_prog.f08")
string += comp_request(file_path, 12, 6)
string += comp_request(file_path, 13, 6)
string += comp_request(file_path, 17, 24)
string += comp_request(file_path, 18, 23)
string += comp_request(file_path, 20, 7)
string += comp_request(file_path, 21, 20)
string += comp_request(file_path, 21, 42)
string += comp_request(file_path, 23, 26)
file_path = os.path.join(test_dir, "subdir", "test_submod.F90")
string += comp_request(file_path, 30, 12)
string += comp_request(file_path, 31, 8)
string += comp_request(file_path, 31, 23)
string += comp_request(file_path, 35, 12)
string += comp_request(file_path, 36, 48)
file_path = os.path.join(test_dir, "test_inc.f90")
string += comp_request(file_path, 10, 2)
file_path = os.path.join(test_dir, "subdir", "test_inc2.f90")
string += comp_request(file_path, 3, 2)
file_path = os.path.join(test_dir, "subdir", "test_abstract.f90")
string += comp_request(file_path, 7, 12)
file_path = os.path.join(test_dir, "subdir", "test_free.f90")
string += comp_request(file_path, 10, 22)
string += comp_request(file_path, 28, 14)
file_path = os.path.join(test_dir, "subdir", "test_fixed.f")
string += comp_request(file_path, 15, 8)
string += comp_request(file_path, 15, 21)
file_path = os.path.join(test_dir, "subdir", "test_select.f90")
string += comp_request(file_path, 21, 7)
string += comp_request(file_path, 23, 7)
string += comp_request(file_path, 25, 7)
string += comp_request(file_path, 30, 7)
file_path = os.path.join(test_dir, "test_block.f08")
string += comp_request(file_path, 2, 2)
string += comp_request(file_path, 5, 4)
string += comp_request(file_path, 8, 6)
file_path = os.path.join(test_dir, "subdir", "test_generic.f90")
string += comp_request(file_path, 14, 10)
file_path = os.path.join(test_dir, "subdir", "test_inherit.f90")
string += comp_request(file_path, 10, 11)
file_path = os.path.join(test_dir, "subdir", "test_rename.F90")
string += comp_request(file_path, 13, 5)
string += comp_request(file_path, 14, 5)
file_path = os.path.join(test_dir, "subdir", "test_vis.f90")
string += comp_request(file_path, 8, 10)
errcode, results = run_request(string)
assert errcode == 0
#
exp_results = (
# test_prog.f08
[1, "myfun", "DOUBLE PRECISION FUNCTION myfun(n, xval)"],
[4, "glob_sub", "SUBROUTINE glob_sub(n, xval, yval)"],
[1, "bound_nopass", "SUBROUTINE bound_nopass(a, b)"],
[1, "bound_pass", "SUBROUTINE bound_pass(arg1)"],
[1, "stretch_vector", "TYPE(scaled_vector)"],
[6, "scale", "TYPE(scale_type)"],
[2, "n", "INTEGER(4)"],
[1, "val", "REAL(8)"],
# subdir/test_submod.F90
[1, "point", "TYPE"],
[1, "distance", "REAL"],
[2, "x", "REAL"],
[1, "point", "TYPE"],
[2, "x", "REAL"],
# test_inc.f90
[2, "val1", "REAL(8)"],
# subdir/test_inc2.f90
[2, "val1", "REAL(8)"],
# subdir/test_abstract.f90
[1, "abs_interface", "SUBROUTINE"],
# subdir/test_free.f90
[1, "DIMENSION(:)", "KEYWORD"],
[3, "INTENT(IN)", "KEYWORD"],
# subdir/test_fixed.f90
[1, "bob", "CHARACTER*(LEN=200)"],
[1, "dave", "CHARACTER*(20)"],
# subdir/test_select.f90
[2, "a", "REAL(8)"],
[2, "a", "COMPLEX(8)"],
[1, "n", "INTEGER(4)"],
[2, "a", "REAL(8)"],
# test_block.f08
[7, "READ", "STATEMENT"],
[8, "READ", "STATEMENT"],
[9, "READ", "STATEMENT"],
# subdir/test_generic.f90
[4, "my_gen", "SUBROUTINE my_gen(self, a, b)"],
# subdir/test_inherit.f90
[1, "val", "REAL(8)"],
# subdir/test_rename.F90
[1, "localname", "INTEGER"],
[1, "renamed_var2", "REAL(8)"],
# subdir/test_vis.f90
[3, "some_type", "TYPE"]
)
assert len(exp_results)+1 == len(results)
for i in range(len(exp_results)):
check_return(results[i+1], exp_results[i])
def test_sig():
def check_return(results, checks):
assert results.get('activeParameter', -1) == checks[0]
signatures = results.get('signatures')
assert signatures[0].get('label') == checks[2]
assert len(signatures[0].get('parameters')) == checks[1]
def sig_request(file_path, line, char):
return write_rpc_request(1, "textDocument/signatureHelp", {
"textDocument": {"uri": file_path},
"position": {"line": line, "character": char}
})
#
string = write_rpc_request(1, "initialize", {"rootPath": test_dir})
file_path = os.path.join(test_dir, "test_prog.f08")
string += sig_request(file_path, 25, 18)
string += sig_request(file_path, 25, 20)
string += sig_request(file_path, 25, 22)
string += sig_request(file_path, 25, 27)
string += sig_request(file_path, 25, 29)
errcode, results = run_request(string)
assert errcode == 0
#
sub_sig = "test_sig_Sub(arg1, arg2, opt1=opt1, opt2=opt2, opt3=opt3)"
exp_results = (
[0, 5, sub_sig],
[1, 5, sub_sig],
[2, 5, sub_sig],
[3, 5, sub_sig],
[4, 5, sub_sig]
)
assert len(exp_results)+1 == len(results)
for i in range(len(exp_results)):
check_return(results[i+1], exp_results[i])
def test_def():
def check_return(result_array, checks):
assert result_array["uri"] == path_to_uri(checks[2])
assert result_array["range"]["start"]["line"] == checks[0]
assert result_array["range"]["start"]["line"] == checks[1]
def def_request(file_path, line, char):
return write_rpc_request(1, "textDocument/definition", {
"textDocument": {"uri": file_path},
"position": {"line": line, "character": char}
})
#
string = write_rpc_request(1, "initialize", {"rootPath": test_dir})
file_path = os.path.join(test_dir, "test_prog.f08")
string += def_request(file_path, 12, 6)
string += def_request(file_path, 13, 6)
string += def_request(file_path, 20, 7)
string += def_request(file_path, 21, 20)
string += def_request(file_path, 21, 42)
string += def_request(file_path, 23, 26)
file_path = os.path.join(test_dir, "subdir", "test_submod.F90")
string += def_request(file_path, 30, 12)
string += def_request(file_path, 35, 12)
file_path = os.path.join(test_dir, "test_inc.f90")
string += def_request(file_path, 10, 2)
file_path = os.path.join(test_dir, "subdir", "test_inc2.f90")
string += def_request(file_path, 3, 2)
file_path = os.path.join(test_dir, "subdir", "test_rename.F90")
string += def_request(file_path, 13, 5)
string += def_request(file_path, 14, 5)
errcode, results = run_request(string)
assert errcode == 0
#
fixed_path = os.path.join(test_dir, "subdir", "test_fixed.f")
free_path = os.path.join(test_dir, "subdir", "test_free.f90")
exp_results = (
# test_prog.f08
[0, 0, fixed_path],
[22, 22, fixed_path],
[10, 10, os.path.join(test_dir, "test_prog.f08")],
[21, 21, free_path],
[14, 14, free_path],
[5, 5, free_path],
# subdir/test_submod.F90
[1, 1, os.path.join(test_dir, "subdir", "test_submod.F90")],
[1, 1, os.path.join(test_dir, "subdir", "test_submod.F90")],
# test_inc.f90
[0, 0, os.path.join(test_dir, "subdir", "test_inc2.f90")],
# subdir/test_inc2.f90
[4, 4, os.path.join(test_dir, "test_inc.f90")],
# subdir/test_rename.F90
[6, 6, os.path.join(test_dir, "subdir", "test_rename.F90")],
[1, 1, os.path.join(test_dir, "subdir", "test_rename.F90")]
)
assert len(exp_results)+1 == len(results)
for i in range(len(exp_results)):
check_return(results[i+1], exp_results[i])
def test_refs():
def check_return(result_array, checks):
def find_in_results(uri, sline):
for (i, result) in enumerate(result_array):
if (result["uri"] == uri) and (result["range"]["start"]["line"] == sline):
del result_array[i]
return result
return None
assert len(result_array) == len(checks)
for check in checks:
result = find_in_results(path_to_uri(check[0]), check[1])
assert (result is not None)
assert result["range"]["start"]["character"] == check[2]
assert result["range"]["end"]["character"] == check[3]
#
string = write_rpc_request(1, "initialize", {"rootPath": test_dir})
file_path = os.path.join(test_dir, "test_prog.f08")
string += write_rpc_request(2, "textDocument/references", {
"textDocument": {"uri": file_path},
"position": {"line": 9, "character": 8}
})
errcode, results = run_request(string)
assert errcode == 0
#
free_path = os.path.join(test_dir, "subdir", "test_free.f90")
check_return(results[1], (
[os.path.join(test_dir, "test_prog.f08"), 2, 21, 27],
[os.path.join(test_dir, "test_prog.f08"), 9, 5, 11],
[free_path, 8, 8, 14],
[free_path, 16, 9, 15],
[free_path, 18, 14, 20],
[free_path, 36, 6, 12],
[free_path, 44, 6, 12],
[free_path, 50, 6, 12],
[free_path, 76, 6, 12]
))
def test_hover():
def check_return(result_array, checks):
assert len(result_array) == len(checks)
for (i, check) in enumerate(checks):
assert result_array[i]['contents'][0]['value'] == check
#
string = write_rpc_request(1, "initialize", {"rootPath": test_dir})
file_path = os.path.join(test_dir, "subdir", "test_abstract.f90")
string += write_rpc_request(2, "textDocument/hover", {
"textDocument": {"uri": file_path},
"position": {"line": 7, "character": 30}
})
errcode, results = run_request(string)
assert errcode == 0
#
check_return(results[1:], ("""SUBROUTINE test(a, b)
INTEGER(4), DIMENSION(3,6), INTENT(IN) :: a
REAL(8), DIMENSION(4), INTENT(OUT) :: b""",))
def test_docs():
def check_return(result_array, checks):
comm_lines = []
for (i, hover_line) in enumerate(result_array['contents'][0]['value'].splitlines()):
if hover_line.count('!!') > 0:
comm_lines.append((i, hover_line))
assert len(comm_lines) == len(checks)
for i in range(len(checks)):
assert comm_lines[i][0] == checks[i][0]
assert comm_lines[i][1] == checks[i][1]
def hover_request(file_path, line, char):
return write_rpc_request(1, "textDocument/hover", {
"textDocument": {"uri": file_path},
"position": {"line": line, "character": char}
})
#
string = write_rpc_request(1, "initialize", {"rootPath": test_dir})
file_path = os.path.join(test_dir, "subdir", "test_free.f90")
string += hover_request(file_path, 13, 19)
string += hover_request(file_path, 13, 31)
string += hover_request(file_path, 14, 17)
string += hover_request(file_path, 14, 28)
string += hover_request(file_path, 21, 18)
string += hover_request(file_path, 21, 37)
string += hover_request(file_path, 22, 17)
string += hover_request(file_path, 22, 32)
string += hover_request(file_path, 15, 32)
string += hover_request(file_path, 15, 47)
errcode, results = run_request(string)
assert errcode == 0
#
check_return(results[1], ((1, '!! Doc 1'), (3, ' !! Doc 5')))
check_return(results[2], ((1, '!! Doc 4'), (4, ' !! Doc 5')))
check_return(results[3], ((1, '!! Doc 2'), ))
check_return(results[4], ((1, '!! Doc 6'), ))
check_return(results[5], ((1, '!! Doc 7'), (3, ' !! Doc 8')))
check_return(results[6], ((1, '!! Doc 7'), (4, ' !! Doc 8')))
check_return(results[7], ((1, '!! Doc 3'), ))
check_return(results[8], ())
check_return(results[9], ())
check_return(results[10], ((3, ' !! Doc 9'), (4, ' !! Doc 10')))
if __name__ == "__main__":
test_init()
test_open()
test_change()
test_symbols()
test_workspace_symbols()
test_comp()
test_sig()
test_def()
test_refs()
test_hover()
test_docs()
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.sync.v1.service.sync_map.sync_map_item import SyncMapItemList
from twilio.rest.sync.v1.service.sync_map.sync_map_permission import SyncMapPermissionList
class SyncMapList(ListResource):
def __init__(self, version, service_sid):
"""
Initialize the SyncMapList
:param Version version: Version that contains the resource
:param service_sid: The SID of the Sync Service that the resource is associated with
:returns: twilio.rest.sync.v1.service.sync_map.SyncMapList
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapList
"""
super(SyncMapList, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, }
self._uri = '/Services/{service_sid}/Maps'.format(**self._solution)
def create(self, unique_name=values.unset, ttl=values.unset,
collection_ttl=values.unset):
"""
Create the SyncMapInstance
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param unicode ttl: An alias for collection_ttl
:param unicode collection_ttl: How long, in seconds, before the Sync Map expires and is deleted
:returns: The created SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
"""
data = values.of({'UniqueName': unique_name, 'Ttl': ttl, 'CollectionTtl': collection_ttl, })
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return SyncMapInstance(self._version, payload, service_sid=self._solution['service_sid'], )
def stream(self, limit=None, page_size=None):
"""
Streams SyncMapInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.sync.v1.service.sync_map.SyncMapInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists SyncMapInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: list that will contain up to limit results
:rtype: list[twilio.rest.sync.v1.service.sync_map.SyncMapInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of SyncMapInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return SyncMapPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SyncMapInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SyncMapPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a SyncMapContext
:param sid: The SID of the Sync Map resource to fetch
:returns: twilio.rest.sync.v1.service.sync_map.SyncMapContext
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapContext
"""
return SyncMapContext(self._version, service_sid=self._solution['service_sid'], sid=sid, )
def __call__(self, sid):
"""
Constructs a SyncMapContext
:param sid: The SID of the Sync Map resource to fetch
:returns: twilio.rest.sync.v1.service.sync_map.SyncMapContext
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapContext
"""
return SyncMapContext(self._version, service_sid=self._solution['service_sid'], sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Sync.V1.SyncMapList>'
class SyncMapPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the SyncMapPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
        :param dict solution: Path solution for the resource (contains the service_sid)
:returns: twilio.rest.sync.v1.service.sync_map.SyncMapPage
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapPage
"""
super(SyncMapPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SyncMapInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
"""
return SyncMapInstance(self._version, payload, service_sid=self._solution['service_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Sync.V1.SyncMapPage>'
class SyncMapContext(InstanceContext):
def __init__(self, version, service_sid, sid):
"""
Initialize the SyncMapContext
:param Version version: Version that contains the resource
:param service_sid: The SID of the Sync Service with the Sync Map resource to fetch
:param sid: The SID of the Sync Map resource to fetch
:returns: twilio.rest.sync.v1.service.sync_map.SyncMapContext
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapContext
"""
super(SyncMapContext, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, 'sid': sid, }
self._uri = '/Services/{service_sid}/Maps/{sid}'.format(**self._solution)
# Dependents
self._sync_map_items = None
self._sync_map_permissions = None
def fetch(self):
"""
Fetch the SyncMapInstance
:returns: The fetched SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return SyncMapInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the SyncMapInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def update(self, ttl=values.unset, collection_ttl=values.unset):
"""
Update the SyncMapInstance
:param unicode ttl: An alias for collection_ttl
:param unicode collection_ttl: How long, in seconds, before the Sync Map expires and is deleted
:returns: The updated SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
"""
data = values.of({'Ttl': ttl, 'CollectionTtl': collection_ttl, })
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return SyncMapInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
@property
def sync_map_items(self):
"""
Access the sync_map_items
:returns: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemList
:rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemList
"""
if self._sync_map_items is None:
self._sync_map_items = SyncMapItemList(
self._version,
service_sid=self._solution['service_sid'],
map_sid=self._solution['sid'],
)
return self._sync_map_items
@property
def sync_map_permissions(self):
"""
Access the sync_map_permissions
:returns: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionList
:rtype: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionList
"""
if self._sync_map_permissions is None:
self._sync_map_permissions = SyncMapPermissionList(
self._version,
service_sid=self._solution['service_sid'],
map_sid=self._solution['sid'],
)
return self._sync_map_permissions
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Sync.V1.SyncMapContext {}>'.format(context)
class SyncMapInstance(InstanceResource):
def __init__(self, version, payload, service_sid, sid=None):
"""
Initialize the SyncMapInstance
:returns: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
"""
super(SyncMapInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'unique_name': payload.get('unique_name'),
'account_sid': payload.get('account_sid'),
'service_sid': payload.get('service_sid'),
'url': payload.get('url'),
'links': payload.get('links'),
'revision': payload.get('revision'),
'date_expires': deserialize.iso8601_datetime(payload.get('date_expires')),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'created_by': payload.get('created_by'),
}
# Context
self._context = None
self._solution = {'service_sid': service_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncMapContext for this SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapContext
"""
if self._context is None:
self._context = SyncMapContext(
self._version,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def unique_name(self):
"""
:returns: An application-defined string that uniquely identifies the resource
:rtype: unicode
"""
return self._properties['unique_name']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def service_sid(self):
"""
:returns: The SID of the Sync Service that the resource is associated with
:rtype: unicode
"""
return self._properties['service_sid']
@property
def url(self):
"""
:returns: The absolute URL of the Sync Map resource
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: The URLs of the Sync Map's nested resources
:rtype: unicode
"""
return self._properties['links']
@property
def revision(self):
"""
:returns: The current revision of the Sync Map, represented as a string
:rtype: unicode
"""
return self._properties['revision']
@property
def date_expires(self):
"""
:returns: The ISO 8601 date and time in GMT when the Sync Map expires
:rtype: datetime
"""
return self._properties['date_expires']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def created_by(self):
"""
:returns: The identity of the Sync Map's creator
:rtype: unicode
"""
return self._properties['created_by']
def fetch(self):
"""
Fetch the SyncMapInstance
:returns: The fetched SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the SyncMapInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, ttl=values.unset, collection_ttl=values.unset):
"""
Update the SyncMapInstance
:param unicode ttl: An alias for collection_ttl
:param unicode collection_ttl: How long, in seconds, before the Sync Map expires and is deleted
:returns: The updated SyncMapInstance
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapInstance
"""
return self._proxy.update(ttl=ttl, collection_ttl=collection_ttl, )
@property
def sync_map_items(self):
"""
Access the sync_map_items
:returns: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemList
:rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemList
"""
return self._proxy.sync_map_items
@property
def sync_map_permissions(self):
"""
Access the sync_map_permissions
:returns: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionList
:rtype: twilio.rest.sync.v1.service.sync_map.sync_map_permission.SyncMapPermissionList
"""
return self._proxy.sync_map_permissions
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Sync.V1.SyncMapInstance {}>'.format(context)
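# A minimal, hypothetical usage sketch (not part of the generated library):
# the account SID, auth token and Sync service SID below are placeholders
# that must be replaced with real credentials before this can actually run.
if __name__ == '__main__':
    from twilio.rest import Client
    client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
    sync_maps = client.sync.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').sync_maps
    # create() issues a single POST and returns a SyncMapInstance
    new_map = sync_maps.create(unique_name='scoreboard', collection_ttl=3600)
    # stream() lazily pages through records until `limit` is reached
    for sync_map in sync_maps.stream(limit=20):
        print('{} {}'.format(sync_map.sid, sync_map.unique_name))
    # get() builds a SyncMapContext locally; fetch() performs the GET
    fetched = sync_maps.get(new_map.sid).fetch()
    print(fetched.revision)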
|
|
import numpy as np
import astropy.units as u
from astropy.table import Table
from . import filter
from . import utils
from . import config as cfg
class Photometry(object):
"""Photometry class.
This class is used for observations and synthetic photometry. It contains
some meta info and lists of photometry.
"""
def __init__(self,filters=None,measurement=None,e_measurement=None,unit=None,
s_measurement=None,bibcode=None,upperlim=None,ignore=None,
fnujy=None,e_fnujy=None,note=None):
self.filters = filters
self.measurement = measurement
self.e_measurement = e_measurement
self.s_measurement = s_measurement
self.unit = unit
self.bibcode = bibcode
self.upperlim = upperlim
self.ignore = ignore
self.fnujy = fnujy
self.e_fnujy = e_fnujy
self.note = note
def addto(self,p):
"""Add Photometry object to another."""
if self.nphot is None:
self.filters = p.filters
self.measurement = p.measurement
self.e_measurement = p.e_measurement
self.s_measurement = p.s_measurement
self.unit = p.unit
self.bibcode = p.bibcode
self.upperlim = p.upperlim
self.ignore = p.ignore
self.fnujy = p.fnujy
self.e_fnujy = p.e_fnujy
self.note = p.note
elif p.nphot is None:
pass
else:
self.filters = np.append( self.filters, p.filters)
self.measurement = np.append( self.measurement, p.measurement)
self.e_measurement = np.append(self.e_measurement,p.e_measurement)
self.s_measurement = np.append(self.s_measurement,p.s_measurement)
self.unit = np.append( self.unit, p.unit)
self.bibcode = np.append( self.bibcode, p.bibcode)
self.upperlim = np.append( self.upperlim, p.upperlim)
self.ignore = np.append( self.ignore, p.ignore)
self.fnujy = np.append( self.fnujy, p.fnujy)
self.e_fnujy = np.append( self.e_fnujy, p.e_fnujy)
self.note = np.append( self.note, p.note)
@classmethod
def read_sdb_file(cls,file,keep_filters=None):
""" Load photometry from a file and return a Photometry object.
The file format is set by sdb_getphot.py, and is ascii.ipac. To
get any spectra use spectrum.ObsSpectrum.get_sdb_file, and for
keywords use utils.get_sdb_keywords.
"""
self = cls()
# get the photometry, checking there is any
phot = Table.read(file,format='ascii.ipac')
if len(phot) == 0:
return None
# loop over the rows and fill the object if we're
# keeping this filter
for i in range(len(phot)):
if keep_filters is not None:
if phot[i]['Band'] not in keep_filters:
continue
row = phot[i]
# quick sanity checking
sum = 0.0
for par in ['Phot','Err','Sys']:
if not np.isfinite(row[par]):
                    raise utils.SdfError("non-finite {} value {} in {}".format(par,row[par],file))
sum += row[par]
p = Photometry()
p.filters = np.array([row['Band']])
p.measurement = np.array([row['Phot']])
p.e_measurement = np.abs( np.array([row['Err']]) )
p.s_measurement = np.abs( np.array([row['Sys']]) )
p.unit = np.array([u.Unit(row['Unit'])])
p.bibcode = np.array([row['bibcode']])
p.upperlim = np.array([ row['Lim'] == 1 ])
p.ignore = np.array([ row['exclude'] == 1 ])
p.note = np.array([row['Note1']])
# if exclude desired
if row['Band'] in cfg.fitting['exclude_filters']:
p.ignore = np.array([True])
# if upper limit desired
if row['Band'] in cfg.fitting['upperlim_filters']:
if not p.upperlim:
if p.unit != 'mag':
p.measurement += 3 * p.e_measurement
else:
p.measurement -= 3 * p.e_measurement
p.upperlim = np.array([True])
# or if zero fluxes and errors
if sum == 0.0:
p.ignore = np.array([True])
self.addto(p)
self.fill_fnujy()
self.sort()
return self
@property
def nphot(self):
if self.filters is not None:
return len(self.filters)
else:
return None
@property
def nused(self):
return np.sum(self.ignore == False)
def fill_fnujy(self):
"""Convert measurements to Jy and fill fnujy/e_fnujy arrays.
        Where no uncertainties are given, 10% (or 0.1 mag) is assumed.
Colours/indices are left as is.
"""
fnu = np.zeros(self.nphot)
efnu = np.zeros(self.nphot)
strom = [-1,-1,-1] # indices of b-y, m1, c1
for i in range(self.nphot):
# attempt to combine uncertainties
if np.isfinite(self.e_measurement[i]) and np.isfinite(self.s_measurement[i]):
etot = np.sqrt(self.e_measurement[i]**2 + self.s_measurement[i]**2)
elif np.isfinite(self.e_measurement[i]) and not np.isfinite(self.s_measurement[i]):
etot = self.e_measurement[i]
elif not np.isfinite(self.e_measurement[i]) and np.isfinite(self.s_measurement[i]):
etot = self.s_measurement[i]
else:
print("WARNING no uncertainties given, assuming 10%")
etot = np.nan
# get calibrated measurement (if exists)
if not filter.iscolour(self.filters[i]):
filt = filter.Filter.get(self.filters[i])
cal_meas = filt.measflux2flux(self.measurement[i])
else:
cal_meas = self.measurement[i]
# convert flux and uncertainty
if self.unit[i] != 'mag':
fnu[i] = (cal_meas*self.unit[i]).to('Jy').value
if np.isfinite(etot) and etot > 0.:
efnu[i] = (etot * self.unit[i]).to('Jy').value
else:
efnu[i] = 0.1 * fnu[i] # assume 10%
else:
# use zero point to convert
if not filter.iscolour(self.filters[i]):
filt = filter.Filter.get(self.filters[i])
fnu[i] = filt.mag2flux(cal_meas)
if np.isfinite(etot) and etot > 0.:
                        efnu[i] = fnu[i] * etot / 1.09 # 2.5/ln(10) ~ 1.086: small mag err -> fractional flux err
else:
efnu[i] = 0.1 * fnu[i] # assume 10%
# leave colours/indices as is
else:
fnu[i] = cal_meas
if np.isfinite(etot) and etot > 0.:
efnu[i] = etot
else:
efnu[i] = 0.1 # assume 10%
# note stromgren
if self.filters[i] == 'BS_YS' and strom[0] == -1:
strom[0] = i
if self.filters[i] == 'STROMM1' and strom[1] == -1:
strom[1] = i
if self.filters[i] == 'STROMC1' and strom[2] == -1:
strom[2] = i
        # convert uvby as a group, but only if all three Stromgren indices were found
        if -1 not in strom:
fnu[strom[0]],fnu[strom[1]],fnu[strom[2]] = \
utils.uvby_convert(fnu[strom[0]],fnu[strom[1]],fnu[strom[2]])
self.fnujy = fnu
self.e_fnujy = efnu
def mean_wavelength(self):
"""Return the mean wavelength of the filters."""
mw = np.array([])
for f in self.filters:
if filter.iscolour(f):
col = filter.Colour.get(f)
mw = np.append( mw, col.mean_wavelength )
else:
filt = filter.Filter.get(f)
mw = np.append( mw, filt.mean_wavelength )
return mw
def sort(self):
"""Sort arrays in increasing wavelength order."""
srt = np.argsort( self.mean_wavelength() )
self.filters = self.filters[srt]
self.measurement = self.measurement[srt]
self.e_measurement = self.e_measurement[srt]
self.s_measurement = self.s_measurement[srt]
self.unit = self.unit[srt]
self.bibcode = self.bibcode[srt]
self.upperlim = self.upperlim[srt]
self.ignore = self.ignore[srt]
self.fnujy = self.fnujy[srt]
self.e_fnujy = self.e_fnujy[srt]
self.note = self.note[srt]
@property
def measurement(self):
return self._measurement
@measurement.setter
def measurement(self, value):
self._measurement = utils.validate_1d(value,self.nphot)
@property
def e_measurement(self):
return self._e_measurement
@e_measurement.setter
def e_measurement(self, value):
self._e_measurement = utils.validate_1d(value,self.nphot)
@property
def s_measurement(self):
return self._s_measurement
@s_measurement.setter
def s_measurement(self, value):
self._s_measurement = utils.validate_1d(value,self.nphot)
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = utils.validate_1d(value,self.nphot,dtype=u.Unit)
@property
def bibcode(self):
return self._bibcode
@bibcode.setter
def bibcode(self, value):
self._bibcode = utils.validate_1d(value,self.nphot,dtype=str)
@property
def note(self):
return self._note
@note.setter
def note(self, value):
self._note = utils.validate_1d(value,self.nphot,dtype=str)
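# Hypothetical usage sketch (not part of this module; assumes it lives in the
# sdf package and that an ascii.ipac photometry file produced by
# sdb_getphot.py is available -- the file name below is a placeholder):
#   from sdf.photometry import Photometry
#   p = Photometry.read_sdb_file('target_photometry.ipac')
#   if p is not None:
#       print(p.filters)            # sorted by mean wavelength
#       print(p.fnujy, p.e_fnujy)   # measurements converted to Jy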
|
|
"""
Tests for L{pyflakes.scripts.pyflakes}.
"""
import os
import sys
import shutil
import subprocess
import tempfile
from unittest2 import skipIf, TestCase
from pyflakes.messages import UnusedImport
from pyflakes.reporter import Reporter
from pyflakes.api import (
checkPath,
checkRecursive,
iterSourceCode,
)
if sys.version_info < (3,):
from cStringIO import StringIO
else:
from io import StringIO
unichr = chr
def withStderrTo(stderr, f, *args, **kwargs):
"""
Call C{f} with C{sys.stderr} redirected to C{stderr}.
"""
(outer, sys.stderr) = (sys.stderr, stderr)
try:
return f(*args, **kwargs)
finally:
sys.stderr = outer
class Node(object):
"""
Mock an AST node.
"""
def __init__(self, lineno, col_offset=0):
self.lineno = lineno
self.col_offset = col_offset
class LoggingReporter(object):
"""
Implementation of Reporter that just appends any error to a list.
"""
def __init__(self, log):
"""
Construct a C{LoggingReporter}.
@param log: A list to append log messages to.
"""
self.log = log
def flake(self, message):
self.log.append(('flake', str(message)))
def unexpectedError(self, filename, message):
self.log.append(('unexpectedError', filename, message))
def syntaxError(self, filename, msg, lineno, offset, line):
self.log.append(('syntaxError', filename, msg, lineno, offset, line))
class TestIterSourceCode(TestCase):
"""
Tests for L{iterSourceCode}.
"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def makeEmptyFile(self, *parts):
assert parts
fpath = os.path.join(self.tempdir, *parts)
fd = open(fpath, 'a')
fd.close()
return fpath
def test_emptyDirectory(self):
"""
There are no Python files in an empty directory.
"""
self.assertEqual(list(iterSourceCode([self.tempdir])), [])
def test_singleFile(self):
"""
If the directory contains one Python file, C{iterSourceCode} will find
it.
"""
childpath = self.makeEmptyFile('foo.py')
self.assertEqual(list(iterSourceCode([self.tempdir])), [childpath])
def test_onlyPythonSource(self):
"""
Files that are not Python source files are not included.
"""
self.makeEmptyFile('foo.pyc')
self.assertEqual(list(iterSourceCode([self.tempdir])), [])
def test_recurses(self):
"""
If the Python files are hidden deep down in child directories, we will
find them.
"""
os.mkdir(os.path.join(self.tempdir, 'foo'))
apath = self.makeEmptyFile('foo', 'a.py')
os.mkdir(os.path.join(self.tempdir, 'bar'))
bpath = self.makeEmptyFile('bar', 'b.py')
cpath = self.makeEmptyFile('c.py')
self.assertEqual(
sorted(iterSourceCode([self.tempdir])),
sorted([apath, bpath, cpath]))
def test_multipleDirectories(self):
"""
L{iterSourceCode} can be given multiple directories. It will recurse
into each of them.
"""
foopath = os.path.join(self.tempdir, 'foo')
barpath = os.path.join(self.tempdir, 'bar')
os.mkdir(foopath)
apath = self.makeEmptyFile('foo', 'a.py')
os.mkdir(barpath)
bpath = self.makeEmptyFile('bar', 'b.py')
self.assertEqual(
sorted(iterSourceCode([foopath, barpath])),
sorted([apath, bpath]))
def test_explicitFiles(self):
"""
If one of the paths given to L{iterSourceCode} is not a directory but
a file, it will include that in its output.
"""
epath = self.makeEmptyFile('e.py')
self.assertEqual(list(iterSourceCode([epath])),
[epath])
class TestReporter(TestCase):
"""
Tests for L{Reporter}.
"""
def test_syntaxError(self):
"""
C{syntaxError} reports that there was a syntax error in the source
file. It reports to the error stream and includes the filename, line
number, error message, actual line of source and a caret pointing to
where the error is.
"""
err = StringIO()
reporter = Reporter(None, err)
reporter.syntaxError('foo.py', 'a problem', 3, 4, 'bad line of source')
self.assertEquals(
("foo.py:3: a problem\n"
"bad line of source\n"
" ^\n"),
err.getvalue())
def test_syntaxErrorNoOffset(self):
"""
C{syntaxError} doesn't include a caret pointing to the error if
C{offset} is passed as C{None}.
"""
err = StringIO()
reporter = Reporter(None, err)
reporter.syntaxError('foo.py', 'a problem', 3, None,
'bad line of source')
self.assertEquals(
("foo.py:3: a problem\n"
"bad line of source\n"),
err.getvalue())
def test_multiLineSyntaxError(self):
"""
If there's a multi-line syntax error, then we only report the last
line. The offset is adjusted so that it is relative to the start of
the last line.
"""
err = StringIO()
lines = [
'bad line of source',
'more bad lines of source',
]
reporter = Reporter(None, err)
reporter.syntaxError('foo.py', 'a problem', 3, len(lines[0]) + 5,
'\n'.join(lines))
self.assertEquals(
("foo.py:3: a problem\n" +
lines[-1] + "\n" +
" ^\n"),
err.getvalue())
def test_unexpectedError(self):
"""
C{unexpectedError} reports an error processing a source file.
"""
err = StringIO()
reporter = Reporter(None, err)
reporter.unexpectedError('source.py', 'error message')
self.assertEquals('source.py: error message\n', err.getvalue())
def test_flake(self):
"""
C{flake} reports a code warning from Pyflakes. It is exactly the
str() of a L{pyflakes.messages.Message}.
"""
out = StringIO()
reporter = Reporter(out, None)
message = UnusedImport('foo.py', Node(42), 'bar')
reporter.flake(message)
self.assertEquals(out.getvalue(), "%s\n" % (message,))
class CheckTests(TestCase):
"""
Tests for L{check} and L{checkPath} which check a file for flakes.
"""
def makeTempFile(self, content):
"""
Make a temporary file containing C{content} and return a path to it.
"""
_, fpath = tempfile.mkstemp()
if not hasattr(content, 'decode'):
content = content.encode('ascii')
fd = open(fpath, 'wb')
fd.write(content)
fd.close()
return fpath
def assertHasErrors(self, path, errorList):
"""
Assert that C{path} causes errors.
@param path: A path to a file to check.
@param errorList: A list of errors expected to be printed to stderr.
"""
err = StringIO()
count = withStderrTo(err, checkPath, path)
self.assertEquals(
(count, err.getvalue()), (len(errorList), ''.join(errorList)))
def getErrors(self, path):
"""
Get any warnings or errors reported by pyflakes for the file at C{path}.
@param path: The path to a Python file on disk that pyflakes will check.
@return: C{(count, log)}, where C{count} is the number of warnings or
errors generated, and log is a list of those warnings, presented
as structured data. See L{LoggingReporter} for more details.
"""
log = []
reporter = LoggingReporter(log)
count = checkPath(path, reporter)
return count, log
def test_legacyScript(self):
from pyflakes.scripts import pyflakes as script_pyflakes
self.assertIs(script_pyflakes.checkPath, checkPath)
def test_missingTrailingNewline(self):
"""
Source which doesn't end with a newline shouldn't cause any
exception to be raised nor an error indicator to be returned by
L{check}.
"""
fName = self.makeTempFile("def foo():\n\tpass\n\t")
self.assertHasErrors(fName, [])
def test_checkPathNonExisting(self):
"""
L{checkPath} handles non-existing files.
"""
count, errors = self.getErrors('extremo')
self.assertEquals(count, 1)
self.assertEquals(
errors,
[('unexpectedError', 'extremo', 'No such file or directory')])
def test_multilineSyntaxError(self):
"""
        Source which includes a syntax error that results in the raised
        L{SyntaxError.text} containing multiple lines of source is reported
        with only the last line of that source.
"""
source = """\
def foo():
'''
def bar():
pass
def baz():
'''quux'''
"""
# Sanity check - SyntaxError.text should be multiple lines, if it
# isn't, something this test was unprepared for has happened.
def evaluate(source):
exec(source)
try:
evaluate(source)
except SyntaxError:
e = sys.exc_info()[1]
self.assertTrue(e.text.count('\n') > 1)
else:
self.fail()
sourcePath = self.makeTempFile(source)
self.assertHasErrors(
sourcePath,
["""\
%s:8: invalid syntax
'''quux'''
^
""" % (sourcePath,)])
def test_eofSyntaxError(self):
"""
The error reported for source files which end prematurely causing a
syntax error reflects the cause for the syntax error.
"""
sourcePath = self.makeTempFile("def foo(")
self.assertHasErrors(
sourcePath,
["""\
%s:1: unexpected EOF while parsing
def foo(
^
""" % (sourcePath,)])
def test_nonDefaultFollowsDefaultSyntaxError(self):
"""
Source which has a non-default argument following a default argument
should include the line number of the syntax error. However these
exceptions do not include an offset.
"""
source = """\
def foo(bar=baz, bax):
pass
"""
sourcePath = self.makeTempFile(source)
last_line = ' ^\n' if sys.version_info >= (3, 2) else ''
self.assertHasErrors(
sourcePath,
["""\
%s:1: non-default argument follows default argument
def foo(bar=baz, bax):
%s""" % (sourcePath, last_line)])
def test_nonKeywordAfterKeywordSyntaxError(self):
"""
Source which has a non-keyword argument after a keyword argument should
include the line number of the syntax error. However these exceptions
do not include an offset.
"""
source = """\
foo(bar=baz, bax)
"""
sourcePath = self.makeTempFile(source)
last_line = ' ^\n' if sys.version_info >= (3, 2) else ''
self.assertHasErrors(
sourcePath,
["""\
%s:1: non-keyword arg after keyword arg
foo(bar=baz, bax)
%s""" % (sourcePath, last_line)])
def test_invalidEscape(self):
"""
The invalid escape syntax raises ValueError in Python 2
"""
# ValueError: invalid \x escape
sourcePath = self.makeTempFile(r"foo = '\xyz'")
if sys.version_info < (3,):
decoding_error = "%s: problem decoding source\n" % (sourcePath,)
else:
decoding_error = """\
%s:1: (unicode error) 'unicodeescape' codec can't decode bytes \
in position 0-2: truncated \\xXX escape
foo = '\\xyz'
%s""" % (sourcePath, ' ^\n' if sys.version_info >= (3, 2) else '')
self.assertHasErrors(
sourcePath, [decoding_error])
def test_permissionDenied(self):
"""
If the source file is not readable, this is reported on standard
error.
"""
sourcePath = self.makeTempFile('')
os.chmod(sourcePath, 0)
count, errors = self.getErrors(sourcePath)
self.assertEquals(count, 1)
self.assertEquals(
errors,
[('unexpectedError', sourcePath, "Permission denied")])
def test_pyflakesWarning(self):
"""
If the source file has a pyflakes warning, this is reported as a
'flake'.
"""
sourcePath = self.makeTempFile("import foo")
count, errors = self.getErrors(sourcePath)
self.assertEquals(count, 1)
self.assertEquals(
errors, [('flake', str(UnusedImport(sourcePath, Node(1), 'foo')))])
@skipIf(sys.version_info >= (3,), "not relevant")
def test_misencodedFileUTF8(self):
"""
If a source file contains bytes which cannot be decoded, this is
reported on stderr.
"""
SNOWMAN = unichr(0x2603)
source = ("""\
# coding: ascii
x = "%s"
""" % SNOWMAN).encode('utf-8')
sourcePath = self.makeTempFile(source)
self.assertHasErrors(
sourcePath, ["%s: problem decoding source\n" % (sourcePath,)])
def test_misencodedFileUTF16(self):
"""
If a source file contains bytes which cannot be decoded, this is
reported on stderr.
"""
SNOWMAN = unichr(0x2603)
source = ("""\
# coding: ascii
x = "%s"
""" % SNOWMAN).encode('utf-16')
sourcePath = self.makeTempFile(source)
self.assertHasErrors(
sourcePath, ["%s: problem decoding source\n" % (sourcePath,)])
def test_checkRecursive(self):
"""
L{checkRecursive} descends into each directory, finding Python files
and reporting problems.
"""
tempdir = tempfile.mkdtemp()
os.mkdir(os.path.join(tempdir, 'foo'))
file1 = os.path.join(tempdir, 'foo', 'bar.py')
fd = open(file1, 'wb')
fd.write("import baz\n".encode('ascii'))
fd.close()
file2 = os.path.join(tempdir, 'baz.py')
fd = open(file2, 'wb')
fd.write("import contraband".encode('ascii'))
fd.close()
log = []
reporter = LoggingReporter(log)
warnings = checkRecursive([tempdir], reporter)
self.assertEqual(warnings, 2)
self.assertEqual(
sorted(log),
sorted([('flake', str(UnusedImport(file1, Node(1), 'baz'))),
('flake',
str(UnusedImport(file2, Node(1), 'contraband')))]))
class IntegrationTests(TestCase):
"""
Tests of the pyflakes script that actually spawn the script.
"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.tempfilepath = os.path.join(self.tempdir, 'temp')
def tearDown(self):
shutil.rmtree(self.tempdir)
def getPyflakesBinary(self):
"""
Return the path to the pyflakes binary.
"""
import pyflakes
package_dir = os.path.dirname(pyflakes.__file__)
return os.path.join(package_dir, '..', 'bin', 'pyflakes')
def runPyflakes(self, paths, stdin=None):
"""
Launch a subprocess running C{pyflakes}.
        @param paths: Command-line arguments to pass to pyflakes.
        @param stdin: If not C{None}, bytes piped to the child process on
            standard input.
@return: C{(returncode, stdout, stderr)} of the completed pyflakes
process.
"""
env = dict(os.environ)
env['PYTHONPATH'] = os.pathsep.join(sys.path)
command = [sys.executable, self.getPyflakesBinary()]
command.extend(paths)
if stdin:
p = subprocess.Popen(command, env=env, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate(stdin)
else:
p = subprocess.Popen(command, env=env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
rv = p.wait()
if sys.version_info >= (3,):
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
return (stdout, stderr, rv)
def test_goodFile(self):
"""
When a Python source file is all good, the return code is zero and no
messages are printed to either stdout or stderr.
"""
fd = open(self.tempfilepath, 'a')
fd.close()
d = self.runPyflakes([self.tempfilepath])
self.assertEqual(d, ('', '', 0))
def test_fileWithFlakes(self):
"""
When a Python source file has warnings, the return code is non-zero
and the warnings are printed to stdout.
"""
fd = open(self.tempfilepath, 'wb')
fd.write("import contraband\n".encode('ascii'))
fd.close()
d = self.runPyflakes([self.tempfilepath])
expected = UnusedImport(self.tempfilepath, Node(1), 'contraband')
self.assertEqual(d, ("%s\n" % expected, '', 1))
def test_errors(self):
"""
When pyflakes finds errors with the files it's given, (if they don't
exist, say), then the return code is non-zero and the errors are
printed to stderr.
"""
d = self.runPyflakes([self.tempfilepath])
error_msg = '%s: No such file or directory\n' % (self.tempfilepath,)
self.assertEqual(d, ('', error_msg, 1))
def test_readFromStdin(self):
"""
If no arguments are passed to C{pyflakes} then it reads from stdin.
"""
d = self.runPyflakes([], stdin='import contraband'.encode('ascii'))
expected = UnusedImport('<stdin>', Node(1), 'contraband')
self.assertEqual(d, ("%s\n" % expected, '', 1))
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
import django
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.http import HttpResponseRedirect
from django.test.utils import override_settings
from django.utils import timezone
from horizon import exceptions
from horizon import middleware
from horizon.test import helpers as test
class MiddlewareTests(test.TestCase):
def setUp(self):
self._timezone_backup = timezone.get_current_timezone_name()
return super(MiddlewareTests, self).setUp()
def tearDown(self):
timezone.activate(self._timezone_backup)
return super(MiddlewareTests, self).tearDown()
def test_redirect_login_fail_to_login(self):
url = settings.LOGIN_URL
request = self.factory.post(url)
mw = middleware.HorizonMiddleware()
resp = mw.process_exception(request, exceptions.NotAuthorized())
resp.client = self.client
if django.VERSION >= (1, 9):
self.assertRedirects(resp, settings.TESTSERVER + url)
else:
self.assertRedirects(resp, url)
def test_process_response_redirect_on_ajax_request(self):
url = settings.LOGIN_URL
mw = middleware.HorizonMiddleware()
request = self.factory.post(url,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
request.horizon = {'async_messages':
[('error', 'error_msg', 'extra_tag')]}
response = HttpResponseRedirect(url)
response.client = self.client
resp = mw.process_response(request, response)
self.assertEqual(200, resp.status_code)
self.assertEqual(url, resp['X-Horizon-Location'])
def test_timezone_awareness(self):
url = settings.LOGIN_REDIRECT_URL
mw = middleware.HorizonMiddleware()
request = self.factory.get(url)
request.session['django_timezone'] = 'America/Chicago'
mw.process_request(request)
self.assertEqual(
timezone.get_current_timezone_name(), 'America/Chicago')
request.session['django_timezone'] = 'Europe/Paris'
mw.process_request(request)
self.assertEqual(timezone.get_current_timezone_name(), 'Europe/Paris')
request.session['django_timezone'] = 'UTC'
mw.process_request(request)
self.assertEqual(timezone.get_current_timezone_name(), 'UTC')
class OperationLogMiddlewareTest(test.TestCase):
http_host = u'test_host'
http_referer = u'/dashboard/test_http_referer'
def test_middleware_not_used(self):
with self.assertRaises(MiddlewareNotUsed):
middleware.OperationLogMiddleware()
def _test_ready_for_post(self):
url = settings.LOGIN_URL
request = self.factory.post(url)
request.META['HTTP_HOST'] = self.http_host
request.META['HTTP_REFERER'] = self.http_referer
request.POST = {
"username": u"admin",
"password": u"pass"
}
request.user.username = u'test_user_name'
response = HttpResponseRedirect(url)
response.client = self.client
return request, response
def _test_ready_for_get(self, url=None):
if url is None:
url = '/dashboard/project/?start=2016-03-01&end=2016-03-11'
request = self.factory.get(url)
request.META['HTTP_HOST'] = self.http_host
request.META['HTTP_REFERER'] = self.http_referer
request.user.username = u'test_user_name'
response = HttpResponseRedirect(url)
response.client = self.client
return request, response
@override_settings(OPERATION_LOG_ENABLED=True)
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_process_response_for_post(self, mock_logger):
olm = middleware.OperationLogMiddleware()
request, response = self._test_ready_for_post()
resp = olm.process_response(request, response)
self.assertTrue(mock_logger.info.called)
self.assertEqual(302, resp.status_code)
log_args = mock_logger.info.call_args[0]
logging_str = log_args[0] % log_args[1]
self.assertIn(request.user.username, logging_str)
self.assertIn(self.http_referer, logging_str)
self.assertIn(settings.LOGIN_URL, logging_str)
self.assertIn('POST', logging_str)
self.assertIn('302', logging_str)
post_data = ['"username": "admin"', '"password": "********"']
for data in post_data:
self.assertIn(data, logging_str)
@override_settings(OPERATION_LOG_ENABLED=True)
@override_settings(OPERATION_LOG_OPTIONS={'target_methods': ['GET']})
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_process_response_for_get(self, mock_logger):
olm = middleware.OperationLogMiddleware()
request, response = self._test_ready_for_get()
resp = olm.process_response(request, response)
self.assertTrue(mock_logger.info.called)
self.assertEqual(302, resp.status_code)
log_args = mock_logger.info.call_args[0]
logging_str = log_args[0] % log_args[1]
self.assertIn(request.user.username, logging_str)
self.assertIn(self.http_referer, logging_str)
self.assertIn(request.path, logging_str)
self.assertIn('GET', logging_str)
self.assertIn('302', logging_str)
@override_settings(OPERATION_LOG_ENABLED=True)
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_process_response_for_get_no_target(self, mock_logger):
"""In default setting, Get method is not logged"""
olm = middleware.OperationLogMiddleware()
request, response = self._test_ready_for_get()
resp = olm.process_response(request, response)
self.assertEqual(0, mock_logger.info.call_count)
self.assertEqual(302, resp.status_code)
@override_settings(OPERATION_LOG_ENABLED=True)
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_process_exception(self, mock_logger):
olm = middleware.OperationLogMiddleware()
request, response = self._test_ready_for_post()
exception = Exception("Unexpected error occurred.")
olm.process_exception(request, exception)
log_args = mock_logger.info.call_args[0]
logging_str = log_args[0] % log_args[1]
self.assertTrue(mock_logger.info.called)
self.assertIn(request.user.username, logging_str)
self.assertIn(self.http_referer, logging_str)
self.assertIn(settings.LOGIN_URL, logging_str)
self.assertIn('Unexpected error occurred.', logging_str)
post_data = ['"username": "admin"', '"password": "********"']
for data in post_data:
self.assertIn(data, logging_str)
@override_settings(OPERATION_LOG_ENABLED=True)
@override_settings(OPERATION_LOG_OPTIONS={'target_methods': ['GET']})
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_get_log_format(self, mock_logger):
olm = middleware.OperationLogMiddleware()
request, _ = self._test_ready_for_get()
self.assertEqual(olm._default_format, olm._get_log_format(request))
@override_settings(OPERATION_LOG_ENABLED=True)
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_get_log_format_no_user(self, mock_logger):
olm = middleware.OperationLogMiddleware()
request, _ = self._test_ready_for_get()
delattr(request, "user")
self.assertIsNone(olm._get_log_format(request))
@override_settings(OPERATION_LOG_ENABLED=True)
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_get_log_format_unknown_method(self, mock_logger):
olm = middleware.OperationLogMiddleware()
request, _ = self._test_ready_for_get()
request.method = "FAKE"
self.assertIsNone(olm._get_log_format(request))
@override_settings(OPERATION_LOG_ENABLED=True)
@patch(('horizon.middleware.operation_log.OperationLogMiddleware.'
'OPERATION_LOG'))
def test_get_log_format_ignored_url(self, mock_logger):
olm = middleware.OperationLogMiddleware()
request, _ = self._test_ready_for_get("/api/policy")
self.assertIsNone(olm._get_log_format(request))
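# Hypothetical configuration sketch (not part of the test suite): the
# settings exercised above would be enabled in a deployment roughly like
# this (key names follow the OPERATION_LOG_* settings used in the tests).
#   OPERATION_LOG_ENABLED = True
#   OPERATION_LOG_OPTIONS = {
#       'mask_fields': ['password'],
#       'target_methods': ['POST', 'GET'],
#   }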
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark import pandas as ps
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class DatetimeOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def pser(self):
return pd.Series(pd.date_range("1994-1-31 10:30:15", periods=3, freq="D"))
@property
def psser(self):
return ps.from_pandas(self.pser)
@property
def datetime_pdf(self):
psers = {
"this": self.pser,
"that": pd.Series(pd.date_range("1994-2-1 10:30:15", periods=3, freq="D")),
}
return pd.concat(psers, axis=1)
@property
def datetime_psdf(self):
return ps.from_pandas(self.datetime_pdf)
@property
def some_datetime(self):
return datetime.datetime(1994, 1, 31, 10, 30, 00)
def test_add(self):
self.assertRaises(TypeError, lambda: self.psser + "x")
self.assertRaises(TypeError, lambda: self.psser + 1)
self.assertRaises(TypeError, lambda: self.psser + self.some_datetime)
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser + psser)
def test_sub(self):
self.assertRaises(TypeError, lambda: self.psser - "x")
self.assertRaises(TypeError, lambda: self.psser - 1)
self.assert_eq(
(self.pser - self.some_datetime).dt.total_seconds().astype("int"),
self.psser - self.some_datetime,
)
pdf, psdf = self.pdf, self.psdf
for col in self.df_cols:
if col == "datetime":
self.assert_eq(
(pdf["datetime"] - pdf[col]).dt.total_seconds().astype("int"),
psdf["datetime"] - psdf[col],
)
else:
self.assertRaises(TypeError, lambda: psdf["datetime"] - psdf[col])
pdf, psdf = self.datetime_pdf, self.datetime_psdf
self.assert_eq(
(pdf["that"] - pdf["this"]).dt.total_seconds().astype("int"),
psdf["that"] - psdf["this"],
)
def test_mul(self):
self.assertRaises(TypeError, lambda: self.psser * "x")
self.assertRaises(TypeError, lambda: self.psser * 1)
self.assertRaises(TypeError, lambda: self.psser * self.some_datetime)
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser * psser)
def test_truediv(self):
self.assertRaises(TypeError, lambda: self.psser / "x")
self.assertRaises(TypeError, lambda: self.psser / 1)
self.assertRaises(TypeError, lambda: self.psser / self.some_datetime)
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser / psser)
def test_floordiv(self):
self.assertRaises(TypeError, lambda: self.psser // "x")
self.assertRaises(TypeError, lambda: self.psser // 1)
self.assertRaises(TypeError, lambda: self.psser // self.some_datetime)
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser // psser)
def test_mod(self):
self.assertRaises(TypeError, lambda: self.psser % "x")
self.assertRaises(TypeError, lambda: self.psser % 1)
self.assertRaises(TypeError, lambda: self.psser % self.some_datetime)
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser % psser)
def test_pow(self):
self.assertRaises(TypeError, lambda: self.psser ** "x")
self.assertRaises(TypeError, lambda: self.psser ** 1)
self.assertRaises(TypeError, lambda: self.psser ** self.some_datetime)
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser ** psser)
def test_radd(self):
self.assertRaises(TypeError, lambda: "x" + self.psser)
self.assertRaises(TypeError, lambda: 1 + self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime + self.psser)
def test_rsub(self):
self.assertRaises(TypeError, lambda: "x" - self.psser)
self.assertRaises(TypeError, lambda: 1 - self.psser)
self.assert_eq(
(self.some_datetime - self.pser).dt.total_seconds().astype("int"),
self.some_datetime - self.psser,
)
def test_rmul(self):
self.assertRaises(TypeError, lambda: "x" * self.psser)
self.assertRaises(TypeError, lambda: 1 * self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime * self.psser)
def test_rtruediv(self):
self.assertRaises(TypeError, lambda: "x" / self.psser)
self.assertRaises(TypeError, lambda: 1 / self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime / self.psser)
def test_rfloordiv(self):
self.assertRaises(TypeError, lambda: "x" // self.psser)
self.assertRaises(TypeError, lambda: 1 // self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime // self.psser)
def test_rmod(self):
self.assertRaises(TypeError, lambda: 1 % self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime % self.psser)
def test_rpow(self):
self.assertRaises(TypeError, lambda: "x" ** self.psser)
self.assertRaises(TypeError, lambda: 1 ** self.psser)
self.assertRaises(TypeError, lambda: self.some_datetime ** self.psser)
def test_and(self):
self.assertRaises(TypeError, lambda: self.psser & True)
self.assertRaises(TypeError, lambda: self.psser & False)
self.assertRaises(TypeError, lambda: self.psser & self.psser)
def test_rand(self):
self.assertRaises(TypeError, lambda: True & self.psser)
self.assertRaises(TypeError, lambda: False & self.psser)
def test_or(self):
self.assertRaises(TypeError, lambda: self.psser | True)
self.assertRaises(TypeError, lambda: self.psser | False)
self.assertRaises(TypeError, lambda: self.psser | self.psser)
def test_ror(self):
self.assertRaises(TypeError, lambda: True | self.psser)
self.assertRaises(TypeError, lambda: False | self.psser)
def test_from_to_pandas(self):
data = pd.date_range("1994-1-31 10:30:15", periods=3, freq="M")
pser = pd.Series(data)
psser = ps.Series(data)
self.assert_eq(pser, psser.to_pandas())
self.assert_eq(ps.from_pandas(pser), psser)
def test_isnull(self):
self.assert_eq(self.pser.isnull(), self.psser.isnull())
def test_astype(self):
pser = self.pser
psser = self.psser
self.assert_eq(pser.astype(str), psser.astype(str))
self.assert_eq(pser.astype("category"), psser.astype("category"))
cat_type = CategoricalDtype(categories=["a", "b", "c"])
self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))
def test_neg(self):
self.assertRaises(TypeError, lambda: -self.psser)
def test_abs(self):
self.assertRaises(TypeError, lambda: abs(self.psser))
def test_invert(self):
self.assertRaises(TypeError, lambda: ~self.psser)
def test_eq(self):
pdf, psdf = self.datetime_pdf, self.datetime_psdf
self.assert_eq(pdf["this"] == pdf["that"], psdf["this"] == psdf["that"])
self.assert_eq(pdf["this"] == pdf["this"], psdf["this"] == psdf["this"])
def test_ne(self):
pdf, psdf = self.datetime_pdf, self.datetime_psdf
self.assert_eq(pdf["this"] != pdf["that"], psdf["this"] != psdf["that"])
self.assert_eq(pdf["this"] != pdf["this"], psdf["this"] != psdf["this"])
def test_lt(self):
pdf, psdf = self.datetime_pdf, self.datetime_psdf
self.assert_eq(pdf["this"] < pdf["that"], psdf["this"] < psdf["that"])
self.assert_eq(pdf["this"] < pdf["this"], psdf["this"] < psdf["this"])
def test_le(self):
pdf, psdf = self.datetime_pdf, self.datetime_psdf
self.assert_eq(pdf["this"] <= pdf["that"], psdf["this"] <= psdf["that"])
self.assert_eq(pdf["this"] <= pdf["this"], psdf["this"] <= psdf["this"])
def test_gt(self):
pdf, psdf = self.datetime_pdf, self.datetime_psdf
self.assert_eq(pdf["this"] > pdf["that"], psdf["this"] > psdf["that"])
self.assert_eq(pdf["this"] > pdf["this"], psdf["this"] > psdf["this"])
def test_ge(self):
pdf, psdf = self.datetime_pdf, self.datetime_psdf
self.assert_eq(pdf["this"] >= pdf["that"], psdf["this"] >= psdf["that"])
self.assert_eq(pdf["this"] >= pdf["this"], psdf["this"] >= psdf["this"])
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.data_type_ops.test_datetime_ops import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
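# Hypothetical usage sketch (not part of the test suite): the behaviour pinned
# down above -- datetime Series support subtraction (returning elapsed whole
# seconds as integers) while other arithmetic raises TypeError.
#   import datetime
#   import pandas as pd
#   import pyspark.pandas as ps
#   psser = ps.Series(pd.date_range("1994-01-31 10:30:15", periods=3, freq="D"))
#   psser - datetime.datetime(1994, 1, 31, 10, 30, 0)  # elapsed seconds as integers
#   psser * 2                                          # raises TypeError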
|
|
#!/usr/bin/env python
# ctypes-opencv - A Python wrapper for OpenCV using ctypes
# Copyright (c) 2008, Minh-Tri Pham
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# * Neither the name of ctypes-opencv's copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# For further inquiries, please contact Minh-Tri Pham at pmtri80@gmail.com.
# ----------------------------------------------------------------------------
from ctypes import *
from cxcore import *
import cxcore
from highgui import CV_CVTIMG_SWAP_RB, CV_CVTIMG_FLIP, cvConvertImage
#=============================================================================
# Helpers for access to images for other GUI packages
#=============================================================================
__all__ = ['cvCopyImg', 'cvCreateImageAs']
#-----------------------------------------------------------------------------
# wx -- by Gary Bishop
#-----------------------------------------------------------------------------
# modified a bit by Minh-Tri Pham
try:
import wx
def cvIplImageAsBitmap(self, flip=True):
flags = CV_CVTIMG_SWAP_RB
if flip:
flags |= CV_CVTIMG_FLIP
cvConvertImage(self, self, flags)
return wx.BitmapFromBuffer(self.width, self.height, self.data_as_string())
IplImage.as_wx_bitmap = cvIplImageAsBitmap
__all__ += ['cvIplImageAsBitmap']
except ImportError:
pass
#-----------------------------------------------------------------------------
# PIL -- by Jeremy Bethmont
#-----------------------------------------------------------------------------
try:
from PIL import Image
from cv import cvCvtColor, CV_RGB2BGR
def pil_to_ipl(im_pil):
im_ipl = cvCreateImageHeader(cvSize(im_pil.size[0], im_pil.size[1]),
IPL_DEPTH_8U, 3)
data = im_pil.tostring('raw', 'RGB', im_pil.size[0] * 3)
cvSetData(im_ipl, cast(data, POINTER(c_byte)), im_pil.size[0] * 3)
cvCvtColor(im_ipl, im_ipl, CV_RGB2BGR)
im_ipl._depends = (data,)
return im_ipl
def ipl_to_pil(im_ipl):
size = (im_ipl.width, im_ipl.height)
data = im_ipl.data_as_string()
im_pil = Image.fromstring(
"RGB", size, data,
'raw', "BGR", im_ipl.widthStep
)
return im_pil
__all__ += ['ipl_to_pil', 'pil_to_ipl']
except ImportError:
pass
#-----------------------------------------------------------------------------
# numpy's ndarray -- by Minh-Tri Pham
#-----------------------------------------------------------------------------
try:
import numpy
# create a read/write buffer from memory
from_memory = pythonapi.PyBuffer_FromReadWriteMemory
from_memory.restype = py_object
def as_numpy_2darray(ctypes_ptr, width_step, width, height, dtypename, nchannels=1):
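        """Wrap the memory at ctypes_ptr in a numpy view without copying.
        width_step is the row stride in bytes (0 means densely packed rows);
        the result has shape (height, width) or (height, width, nchannels).
        """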
esize = numpy.dtype(dtypename).itemsize
if width_step == 0:
width_step = width*esize
buf = from_memory(ctypes_ptr, width_step*height)
arr = numpy.frombuffer(buf, dtype=dtypename, count=width*nchannels*height)
if nchannels > 1:
arr = arr.reshape(height, width, nchannels)
arr.strides = (width_step, esize*nchannels, esize)
else:
arr = arr.reshape(height, width)
arr.strides = (width_step, esize)
return arr
ipldepth2dtype = {
IPL_DEPTH_1U: numpy.bool,
IPL_DEPTH_8U: numpy.uint8,
IPL_DEPTH_8S: numpy.int8,
IPL_DEPTH_16U: numpy.uint16,
IPL_DEPTH_16S: numpy.int16,
IPL_DEPTH_32S: numpy.int32,
IPL_DEPTH_32F: numpy.float32,
IPL_DEPTH_64F: numpy.float64,
}
def _iplimage_as_numpy_array(self):
"""Converts an IplImage into ndarray"""
return as_numpy_2darray(self.imageData, self.widthStep, self.width, self.height, ipldepth2dtype[self.depth], self.nChannels)
IplImage.as_numpy_array = _iplimage_as_numpy_array
def cvCreateImageFromNumpyArray(a):
"""Creates an IplImage from a numpy array. Raises TypeError if not successful.
Inline function: cvGetImage(cvCreateMatFromNumpyArray(a))
"""
return cvGetImage(cvCreateMatFromNumpyArray(a))
mat_to_dtype = {
CV_8U : numpy.uint8,
CV_8S : numpy.int8,
CV_16U : numpy.uint16,
CV_16S : numpy.int16,
CV_32S : numpy.int32,
CV_32F : numpy.float32,
CV_64F : numpy.float64,
}
dtype_to_mat = {
numpy.dtype('uint8') : 'CV_8U',
numpy.dtype('int8') : 'CV_8S',
numpy.dtype('uint16') : 'CV_16U',
numpy.dtype('int16') : 'CV_16S',
numpy.dtype('int32') : 'CV_32S',
numpy.dtype('float32') : 'CV_32F',
numpy.dtype('float64') : 'CV_64F',
}
def _cvmat_as_numpy_array(self):
"""Converts a CvMat into ndarray"""
return as_numpy_2darray(self.data.ptr, self.step, self.cols, self.rows, mat_to_dtype[CV_MAT_DEPTH(self.type)], CV_MAT_CN(self.type))
CvMat.as_numpy_array = _cvmat_as_numpy_array
def cvCreateMatFromNumpyArray(arr):
"""Creates a CvMat from a numpy array. Raises TypeError if not successful.
The numpy array must be of rank 1 or 2.
If it is of rank 1, it is converted into a row vector.
If it is of rank 2, it is converted into a matrix.
"""
if not isinstance(arr, numpy.ndarray):
            raise TypeError("'arr' is not a numpy ndarray.")
shape = arr.shape
rank = len(shape)
if rank == 1:
shape = (shape[0], 1,1)
elif rank == 2:
shape = (shape[0], shape[1], 1,)
height, width, depth = shape
mat_type = dtype_to_mat[arr.dtype] + "C%d" % depth
b = cvMat(height, width, getattr(cxcore, mat_type), arr.ctypes.data)
b.depends = (arr,)
return b
def _cvmatnd_as_numpy_array(self):
"""Converts a CvMatND into ndarray"""
nc = CV_MAT_CN(self.type)
dtypename = mat_to_dtype[CV_MAT_DEPTH(self.type)]
esize = numpy.dtype(dtypename).itemsize
sd = self.dim[:self.dims]
strides = [x.step for x in sd]
size = [x.size for x in sd]
if nc > 1:
strides += [esize]
size += [nc]
buf = from_memory(self.data.ptr, strides[0]*size[0])
arr = numpy.frombuffer(buf, dtype=dtypename, count=numpy.prod(size)).reshape(size)
arr.strides = tuple(strides)
return arr
CvMatND.as_numpy_array = _cvmatnd_as_numpy_array
__all__ += ['cvCreateImageFromNumpyArray', 'cvCreateMatFromNumpyArray']
except ImportError:
pass
def cvCreateImageAs(src):
return cvCreateImage((src.width, src.height), src.depth, src.nChannels)
def cvCopyImg(src, dest = None):
if dest is None:
dest = cvCreateImageAs(src)
cvCopy(src, dest)
return dest
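#-----------------------------------------------------------------------------
# Illustrative usage -- a minimal sketch, not executed on import. It assumes an
# IplImage `src` obtained elsewhere (e.g. via cvLoadImage from the highgui part
# of this wrapper) and that the optional numpy/PIL blocks above imported
# successfully; only helpers defined in this section are used.
#-----------------------------------------------------------------------------
def _demo_image_helpers(src):
    copy = cvCopyImg(src)            # independent copy with the same size/depth/channels
    blank = cvCreateImageAs(src)     # uninitialized image with the same geometry
    arr = copy.as_numpy_array()      # zero-copy ndarray view (numpy block above)
    pil_img = ipl_to_pil(copy)       # PIL image built from the BGR buffer (PIL block above)
    return blank, arr, pil_img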
|
|
from django.contrib.admin.filterspecs import FilterSpec
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import quote
from django.core.paginator import Paginator, InvalidPage
from django.db import models
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import ugettext
from django.utils.http import urlencode
import operator
# The system will display a "Show all" link on the change list only if the
# total result count is less than or equal to this setting.
MAX_SHOW_ALL_ALLOWED = 200
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
TO_FIELD_VAR = 't'
IS_POPUP_VAR = 'pop'
ERROR_FLAG = 'e'
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = '(None)'
class ChangeList(object):
def __init__(self, request, model, list_display, list_display_links, list_filter, date_hierarchy, search_fields, list_select_related, list_per_page, list_editable, model_admin):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.root_query_set = model_admin.queryset(request)
self.list_display = list_display
self.list_display_links = list_display_links
self.list_filter = list_filter
self.date_hierarchy = date_hierarchy
self.search_fields = search_fields
self.list_select_related = list_select_related
self.list_per_page = list_per_page
self.list_editable = list_editable
self.model_admin = model_admin
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.is_popup = IS_POPUP_VAR in request.GET
self.to_field = request.GET.get(TO_FIELD_VAR)
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if TO_FIELD_VAR in self.params:
del self.params[TO_FIELD_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
self.order_field, self.order_type = self.get_ordering()
self.query = request.GET.get(SEARCH_VAR, '')
self.query_set = self.get_query_set()
self.get_results(request)
self.title = (self.is_popup and ugettext('Select %s') % force_unicode(self.opts.verbose_name) or ugettext('Select %s to change') % force_unicode(self.opts.verbose_name))
self.filter_specs, self.has_filters = self.get_filters(request)
self.pk_attname = self.lookup_opts.pk.attname
def get_filters(self, request):
filter_specs = []
if self.list_filter:
filter_fields = [self.lookup_opts.get_field(field_name) for field_name in self.list_filter]
for f in filter_fields:
spec = FilterSpec.create(f, request, self.params, self.model, self.model_admin)
if spec and spec.has_output():
filter_specs.append(spec)
return filter_specs, bool(filter_specs)
def get_query_string(self, new_params=None, remove=None):
if new_params is None: new_params = {}
if remove is None: remove = []
p = self.params.copy()
for r in remove:
for k in p.keys():
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(p)
def get_results(self, request):
paginator = Paginator(self.query_set, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
# Perform a slight optimization: Check to see whether any filters were
        # given. If not, use paginator.count to calculate the number of objects,
        # because we've already computed paginator.count and the value is cached.
if not self.query_set.query.where:
full_result_count = result_count
else:
full_result_count = self.root_query_set.count()
can_show_all = result_count <= MAX_SHOW_ALL_ALLOWED
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = self.query_set._clone()
else:
try:
result_list = paginator.page(self.page_num+1).object_list
except InvalidPage:
result_list = ()
self.result_count = result_count
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def get_ordering(self):
lookup_opts, params = self.lookup_opts, self.params
# For ordering, first check the "ordering" parameter in the admin
# options, then check the object's default ordering. If neither of
# those exist, order descending by ID by default. Finally, look for
# manually-specified ordering from the query string.
ordering = self.model_admin.ordering or lookup_opts.ordering or ['-' + lookup_opts.pk.name]
if ordering[0].startswith('-'):
order_field, order_type = ordering[0][1:], 'desc'
else:
order_field, order_type = ordering[0], 'asc'
if ORDER_VAR in params:
try:
field_name = self.list_display[int(params[ORDER_VAR])]
try:
f = lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
try:
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
order_field = attr.admin_order_field
except AttributeError:
pass
else:
order_field = f.name
except (IndexError, ValueError):
pass # Invalid ordering specified. Just use the default.
if ORDER_TYPE_VAR in params and params[ORDER_TYPE_VAR] in ('asc', 'desc'):
order_type = params[ORDER_TYPE_VAR]
return order_field, order_type
def get_query_set(self):
qs = self.root_query_set
lookup_params = self.params.copy() # a dictionary of the query string
for i in (ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR):
if i in lookup_params:
del lookup_params[i]
for key, value in lookup_params.items():
if not isinstance(key, str):
# 'key' will be used as a keyword argument later, so Python
# requires it to be a string.
del lookup_params[key]
lookup_params[smart_str(key)] = value
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
lookup_params[key] = value.split(',')
# if key ends with __isnull, special case '' and false
if key.endswith('__isnull'):
if value.lower() in ('', 'false'):
lookup_params[key] = False
else:
lookup_params[key] = True
# Apply lookup parameters from the query string.
try:
qs = qs.filter(**lookup_params)
# Naked except! Because we don't have any other way of validating "params".
# They might be invalid if the keyword arguments are incorrect, or if the
        # values are not of the correct type, so we might get FieldError, ValueError,
        # ValidationError, or some other exception from a custom field that raises
# when handed impossible data.
except:
raise IncorrectLookupParameters
# Use select_related() if one of the list_display options is a field
# with a relationship and the provided queryset doesn't already have
# select_related defined.
if not qs.query.select_related:
if self.list_select_related:
qs = qs.select_related()
else:
for field_name in self.list_display:
try:
f = self.lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
pass
else:
if isinstance(f.rel, models.ManyToOneRel):
qs = qs.select_related()
break
# Set ordering.
if self.order_field:
qs = qs.order_by('%s%s' % ((self.order_type == 'desc' and '-' or ''), self.order_field))
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
if self.search_fields and self.query:
for bit in self.query.split():
or_queries = [models.Q(**{construct_search(str(field_name)): bit}) for field_name in self.search_fields]
qs = qs.filter(reduce(operator.or_, or_queries))
for field_name in self.search_fields:
if '__' in field_name:
qs = qs.distinct()
break
return qs
def url_for_result(self, result):
return "%s/" % quote(getattr(result, self.pk_attname))
|
|
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# based on
# https://github.com/openstack/nova/blob/master/nova/network/linux_net.py
"""Implements iptables rules using linux utilities."""
import contextlib
import os
import re
import sys
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from neutron.agent.common import config
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import utils as linux_utils
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.i18n import _LE, _LW
LOG = logging.getLogger(__name__)
config.register_iptables_opts(cfg.CONF)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(sys.argv[0])[:16].replace(' ', '_')
binary_name = get_binary_name()
# The length of a chain name must be less than or equal to 11 characters.
# <max length of iptables chain name> - (<binary_name> + '-') = 28-(16+1) = 11
MAX_CHAIN_LEN_WRAP = 11
MAX_CHAIN_LEN_NOWRAP = 28
# Number of iptables rules to print before and after a rule that causes a
# failure during iptables-restore
IPTABLES_ERROR_LINES_OF_CONTEXT = 5
def comment_rule(rule, comment):
if not cfg.CONF.AGENT.comment_iptables_rules or not comment:
return rule
return '%s -m comment --comment "%s"' % (rule, comment)
def get_chain_name(chain_name, wrap=True):
if wrap:
return chain_name[:MAX_CHAIN_LEN_WRAP]
else:
return chain_name[:MAX_CHAIN_LEN_NOWRAP]
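# Minimal sketch of the length arithmetic above (not used by the manager below):
# the wrapped chain name '<binary_name>-<chain>' stays within iptables' 28-character
# limit because binary_name is capped at 16 characters and the wrapped chain part
# at MAX_CHAIN_LEN_WRAP (16 + 1 + 11 == 28).
def _example_wrapped_chain_length(chain='some-very-long-chain-name'):
    wrapped = '%s-%s' % (binary_name, get_chain_name(chain, wrap=True))
    assert len(wrapped) <= MAX_CHAIN_LEN_NOWRAP
    return wrapped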
class IptablesRule(object):
"""An iptables rule.
    You shouldn't need to use this class directly; it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False,
binary_name=binary_name, tag=None, comment=None):
self.chain = get_chain_name(chain, wrap)
self.rule = rule
self.wrap = wrap
self.top = top
self.wrap_name = binary_name[:16]
self.tag = tag
self.comment = comment
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.wrap_name, self.chain)
else:
chain = self.chain
return comment_rule('-A %s %s' % (chain, self.rule), self.comment)
class IptablesTable(object):
"""An iptables table."""
def __init__(self, binary_name=binary_name):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.wrap_name = binary_name[:16]
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if neutron-openvswitch-agent creates a chain named 'OUTPUT',
it'll actually end up being named 'neutron-openvswi-OUTPUT'.
"""
name = get_chain_name(name, wrap)
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def _select_chain_set(self, wrap):
if wrap:
return self.chains
else:
return self.unwrapped_chains
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
name = get_chain_name(name, wrap)
chain_set = self._select_chain_set(wrap)
if name not in chain_set:
LOG.debug('Attempted to remove chain %s which does not exist',
name)
return
chain_set.remove(name)
if not wrap:
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
self.remove_chains.add(name)
# first, add rules to remove that have a matching chain name
self.remove_rules += [r for r in self.rules if r.chain == name]
# next, remove rules from list that have a matching chain name
self.rules = [r for r in self.rules if r.chain != name]
if not wrap:
jump_snippet = '-j %s' % name
# next, add rules to remove that have a matching jump chain
self.remove_rules += [r for r in self.rules
if jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
# finally, remove rules from list that have a matching jump chain
self.rules = [r for r in self.rules
if jump_snippet not in r.rule]
def add_rule(self, chain, rule, wrap=True, top=False, tag=None,
comment=None):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
chain = get_chain_name(chain, wrap)
if wrap and chain not in self.chains:
raise LookupError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.append(IptablesRule(chain, rule, wrap, top, self.wrap_name,
tag, comment))
def _wrap_target_chain(self, s, wrap):
if s.startswith('$'):
s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
return s
def remove_rule(self, chain, rule, wrap=True, top=False, comment=None):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
chain = get_chain_name(chain, wrap)
try:
if '$' in rule:
rule = ' '.join(
self._wrap_target_chain(e, wrap) for e in rule.split(' '))
self.rules.remove(IptablesRule(chain, rule, wrap, top,
self.wrap_name,
comment=comment))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top,
self.wrap_name,
comment=comment))
except ValueError:
LOG.warn(_LW('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def _get_chain_rules(self, chain, wrap):
chain = get_chain_name(chain, wrap)
return [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chained_rules = self._get_chain_rules(chain, wrap)
for rule in chained_rules:
self.rules.remove(rule)
def clear_rules_by_tag(self, tag):
if not tag:
return
rules = [rule for rule in self.rules if rule.tag == tag]
for rule in rules:
self.rules.remove(rule)
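# Illustrative sketch of IptablesTable on its own (IptablesManager below wires the
# real tables together): chain names get the binary-name prefix, and '$' references
# are expanded to their wrapped form when rules are rendered. Not used elsewhere.
def _example_iptables_table():
    table = IptablesTable(binary_name=binary_name)
    table.add_chain('filter-top')
    table.add_chain('accepted')
    table.add_rule('filter-top', '-j $accepted')
    # each entry renders as e.g. '-A <binary_name>-filter-top -j <binary_name>-accepted'
    return [str(rule) for rule in table.rules]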
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, neutron-filter-top. It's added at the top of FORWARD and OUTPUT.
Its name is not wrapped, so it's shared between the various neutron
workers. It's intended for rules that need to live at the top of the
FORWARD and OUTPUT chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from neutron-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, _execute=None, state_less=False, use_ipv6=False,
namespace=None, binary_name=binary_name):
if _execute:
self.execute = _execute
else:
self.execute = linux_utils.execute
self.use_ipv6 = use_ipv6
self.namespace = namespace
self.iptables_apply_deferred = False
self.wrap_name = binary_name[:16]
self.ipv4 = {'filter': IptablesTable(binary_name=self.wrap_name)}
self.ipv6 = {'filter': IptablesTable(binary_name=self.wrap_name)}
# Add a neutron-filter-top chain. It's intended to be shared
# among the various neutron components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('neutron-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j neutron-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('neutron-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
if not state_less:
self.ipv4.update(
{'mangle': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update(
{'mangle': ['PREROUTING', 'INPUT', 'FORWARD', 'OUTPUT',
'POSTROUTING']})
self.ipv4.update(
{'nat': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'nat': ['PREROUTING',
'OUTPUT', 'POSTROUTING']})
self.ipv4.update({'raw': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[4].update({'raw': ['PREROUTING', 'OUTPUT']})
self.ipv6.update({'raw': IptablesTable(binary_name=self.wrap_name)})
builtin_chains[6].update({'raw': ['PREROUTING', 'OUTPUT']})
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' %
(chain), wrap=False)
if not state_less:
# Add a neutron-postrouting-bottom chain. It's intended to be
# shared among the various neutron components. We set it as the
# last chain of POSTROUTING chain.
self.ipv4['nat'].add_chain('neutron-postrouting-bottom',
wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING',
'-j neutron-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared neutron-postrouting-bottom
# chain so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('neutron-postrouting-bottom',
'-j $snat', wrap=False,
comment=ic.SNAT_OUT)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
# Add a mark chain to mangle PREROUTING chain. It is used to
# identify ingress packets from a certain interface.
self.ipv4['mangle'].add_chain('mark')
self.ipv4['mangle'].add_rule('PREROUTING', '-j $mark')
def get_chain(self, table, chain, ip_version=4, wrap=True):
try:
requested_table = {4: self.ipv4, 6: self.ipv6}[ip_version][table]
except KeyError:
return []
return requested_table._get_chain_rules(chain, wrap)
def is_chain_empty(self, table, chain, ip_version=4, wrap=True):
return not self.get_chain(table, chain, ip_version, wrap)
@contextlib.contextmanager
def defer_apply(self):
"""Defer apply context."""
self.defer_apply_on()
try:
yield
finally:
try:
self.defer_apply_off()
except Exception:
msg = _LE('Failure applying iptables rules')
LOG.exception(msg)
raise n_exc.IpTablesApplyException(msg)
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
self.iptables_apply_deferred = False
self._apply()
def apply(self):
if self.iptables_apply_deferred:
return
self._apply()
def _apply(self):
lock_name = 'iptables'
if self.namespace:
lock_name += '-' + self.namespace
try:
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
LOG.debug('Got semaphore / lock "%s"', lock_name)
return self._apply_synchronized()
finally:
LOG.debug('Semaphore / lock released "%s"', lock_name)
def _apply_synchronized(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if self.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
args = ['%s-save' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
all_tables = self.execute(args, run_as_root=True)
all_lines = all_tables.split('\n')
# Traverse tables in sorted order for predictable dump output
for table_name in sorted(tables):
table = tables[table_name]
start, end = self._find_table(all_lines, table_name)
all_lines[start:end] = self._modify_rules(
all_lines[start:end], table, table_name)
args = ['%s-restore' % (cmd,), '-c']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
try:
self.execute(args, process_input='\n'.join(all_lines),
run_as_root=True)
except RuntimeError as r_error:
with excutils.save_and_reraise_exception():
try:
line_no = int(re.search(
'iptables-restore: line ([0-9]+?) failed',
str(r_error)).group(1))
context = IPTABLES_ERROR_LINES_OF_CONTEXT
log_start = max(0, line_no - context)
log_end = line_no + context
except AttributeError:
# line error wasn't found, print all lines instead
log_start = 0
log_end = len(all_lines)
log_lines = ('%7d. %s' % (idx, l)
for idx, l in enumerate(
all_lines[log_start:log_end],
log_start + 1)
)
LOG.error(_LE("IPTablesManager.apply failed to apply the "
"following set of iptables rules:\n%s"),
'\n'.join(log_lines))
LOG.debug("IPTablesManager.apply completed with success")
def _find_table(self, lines, table_name):
if len(lines) < 3:
            # fewer than 3 lines only happens with fake/empty iptables output
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
LOG.debug('Unable to find table %s', table_name)
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
def _find_rules_index(self, lines):
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(lines):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
return rules_index
def _find_last_entry(self, filter_list, match_str):
# find a matching entry, starting from the bottom
for s in reversed(filter_list):
s = s.strip()
if match_str in s:
return s
def _modify_rules(self, current_lines, table, table_name):
# Chains are stored as sets to avoid duplicates.
# Sort the output chains here to make their order predictable.
unwrapped_chains = sorted(table.unwrapped_chains)
chains = sorted(table.chains)
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['# Generated by iptables_manager',
'*' + table_name, 'COMMIT',
'# Completed by iptables_manager']
current_lines = fake_table
# Fill old_filter with any chains or rules we might have added,
# they could have a [packet:byte] count we want to preserve.
# Fill new_filter with any chains or rules without our name in them.
old_filter, new_filter = [], []
for line in current_lines:
(old_filter if self.wrap_name in line else
new_filter).append(line.strip())
rules_index = self._find_rules_index(new_filter)
all_chains = [':%s' % name for name in unwrapped_chains]
all_chains += [':%s-%s' % (self.wrap_name, name) for name in chains]
# Iterate through all the chains, trying to find an existing
# match.
our_chains = []
for chain in all_chains:
chain_str = str(chain).strip()
old = self._find_last_entry(old_filter, chain_str)
if not old:
dup = self._find_last_entry(new_filter, chain_str)
new_filter = [s for s in new_filter if chain_str not in s.strip()]
# if no old or duplicates, use original chain
if old or dup:
chain_str = str(old or dup)
else:
# add-on the [packet:bytes]
chain_str += ' - [0:0]'
our_chains += [chain_str]
# Iterate through all the rules, trying to find an existing
# match.
our_rules = []
bot_rules = []
for rule in rules:
rule_str = str(rule).strip()
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
old = self._find_last_entry(old_filter, rule_str)
if not old:
dup = self._find_last_entry(new_filter, rule_str)
new_filter = [s for s in new_filter if rule_str not in s.strip()]
# if no old or duplicates, use original rule
if old or dup:
rule_str = str(old or dup)
# backup one index so we write the array correctly
if not old:
rules_index -= 1
else:
# add-on the [packet:bytes]
rule_str = '[0:0] ' + rule_str
if rule.top:
# rule.top == True means we want this rule to be at the top.
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = our_chains
def _strip_packets_bytes(line):
# strip any [packet:byte] counts at start or end of lines
if line.startswith(':'):
# it's a chain, for example, ":neutron-billing - [0:0]"
line = line.split(':')[1]
line = line.split(' - [', 1)[0]
elif line.startswith('['):
# it's a rule, for example, "[0:0] -A neutron-billing..."
line = line.split('] ', 1)[1]
line = line.strip()
return line
seen_chains = set()
def _weed_out_duplicate_chains(line):
# ignore [packet:byte] counts at end of lines
if line.startswith(':'):
line = _strip_packets_bytes(line)
if line in seen_chains:
return False
else:
seen_chains.add(line)
# Leave it alone
return True
seen_rules = set()
def _weed_out_duplicate_rules(line):
if line.startswith('['):
line = _strip_packets_bytes(line)
if line in seen_rules:
return False
else:
seen_rules.add(line)
# Leave it alone
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
line = _strip_packets_bytes(line)
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
line = _strip_packets_bytes(line)
for rule in remove_rules:
rule_str = _strip_packets_bytes(str(rule))
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates. Go through the chains and rules, letting
# the *last* occurrence take precedence since it could have a
# non-zero [packet:byte] count we want to preserve. We also filter
# out anything in the "remove" list.
new_filter.reverse()
new_filter = [line for line in new_filter
if _weed_out_duplicate_chains(line) and
_weed_out_duplicate_rules(line) and
_weed_out_removes(line)]
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
for rule in remove_rules:
remove_rules.remove(rule)
return new_filter
def _get_traffic_counters_cmd_tables(self, chain, wrap=True):
name = get_chain_name(chain, wrap)
cmd_tables = [('iptables', key) for key, table in self.ipv4.items()
if name in table._select_chain_set(wrap)]
if self.use_ipv6:
cmd_tables += [('ip6tables', key)
for key, table in self.ipv6.items()
if name in table._select_chain_set(wrap)]
return cmd_tables
def get_traffic_counters(self, chain, wrap=True, zero=False):
"""Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables:
LOG.warn(_LW('Attempted to get traffic counters of chain %s which '
'does not exist'), chain)
return
name = get_chain_name(chain, wrap)
acc = {'pkts': 0, 'bytes': 0}
for cmd, table in cmd_tables:
args = [cmd, '-t', table, '-L', name, '-n', '-v', '-x']
if zero:
args.append('-Z')
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
current_table = self.execute(args, run_as_root=True)
current_lines = current_table.split('\n')
for line in current_lines[2:]:
if not line:
break
data = line.split()
if (len(data) < 2 or
not data[0].isdigit() or
not data[1].isdigit()):
break
acc['pkts'] += int(data[0])
acc['bytes'] += int(data[1])
return acc
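# Minimal usage sketch -- an assumption-laden example, not part of the module: it
# requires root privileges and working iptables-save/iptables-restore, since
# apply() shells out via linux_utils.execute. Rules added inside defer_apply()
# are pushed to the kernel in a single iptables-restore run when the block exits.
def _example_manager_usage(namespace=None):
    manager = IptablesManager(use_ipv6=False, namespace=namespace)
    with manager.defer_apply():
        manager.ipv4['filter'].add_chain('example')
        manager.ipv4['filter'].add_rule('example', '-j ACCEPT')
    # after the context exits the rules are live; the in-memory copy is queryable:
    return manager.get_chain('filter', 'example')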
|
|
'''
Filename: OffOnlineMode.py
Description: offline and online mode
1) automatically change from offline to online mode after a folder becomes ~90% full
@author: yan, chungyan5@gmail.com, 2Store
'''
## import other libraries
##################################################
### logging library
import logging
### file names matching library and file system searching
import fnmatch
import os
#import scandir # using scandir lib. instead of standard os due to more faster speed
import shutil
### configuration file parser library
import ConfigParser
### global variables
import globalMod
### execute linux command
import subprocess
### error handling
import errno
## create logging
##################################################
serverModLogger = logging.getLogger('ServerMon.OffOnlineMode')
## this module variables
##################################################
## this module class
##################################################
class OffOnlineMode(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
## find the corresponding Pool folder (.../user/files/...) from Sync device folder
## syncDeviceFolder -- sync device folder path
## return the path at pool
##################################################
def findCorrPoolFolder(self, syncDeviceFolder):
path_list = syncDeviceFolder.split(os.sep)
i = path_list.index(globalMod.SYNC_DEVICES)
path_list.pop(i+1)
path_list.remove(globalMod.SYNC_DEVICES)
return os.sep.join(path_list)
## Description: move data from src to dest
    ## - avoid overwriting files: just skip the moving process in that case
## eg. mv src/sameFileName dest/sameFileName
## or mv src/fileName dest/anotherFileName
## or mv src/fileName dest/anotherFolder where dest/anotherFolder contains sameFileName (dest/anotherFolder/sameFileName)
## i.e. keep all of them in src/sameFileName, dest/sameFileName, dest/anotherFileName and dest/anotherFolder/sameFileName
## AVOID: standard posix will overwrite the dest/sameFileName or dest/anotherFileName or dest/anotherFolder/sameFileName by src/fileName
## - if same folder name(src/oneFolder and dest/oneFolder), move files inside src folder one by one
## eg. mv src/oneFolder dest/oneFolder,
## i.e. mv src/oneFolder/* to dest/oneFolder/*
## AVOID: standard posix will mv src/oneFolder to dest/oneFolder/oneFolder
## Testing Cases:
## - move src/fileName and dest/fileName
## * move src/sameFileName and dest/sameFileName; skip standard process and do nothing
## * move src/fileName and dest/anotherFileName; skip standard process and do nothing
## - move src/folderName and dest/fileName; standard process will raise err and do nothing
    ## - move src/fileName and dest/folderName; skip standard process and check dest/folderName's content
## - move src/folderName and dest/folderName
## * move src/sameFolderName and dest/sameFolderName; skip standard process and merge both Folders content into dest/sameFolderName
    ## * move src/folderName and dest/anotherFolderName; standard process raises an error and does nothing when dest/folderName/subFolderName is the same as srcFolderName
## pseudo code:
## if both are files, do nothing
## else if src is file and dest is folder
## list dest content
## if have same file name by scanning, do nothing
## if do not have same file name after scanning, do moving
## else if both are folders and same name
## list src content
## one by one of src content to call this function again as file/folder to folder
## if src folder is empty, remove it
## else standard process
## catch the exceptional errors, then skip them
##################################################
def mvData(self, srcPath, destPath):
# get full path src file/folder and dest file/folder
sDirFlag = os.path.isdir(srcPath) # get file or folder attribute
dDirFlag = os.path.isdir(destPath)
srcName = os.path.basename(srcPath) # remove path and get base name only
destName = os.path.basename(destPath)
# /srcFileName and /destFileName
if (sDirFlag | dDirFlag) == 0:
serverModLogger.debug("both as files %s %s", srcPath, destPath)
# do nothing
pass
# /srcFileName and /destFolderName
elif (sDirFlag==0) & (dDirFlag==1):
serverModLogger.debug("/srcFileName and /destFolderName %s %s", srcPath, destPath)
# list dest content
for destSubFileFolderName in os.listdir(destPath):
serverModLogger.debug("%s at list dest dir %s", destSubFileFolderName, destPath)
# if have same file name(/srcFileName == /destFolderName/destSubFileOrFolderName), do nothing
serverModLogger.debug("destFileFolderName %s and srcName %s", destSubFileFolderName, srcName)
if fnmatch.fnmatch(destSubFileFolderName, srcName):
serverModLogger.debug("/srcFileName == /destFolderName/destSubFileOrFolderName, so do nothing")
return
# if do not have same file name after scanning, do moving
serverModLogger.debug("shutil.move(%s, %s)", srcPath, destPath)
shutil.move(srcPath, destPath) # this function will mv file, soft-link
# /sameSrcFolderName == /sameDestFolderName
elif (sDirFlag & dDirFlag) & (srcName == destName):
serverModLogger.debug("/sameSrcFolderName == /sameDestFolderName %s %s", srcPath, destPath)
# list src content
for srcSubFileFolderName in os.listdir(srcPath):
serverModLogger.debug("%s at list src dir %s %s", srcSubFileFolderName, srcPath, destPath)
# one by one of src content to call this function again as file/folder to folder
self.mvData(os.path.join(srcPath, srcSubFileFolderName), destPath)
# all others are using standard process
try:
os.rmdir(srcPath)
except OSError as ose:
if ose.errno == errno.ENOTEMPTY:
serverModLogger.debug("cannot remove %s due to directory not empty", srcPath)
# all others are using standard process
else:
try:
shutil.move(srcPath, destPath)
except OSError as ose:
serverModLogger.debug("OSError %s", ose)
except shutil.Error as shutilErr:
serverModLogger.debug("Error in dest/folderName/subFolderName same to srcFolderName: %s", shutilErr)
## Handle meta file
##################################################
def handleMetaFile(self, thisFolder, thisMetaFileName):
        ### read each meta file (detect the 1st meta file only, then ignore all sub-folder meta files)
        # TODO: the "*.2storeMeta" file format is not verified; if the format is wrong, an exception will quit and break this daemon
meta = ConfigParser.ConfigParser()
meta.read(os.path.join(thisFolder, thisMetaFileName))
serverModLogger.debug("os.path.join(thisFolder, thisMetaFileName) as %s", os.path.join(thisFolder, thisMetaFileName))
#### get meta -> ON_OFF_STATUS
onOffStatus = meta.get("default", "ON_OFF_STATUS")
#### if default offline
if onOffStatus == "0":
##### if at Sync_devices folder
if globalMod.SYNC_PATH in thisFolder:
serverModLogger.debug( os.path.join(thisFolder, thisMetaFileName) + " default offline at Sync_devices")
###### get a meta -> max. folder size
try:
                    maxFolderSize = int(meta.get("default", "MAX_FOLDER_SIZE")) * 1024 * 1024 # convert the meta value from GB to KB
                    ###### read the existing folder size
                    # reference: http://stackoverflow.com/questions/1392413/calculating-a-directory-size-using-python
                    existingFolderSize = int(subprocess.check_output(["du", "-s", thisFolder]).split()[0]) # du -s reports the size in KB
###### if existing folder file size is ~80% of the meta -> max. folder size
serverModLogger.debug( "compare %d and %d", existingFolderSize, (maxFolderSize*0.8))
if existingFolderSize >= (maxFolderSize*0.8):
####### locate the corresponding folder at pool (.../user/files/...) or create a new one
serverModLogger.debug( "existingFolderSize larger %s", thisFolder)
update_path = self.findCorrPoolFolder(thisFolder)
serverModLogger.debug( "update_path %s", update_path)
######## create this folder if not existing .../user/files/device.../...
try:
statinfo = os.stat(thisFolder)
os.makedirs(update_path, statinfo[0])
except OSError:
os.chmod(update_path, statinfo[0]) # change the mode same to original
                        ######## move the oldest ~50% of the files to this new pool folder
                        # excluding the .2storeMeta file itself
theseFiles = []
for f in os.listdir(thisFolder):
if not fnmatch.fnmatch(f, globalMod.META_FILE_NAME):
theseFiles.append(os.path.join(thisFolder, f))
try:
thisSortedFiles = sorted(theseFiles, key=os.path.getmtime)
except Exception as e:
serverModLogger.debug( "exception %s", e)
total_size = 0
for fileName in thisSortedFiles:
# check accumulated file size
# chk link
# chk folder
# then file
if (os.path.islink(fileName)):
thisFileSize = os.path.getsize(fileName) /1024 # in XXXkbyte
                            elif os.path.isdir(fileName): # os.path.isdir() returns True for both folders and soft-links to folders
thisFileSize = int(subprocess.check_output(["du", "-s", fileName]).split()[0]) # return in XXXkbyte
else:
thisFileSize = os.path.getsize(fileName) /1024 # in XXXkbyte
total_size += thisFileSize
self.mvData(fileName, update_path)
#serverModLogger.debug( "total_size %d ", total_size)
#serverModLogger.debug( "maxFolderSize*0.5 %d ", maxFolderSize*0.5)
if (total_size>maxFolderSize*0.5):
break
######## create a new .meta file at pool new folder with online mode
# cp the existing meta file and modify it
serverModLogger.debug( "os.path.join(thisFolder, META_FILE_NAME) ", os.path.join(thisFolder, globalMod.META_FILE_NAME))
shutil.copy(os.path.join(thisFolder, globalMod.META_FILE_NAME), update_path)
newMeta = ConfigParser.SafeConfigParser()
newMetaFile = os.path.join(update_path, globalMod.META_FILE_NAME)
newMeta.read(newMetaFile)
newMeta.set('default', 'ON_OFF_STATUS', '2') # change to online mode
with open(newMetaFile, 'wb') as configfile:
newMeta.write(configfile)
###### it is not a folder, so skip
except ConfigParser.NoOptionError:
pass
##### if at pool folder
else:
serverModLogger.debug( os.path.join(thisFolder, thisMetaFileName) + " default offline at pool")
#### if offline
elif onOffStatus == "1":
##### if at Sync_devices folder
if globalMod.SYNC_PATH in thisFolder:
serverModLogger.debug( os.path.join(thisFolder, thisMetaFileName) + " offline at Sync_devices")
##### if at pool folder
else:
serverModLogger.debug( os.path.join(thisFolder, thisMetaFileName) + " offline at pool")
#### if online
elif onOffStatus == "2":
##### if at Sync_devices folder
if globalMod.SYNC_PATH in thisFolder:
serverModLogger.debug( os.path.join(thisFolder, thisMetaFileName) + " online at Sync_devices")
# locate the corresponding folder at pool (.../user/files/...) XXXor create a new oneXXX
update_path = self.findCorrPoolFolder(thisFolder)
#statinfo = os.stat(thisFolder)
# move files to this new pool folder
serverModLogger.debug( "thisFolder %s", thisFolder)
serverModLogger.debug( "update_path %s", update_path)
# moving files
self.mvData(thisFolder, update_path)
# TODO: change the new created folder permission
#for root, dirs, files in os.walk(update_path, topdown=False):
# for dir in dirs:
# os.chmod(dir, statinfo[0])
# for file in files:
# os.chmod(file, statinfo[0])
##### if at pool folder
else:
serverModLogger.debug( os.path.join(thisFolder, thisMetaFileName) + " online at pool")
#else: TODO: adding err. handling in future
    ## Scan the folder corresponding to a meta file, and its sub-folders, recursively (top-down)
##################################################
def scanMetaFolderTopDown(self, folder):
### monitoring the .meta
##################################################
#### scan each folders and files for .meta file at
#### each location from .../owncloud/data/"user".../files/... folders AND
#### .../owncloud/data/"user".../files/Sync_Devices/... folders
# TODO: may be a faster solution than os.walk()
# os.walk() -- no error in encoding, but slower
# scandir.walk() -- has error in encoding, but faster
        # may add exception handling to skip this encoding error, since it does not affect our .meta matching
# TODO: remove the following warning
serverModLogger.debug("base folder fr. Top down as %s", folder)
for curWalkingDirName, subdirList, fileList in os.walk(folder):
#serverModLogger.debug('curWalkingDirName %s', curWalkingDirName)
#for dir in subdirList:
# serverModLogger.debug('subdirList %s', dir)
#for file in fileList:
# serverModLogger.debug('fileList %s', file)
# TODO: only one .2storeMeta need to handle, ignore the sub-folder .2storeMeta
# and XXX_FILE.2storeMeta, so should not use the following for loop
for filename in fnmatch.filter(fileList, '*' + globalMod.META_FILE_NAME):
self.handleMetaFile(curWalkingDirName, filename)
    ## Scan the folder corresponding to a meta file, and its sub-folders, recursively (bottom-up)
##################################################
def scanMetaFolderBottomUp(self, folder):
curWalkingDirName = folder
while True:
### ignore these folders
if globalMod.ignoreFolderListMatch(os.path.basename(curWalkingDirName)):
return
serverModLogger.debug("base folder fr. bottom up as %s", curWalkingDirName)
try:
### open this folder content
fileList = os.listdir(curWalkingDirName)
#### TODO: only one .2storeMeta need to handle, ignore the sub-folder .2storeMeta
#### and XXX_FILE.2storeMeta, so should not use the following for loop
### No such file or directory
except OSError as ose:
serverModLogger.debug("scanMetaFolderBottomUp Err as %s", ose)
else:
### seek the meta file current folder
for filename in fnmatch.filter(fileList, '*' + globalMod.META_FILE_NAME):
### if find the meta file, do something and break this loop
self.handleMetaFile(curWalkingDirName, filename)
return
            ### if we have already arrived at the top level, exit this loop
if curWalkingDirName == globalMod.getBasePath():
return
### go to upper folder
curWalkingDirName = os.path.abspath(os.path.join(curWalkingDirName, os.pardir))
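## Illustrative sketch (not called by the daemon) -- findCorrPoolFolder() only
## manipulates the path string, so its mapping from a Sync-devices folder to the
## corresponding pool folder can be demonstrated directly. The path below is
## hypothetical; globalMod.SYNC_DEVICES is assumed to be the sync-devices folder
## name used elsewhere in this module.
##################################################
def _exampleFindCorrPoolFolder():
    mode = OffOnlineMode()
    syncFolder = os.sep.join(['', 'data', 'user', 'files',
                              globalMod.SYNC_DEVICES, 'device1', 'photos'])
    # drops '<SYNC_DEVICES>/device1' -> '/data/user/files/photos'
    return mode.findCorrPoolFolder(syncFolder)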
|
|
import dsz, dsz.lp
import sys, random, time
import ops, util.ip, ops.timehelper
import os.path
import helper, scanbase
from math import floor
from datetime import datetime
import scanengine2
import monitorengine
import exceptions
def main(arguments):
scanbase.setup_db()
failout = False
scansweepHelper = helper.scansweepHelper([x.lower() for x in arguments])
scansweepHelper.check_env()
create_mode = False
if (scansweepHelper.options.database is not None):
database_op = scansweepHelper.options.database
if (not (database_op == 'create')):
if (scansweepHelper.options.session is not None):
scansweepHelper.database_display(database_op)
else:
scansweepHelper.database_display(database_op)
return
else:
create_mode = True
if (scansweepHelper.options.update is not None):
scansweepHelper.handleupdate()
return
scanbase.write_metadata(scansweepHelper.scansweep_env, scansweepHelper.session, scansweepHelper.scansweep_logfile, scansweepHelper.scansweep_results, scansweepHelper.verbose)
if (scansweepHelper.options.exclude is not None):
scansweepHelper.parseexcludes(scansweepHelper.options.exclude)
if (scansweepHelper.session == scansweepHelper.scansweep_env):
if ((scansweepHelper.options.monitor is None) and (scansweepHelper.options.type is None)):
dsz.ui.Echo('You must specify a type.', dsz.ERROR)
return 0
if ((scansweepHelper.options.monitor is None) and os.path.exists(scansweepHelper.options.type[0])):
if (scansweepHelper.options.target is not None):
dsz.ui.Echo('You cannot use -target when specifying a queue file.', dsz.ERROR)
return 0
queuefile = scansweepHelper.options.type[0]
if (not scansweepHelper.verifyqueue(queuefile)):
failout = True
else:
queuelist = scansweepHelper.getqueuefromfile(queuefile)
for item in queuelist:
scansweepHelper.addtoqueue(item[0], item[1], scansweepHelper.scansweep_env)
elif (scansweepHelper.options.type is not None):
job_type = scansweepHelper.options.type[0].lower()
job = '|'.join(scansweepHelper.options.type)
if (not scansweepHelper.verifyjob(job_type, scansweepHelper.options.type)):
dsz.ui.Echo('Invalid -type options, please verify your parameters.', dsz.ERROR)
return 0
candidate_list = []
if (scansweepHelper.options.target is not None):
if (type(scansweepHelper.options.target) == type([])):
for target_flag in scansweepHelper.options.target:
candidate_list.extend(scansweepHelper.parsetarget(target_flag))
else:
candidate_list = scansweepHelper.parsetarget(scansweepHelper.options.target)
else:
dsz.ui.Echo('You must provide some targets with your scan.', dsz.ERROR)
return 0
if ((len(candidate_list) > 255) and (not scansweepHelper.options.cidroverride)):
                dsz.ui.Echo('You cannot specify more than 255 targets without the -cidroverride option', dsz.ERROR)
failout = True
else:
scansweepHelper.addlisttoqueue({job: candidate_list})
if (scansweepHelper.monitor is not None):
for monitortype in scansweepHelper.monitor:
if (scansweepHelper.verifymonitor(monitortype) is False):
dsz.ui.Echo(('%s is an invalid monitor type' % monitortype))
failout = True
if ((scanbase.num_jobs(scansweepHelper.session) > 255) and (not scansweepHelper.options.cidroverride)):
            dsz.ui.Echo('You cannot specify more than 255 targets without the -cidroverride option', dsz.ERROR)
failout = True
if (scansweepHelper.options.escalate is not None):
rulelist = scansweepHelper.parseescalate(scansweepHelper.options.escalate)
if (len(rulelist) == 0):
dsz.ui.Echo('You specified -escalate, but had only invalid rules. Exiting.', dsz.ERROR)
failout = True
for rule in rulelist:
scantype = rule[1].split('|')[0]
current_rulelist = scanbase.get_escalate_rules(scansweepHelper.session)
if (rule not in current_rulelist):
scanbase.write_escalate_rule(scansweepHelper.session, rule)
if (not (scantype == 'alert')):
scanbase.set_jobtype(scansweepHelper.session, scantype)
elif ((scansweepHelper.options.type is not None) or (scansweepHelper.options.target is not None)):
dsz.ui.Echo('You cannot specify -target or -type when using -session.', dsz.WARNING)
failout = True
else:
dsz.ui.Echo('You are joining another session, and so will use the already available job queue and escalate rules.', dsz.WARNING)
if (not scansweepHelper.verifytime(scanbase.get_jobtypes(scansweepHelper.session))):
failout = True
if failout:
return 0
scansweepHelper.printconfig()
if create_mode:
dsz.ui.Echo('Ran in create mode. Exiting.', dsz.WARNING)
return
dsz.lp.RecordToolUse('scansweep', scansweepHelper.toolversion, usage='EXERCISED', comment=' '.join([x.lower() for x in arguments]))
try:
scan(scansweepHelper)
finally:
dsz.ui.Echo(('=' * 100))
scansweepHelper.showstats()
print '\n\n'
scansweepHelper.generateresults(quiet=False)
def scan(scansweepHelper):
lastresults = 0
alreadyoutput = []
num_remaining = scanbase.num_jobs(scansweepHelper.session)
sanity_string = ('[%s] Sanity output: %s jobs remaining, %s-%s remaining' % (dsz.Timestamp(), num_remaining, ops.timehelper.get_age_from_seconds((num_remaining * scansweepHelper.min_seconds)), ops.timehelper.get_age_from_seconds((num_remaining * scansweepHelper.max_seconds))))
dsz.ui.Echo(sanity_string, dsz.GOOD)
scansweepHelper.showstats()
if (not os.path.exists(os.path.dirname(scansweepHelper.scansweep_logfile))):
os.mkdir(os.path.dirname(scansweepHelper.scansweep_logfile))
with open(scansweepHelper.scansweep_logfile, 'a') as f:
f.write(('%s\n' % sanity_string))
delta = time.time()
scantime = time.time()
originaltime = time.time()
if (scansweepHelper.monitor is not None):
scansweepHelper.activatemonitors()
while True:
if ((time.time() - originaltime) > scansweepHelper.maxtime):
dsz.ui.Echo(('Maxtime of %s has been exceeded. Exiting.' % ops.timehelper.get_age_from_seconds(scansweepHelper.maxtime)), dsz.ERROR)
break
scan_job = scanbase.get_job(scansweepHelper.session)
if (scan_job == False):
if (scansweepHelper.monitor is None):
break
else:
try:
target = scan_job[1]
job_info = scan_job[0].split('|')
job_type = job_info[0]
if (not util.ip.validate(target)):
target = scansweepHelper.resolvehostname(target)
if (target == None):
continue
target_scanner = scanengine2.get_scanengine(scan_job, scansweepHelper.timeout)
target_scanner.execute_scan(False)
if target_scanner.multiple_responses:
multi_response = target_scanner.return_data()
for response in multi_response:
scanbase.write_result(scansweepHelper.session, response.scan_type, response.target, response.return_data(), response.success, scan_job[0])
else:
scanbase.write_result(scansweepHelper.session, target_scanner.scan_type, target_scanner.target, target_scanner.return_data(), target_scanner.success, scan_job[0])
if target_scanner.success:
succ_out_string = ('[%s] %s (%s jobs remaining)' % (target_scanner.timestamp, target_scanner.return_success_message(), scanbase.num_jobs(scansweepHelper.session)))
dsz.ui.Echo(succ_out_string)
with open(scansweepHelper.scansweep_logfile, 'a') as f:
f.write(('%s\n' % succ_out_string))
rulelist = scanbase.get_escalate_rules(scansweepHelper.session)
for rule in rulelist:
if target_scanner.check_escalation(rule[0]):
if (rule[1] == 'alert'):
if (target_scanner.success == True):
esc_output_string = ('[%s]\t\tAlerting on %s by rule: (%s->%s)' % (dsz.Timestamp(), target, rule[0], rule[1]))
else:
esc_output_string = ('[%s] Alerting on %s by rule: (%s->%s)' % (dsz.Timestamp(), target, rule[0], rule[1]))
scansweepHelper.alert(esc_output_string)
dsz.ui.Echo(esc_output_string, dsz.WARNING)
else:
add_succ = scansweepHelper.addtoqueue(rule[1], target, scansweepHelper.scansweep_env)
if ((target_scanner.success == True) and add_succ):
esc_output_string = ('[%s]\t\tEscalating %s by rule: (%s->%s) (%s jobs remaining)' % (dsz.Timestamp(), target, rule[0], rule[1], scanbase.num_jobs(scansweepHelper.session)))
elif add_succ:
esc_output_string = ('[%s] Escalating %s by rule: (%s->%s) (%s jobs remaining)' % (dsz.Timestamp(), target, rule[0], rule[1], scanbase.num_jobs(scansweepHelper.session)))
dsz.ui.Echo(esc_output_string)
with open(scansweepHelper.scansweep_logfile, 'a') as f:
f.write(('%s\n' % esc_output_string))
except Exception as e:
if dsz.ui.Prompt(('The current job failed for some reason. Would you like to quit? %s' % e), False):
break
else:
continue
if scansweepHelper.monitor:
for monitor_handler in scansweepHelper.monitorengines:
found_connections = monitor_handler.execute_monitor()
for connection in found_connections:
rulelist = scanbase.get_escalate_rules(scansweepHelper.session)
for rule in rulelist:
if monitor_handler.check_escalation(rule[0], connection):
found = False
add_succ = True
if (not scansweepHelper.internaloverride):
for network in scansweepHelper.local_networks:
if util.ip.validate_ipv6(connection.target):
if (util.ip.expand_ipv6(connection.target)[:19] == network[1]):
found = True
break
elif ((not (network[0] == '')) and (scansweepHelper.getnetwork(connection.target, util.ip.get_cidr_from_subnet(network[0])) == network[1])):
found = True
break
if ((not scansweepHelper.internaloverride) and (not found)):
esc_output_string = ('[%s] Escalation failed (outside subnet) %s by rule: (%s->%s) (%s jobs remaining)' % (dsz.Timestamp(), connection.target, rule[0], rule[1], scanbase.num_jobs(scansweepHelper.session)))
dsz.ui.Echo(esc_output_string, dsz.WARNING)
elif (rule[1] == 'alert'):
esc_output_string = ('[%s] Alerting on %s by rule: (%s->%s)' % (dsz.Timestamp(), connection.target, rule[0], rule[1]))
scansweepHelper.alert(esc_output_string)
dsz.ui.Echo(esc_output_string, dsz.WARNING)
else:
add_succ = scansweepHelper.addtoqueue(rule[1], connection.target, scansweepHelper.scansweep_env)
if add_succ:
esc_output_string = ('[%s] Escalating %s by rule: (%s->%s) (%s jobs remaining)' % (dsz.Timestamp(), connection.target, rule[0], rule[1], scanbase.num_jobs(scansweepHelper.session)))
dsz.ui.Echo(esc_output_string)
if add_succ:
with open(scansweepHelper.scansweep_logfile, 'a') as f:
f.write(('%s\n' % esc_output_string))
newdelta = time.time()
num_remaining = scanbase.num_jobs(scansweepHelper.session)
if ((((num_remaining % 10) == 0) and (not (num_remaining in alreadyoutput))) or ((newdelta - delta) > (5 * 60))):
maxremaining = int((scansweepHelper.maxtime - (time.time() - originaltime)))
sanity_string = ('[%s] Sanity output: %s jobs remaining, %s-%s remaining (max %s), %0.1fs since last sanity' % (dsz.Timestamp(), num_remaining, ops.timehelper.get_age_from_seconds((num_remaining * scansweepHelper.min_seconds)), ops.timehelper.get_age_from_seconds((num_remaining * scansweepHelper.max_seconds)), ops.timehelper.get_age_from_seconds(maxremaining), (newdelta - delta)))
dsz.ui.Echo(sanity_string, dsz.GOOD)
with open(scansweepHelper.scansweep_logfile, 'a') as f:
f.write(('%s\n' % sanity_string))
scansweepHelper.showstats()
alreadyoutput.append(scanbase.num_jobs(scansweepHelper.scansweep_env))
delta = newdelta
resultstotal = 0
type_list = scanbase.get_jobtypes(scansweepHelper.session)
for type in type_list:
resultstotal = (resultstotal + scansweepHelper.findlistsize(type))
if (not (lastresults == resultstotal)):
scansweepHelper.generateresults(quiet=True)
lastresults = resultstotal
if scanbase.check_kill(scansweepHelper.session):
dsz.ui.Echo(('This session (%s) is marked for death. Exiting.' % scansweepHelper.session), dsz.ERROR)
break
if ((not (scanbase.num_jobs(scansweepHelper.session) == 0)) or scansweepHelper.monitor):
sleep_in_secs = random.randint(scansweepHelper.min_seconds, scansweepHelper.max_seconds)
if (not scansweepHelper.nowait):
if scansweepHelper.verbose:
dsz.ui.Echo(('[%s] Sleeping for %s seconds...' % (dsz.Timestamp(), sleep_in_secs)))
try:
dsz.Sleep((sleep_in_secs * 1000))
except exceptions.RuntimeError as e:
dsz.ui.Echo(('%s' % e), dsz.ERROR)
break
elif ((time.time() - scantime) < sleep_in_secs):
nowaitsleep = int((sleep_in_secs - floor((time.time() - scantime))))
if scansweepHelper.verbose:
dsz.ui.Echo(('[%s] Sleeping for %s seconds (%s seconds remain)...' % (dsz.Timestamp(), sleep_in_secs, nowaitsleep)))
try:
dsz.Sleep((sleep_in_secs * 1000))
except exceptions.RuntimeError as e:
dsz.ui.Echo(('%s' % e), dsz.ERROR)
break
elif scansweepHelper.verbose:
dsz.ui.Echo(('[%s] Would sleep for %s seconds but we are overdue...' % (dsz.Timestamp(), sleep_in_secs)))
scantime = time.time()
if scanbase.check_kill(scansweepHelper.session):
dsz.ui.Echo(('This session (%s) is marked for death. Exiting.' % scansweepHelper.session), dsz.ERROR)
break
if (__name__ == '__main__'):
main(sys.argv[1:])
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for dealing with circular statistics, for
instance, mean, variance, standard deviation, correlation coefficient, and so
on. This module also covers tests of uniformity, e.g., the Rayleigh and V tests.
The Maximum Likelihood Estimator for the Von Mises distribution along with the
Cramer-Rao Lower Bounds are also implemented. Almost all of the implementations
are based on reference [1]_, which is also the basis for the R package
'CircStats' [2]_.
"""
import numpy as np
from astropy.units import Quantity
__all__ = ['circmean', 'circvar', 'circmoment', 'circcorrcoef', 'rayleightest',
'vtest', 'vonmisesmle']
__doctest_requires__ = {'vtest': ['scipy.stats']}
def _components(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized rectangular components
# of the circular data.
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError('Weights and data have inconsistent shape.')
C = np.sum(weights * np.cos(p * (data - phi)), axis)/np.sum(weights, axis)
S = np.sum(weights * np.sin(p * (data - phi)), axis)/np.sum(weights, axis)
return C, S
def _angle(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized sample mean angle
C, S = _components(data, p, phi, axis, weights)
# theta will be an angle in the interval [-np.pi, np.pi)
# [-180, 180)*u.deg in case data is a Quantity
theta = np.arctan2(S, C)
if isinstance(data, Quantity):
theta = theta.to(data.unit)
return theta
def _length(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized sample length
C, S = _components(data, p, phi, axis, weights)
return np.hypot(S, C)
def circmean(data, axis=None, weights=None):
""" Computes the circular mean angle of an array of circular data.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular means are computed. The default is to compute
the mean of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22, for
detailed explanation.
Returns
-------
circmean : numpy.ndarray or Quantity
Circular mean.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmean
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmean(data) # doctest: +FLOAT_CMP
<Quantity 48.62718088722989 deg>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
return _angle(data, 1, 0.0, axis, weights)
def circvar(data, axis=None, weights=None):
""" Computes the circular variance of an array of circular data.
    There are several conventions for defining measures of dispersion for circular
data. The variance implemented here is based on the definition given by
[1]_, which is also the same used by the R package 'CircStats' [2]_.
Parameters
----------
data : numpy.ndarray or dimensionless Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular variances are computed. The default is to
compute the variance of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circvar : numpy.ndarray or dimensionless Quantity
Circular variance.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circvar
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circvar(data) # doctest: +FLOAT_CMP
<Quantity 0.16356352748437508>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
Notes
-----
    The definition used here differs from the one in scipy.stats.circvar.
    In particular, scipy.stats.circvar uses an approximation based on the
    limit of small angles, which approaches the linear variance.
"""
return 1.0 - _length(data, 1, 0.0, axis, weights)
def circmoment(data, p=1.0, centered=False, axis=None, weights=None):
""" Computes the ``p``-th trigonometric circular moment for an array
of circular data.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
p : float, optional
Order of the circular moment.
centered : Boolean, optional
If ``True``, central circular moments are computed. Default value is
``False``.
axis : int, optional
Axis along which circular moments are computed. The default is to
compute the circular moment of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circmoment : numpy.ndarray or Quantity
The first and second elements correspond to the direction and length of
the ``p``-th circular moment, respectively.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmoment
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmoment(data, p=2) # doctest: +FLOAT_CMP
(<Quantity 90.99263082432564 deg>, <Quantity 0.48004283892950717>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
if centered:
phi = circmean(data, axis, weights)
else:
phi = 0.0
return _angle(data, p, phi, axis, weights), _length(data, p, phi, axis,
weights)
def circcorrcoef(alpha, beta, axis=None, weights_alpha=None,
weights_beta=None):
""" Computes the circular correlation coefficient between two array of
circular data.
Parameters
----------
alpha : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
beta : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular correlation coefficients are computed.
        The default is to compute the circular correlation coefficient of the
flattened array.
weights_alpha : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights_alpha``
represents a weighting factor for each group such that
``sum(weights_alpha, axis)`` equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
weights_beta : numpy.ndarray, optional
See description of ``weights_alpha``.
Returns
-------
rho : numpy.ndarray or dimensionless Quantity
Circular correlation coefficient.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circcorrcoef
>>> from astropy import units as u
>>> alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302,
... 324, 85, 324, 340, 157, 238, 254, 146, 232, 122,
... 329])*u.deg
>>> beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94,
... 45, 47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
>>> circcorrcoef(alpha, beta) # doctest: +FLOAT_CMP
<Quantity 0.2704648826748831>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
    if np.size(alpha, axis) != np.size(beta, axis):
raise ValueError("alpha and beta must be arrays of the same size")
mu_a = circmean(alpha, axis, weights_alpha)
mu_b = circmean(beta, axis, weights_beta)
sin_a = np.sin(alpha - mu_a)
sin_b = np.sin(beta - mu_b)
rho = np.sum(sin_a*sin_b)/np.sqrt(np.sum(sin_a*sin_a)*np.sum(sin_b*sin_b))
return rho
def rayleightest(data, axis=None, weights=None):
""" Performs the Rayleigh test of uniformity.
This test is used to identify a non-uniform distribution, i.e. it is
    designed for detecting a unimodal deviation from uniformity. More
precisely, it assumes the following hypotheses:
- H0 (null hypothesis): The population is distributed uniformly around the
circle.
- H1 (alternative hypothesis): The population is not distributed uniformly
around the circle.
Small p-values suggest to reject the null hypothesis.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the Rayleigh test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``np.sum(weights, axis)``
equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
Returns
-------
p-value : float or dimensionless Quantity
p-value.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import rayleightest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> rayleightest(data) # doctest: +FLOAT_CMP
<Quantity 0.2563487733797317>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    .. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
.. [4] D. Wilkie. "Rayleigh Test for Randomness of Circular Data". Applied
Statistics. 1983.
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.211.4762>
"""
n = np.size(data, axis=axis)
Rbar = _length(data, 1, 0.0, axis, weights)
z = n*Rbar*Rbar
# see [3] and [4] for the formulae below
tmp = 1.0
    if n < 50:
tmp = 1.0 + (2.0*z - z*z)/(4.0*n) - (24.0*z - 132.0*z**2.0 +
76.0*z**3.0 - 9.0*z**4.0)/(288.0 *
n * n)
p_value = np.exp(-z)*tmp
return p_value
def vtest(data, mu=0.0, axis=None, weights=None):
""" Performs the Rayleigh test of uniformity where the alternative
hypothesis H1 is assumed to have a known mean angle ``mu``.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
mu : float or Quantity, optional
Mean angle. Assumed to be known.
axis : int, optional
Axis along which the V test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
p-value : float or dimensionless Quantity
p-value.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vtest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vtest(data) # doctest: +FLOAT_CMP
<Quantity 0.6223678199713766>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    .. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
"""
from scipy.stats import norm
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError('Weights and data have inconsistent shape.')
n = np.size(data, axis=axis)
R0bar = np.sum(weights * np.cos(data - mu), axis)/np.sum(weights, axis)
z = np.sqrt(2.0 * n) * R0bar
pz = norm.cdf(z)
fz = norm.pdf(z)
# see reference [3]
p_value = 1 - pz + fz*((3*z - z**3)/(16.0*n) +
(15*z + 305*z**3 - 125*z**5 + 9*z**7)/(4608.0*n*n))
return p_value
def _A1inv(x):
    # Approximation for _A1inv(x) according to the R package 'CircStats'
# See http://www.scienceasia.org/2012.38.n1/scias38_118.pdf, equation (4)
if 0 <= x < 0.53:
return 2.0*x + x*x*x + (5.0*x**5)/6.0
elif x < 0.85:
return -0.4 + 1.39*x + 0.43/(1.0 - x)
else:
return 1.0/(x*x*x - 4.0*x*x + 3.0*x)
def vonmisesmle(data, axis=None):
""" Computes the Maximum Likelihood Estimator (MLE) for the parameters of
the von Mises distribution.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the mle will be computed.
Returns
-------
mu : float or Quantity
the mean (aka location parameter).
kappa : float or dimensionless Quantity
the concentration parameter.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vonmisesmle
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vonmisesmle(data) # doctest: +FLOAT_CMP
(<Quantity 101.16894320013179 deg>, <Quantity 1.49358958737054>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
    mu = circmean(data, axis=axis)
kappa = _A1inv(np.mean(np.cos(data - mu), axis))
return mu, kappa
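# A minimal, self-contained demo (not part of the original module) showing how
# the estimators above fit together, plus a numerical sanity check of the
# _A1inv approximation against the exact Bessel-function ratio
# A1(kappa) = I1(kappa) / I0(kappa). scipy is assumed to be available.
if __name__ == "__main__":
    from scipy.special import i0, i1

    # Draw angles from a von Mises distribution and recover its parameters.
    np.random.seed(0)
    sample = np.random.vonmises(0.5, 2.0, 10000)
    print("circular mean:", circmean(sample))
    print("circular variance:", circvar(sample))
    print("MLE (mu, kappa):", vonmisesmle(sample))

    # _A1inv(x) should approximately invert x = I1(kappa) / I0(kappa).
    for kappa in (0.5, 2.0, 10.0):
        ratio = i1(kappa) / i0(kappa)
        print(kappa, "->", _A1inv(ratio))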
|
|
import uqbar.strings
import supriya.live
def test_build_synthdef_1_1():
synthdef = supriya.live.Send.build_synthdef(1, 1)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/1x1
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar[0]
"""
)
+ "\n"
)
def test_build_synthdef_1_2():
synthdef = supriya.live.Send.build_synthdef(1, 2)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/1x2
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
"""
)
+ "\n"
)
def test_build_synthdef_1_4():
synthdef = supriya.live.Send.build_synthdef(1, 4)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/1x4
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/2:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/3:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
source[2]: BinaryOpUGen(MULTIPLICATION).ar/2[0]
source[3]: BinaryOpUGen(MULTIPLICATION).ar/3[0]
"""
)
+ "\n"
)
def test_build_synthdef_1_8():
synthdef = supriya.live.Send.build_synthdef(1, 8)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/1x8
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/2:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/3:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/4:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/5:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/6:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/7:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
source[2]: BinaryOpUGen(MULTIPLICATION).ar/2[0]
source[3]: BinaryOpUGen(MULTIPLICATION).ar/3[0]
source[4]: BinaryOpUGen(MULTIPLICATION).ar/4[0]
source[5]: BinaryOpUGen(MULTIPLICATION).ar/5[0]
source[6]: BinaryOpUGen(MULTIPLICATION).ar/6[0]
source[7]: BinaryOpUGen(MULTIPLICATION).ar/7[0]
"""
)
+ "\n"
)
def test_build_synthdef_2_1():
synthdef = supriya.live.Send.build_synthdef(2, 1)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/2x1
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- BinaryOpUGen(ADDITION).ar:
left: In.ar[0]
right: In.ar[1]
- BinaryOpUGen(FLOAT_DIVISION).ar:
left: BinaryOpUGen(ADDITION).ar[0]
right: 2.0
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar:
left: BinaryOpUGen(FLOAT_DIVISION).ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar[0]
"""
)
+ "\n"
)
def test_build_synthdef_2_2():
synthdef = supriya.live.Send.build_synthdef(2, 2)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/2x2
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: In.ar[1]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
"""
)
+ "\n"
)
def test_build_synthdef_2_4():
synthdef = supriya.live.Send.build_synthdef(2, 4)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/2x4
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- PanAz.ar/0:
amplitude: 1.0
orientation: 0.5
position: -0.5
source: In.ar[0]
width: 4.0
- PanAz.ar/1:
amplitude: 1.0
orientation: 0.5
position: 0.5
source: In.ar[1]
width: 4.0
- BinaryOpUGen(ADDITION).ar/0:
left: PanAz.ar/0[0]
right: PanAz.ar/1[0]
- BinaryOpUGen(ADDITION).ar/1:
left: PanAz.ar/0[1]
right: PanAz.ar/1[1]
- BinaryOpUGen(ADDITION).ar/2:
left: PanAz.ar/0[2]
right: PanAz.ar/1[2]
- BinaryOpUGen(ADDITION).ar/3:
left: PanAz.ar/0[3]
right: PanAz.ar/1[3]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: BinaryOpUGen(ADDITION).ar/0[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: BinaryOpUGen(ADDITION).ar/1[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/2:
left: BinaryOpUGen(ADDITION).ar/2[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/3:
left: BinaryOpUGen(ADDITION).ar/3[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
source[2]: BinaryOpUGen(MULTIPLICATION).ar/2[0]
source[3]: BinaryOpUGen(MULTIPLICATION).ar/3[0]
"""
)
+ "\n"
)
def test_build_synthdef_2_8():
synthdef = supriya.live.Send.build_synthdef(2, 8)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/2x8
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- PanAz.ar/0:
amplitude: 1.0
orientation: 0.5
position: -0.5
source: In.ar[0]
width: 8.0
- PanAz.ar/1:
amplitude: 1.0
orientation: 0.5
position: 0.5
source: In.ar[1]
width: 8.0
- BinaryOpUGen(ADDITION).ar/0:
left: PanAz.ar/0[0]
right: PanAz.ar/1[0]
- BinaryOpUGen(ADDITION).ar/1:
left: PanAz.ar/0[1]
right: PanAz.ar/1[1]
- BinaryOpUGen(ADDITION).ar/2:
left: PanAz.ar/0[2]
right: PanAz.ar/1[2]
- BinaryOpUGen(ADDITION).ar/3:
left: PanAz.ar/0[3]
right: PanAz.ar/1[3]
- BinaryOpUGen(ADDITION).ar/4:
left: PanAz.ar/0[4]
right: PanAz.ar/1[4]
- BinaryOpUGen(ADDITION).ar/5:
left: PanAz.ar/0[5]
right: PanAz.ar/1[5]
- BinaryOpUGen(ADDITION).ar/6:
left: PanAz.ar/0[6]
right: PanAz.ar/1[6]
- BinaryOpUGen(ADDITION).ar/7:
left: PanAz.ar/0[7]
right: PanAz.ar/1[7]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: BinaryOpUGen(ADDITION).ar/0[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: BinaryOpUGen(ADDITION).ar/1[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/2:
left: BinaryOpUGen(ADDITION).ar/2[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/3:
left: BinaryOpUGen(ADDITION).ar/3[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/4:
left: BinaryOpUGen(ADDITION).ar/4[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/5:
left: BinaryOpUGen(ADDITION).ar/5[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/6:
left: BinaryOpUGen(ADDITION).ar/6[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/7:
left: BinaryOpUGen(ADDITION).ar/7[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
source[2]: BinaryOpUGen(MULTIPLICATION).ar/2[0]
source[3]: BinaryOpUGen(MULTIPLICATION).ar/3[0]
source[4]: BinaryOpUGen(MULTIPLICATION).ar/4[0]
source[5]: BinaryOpUGen(MULTIPLICATION).ar/5[0]
source[6]: BinaryOpUGen(MULTIPLICATION).ar/6[0]
source[7]: BinaryOpUGen(MULTIPLICATION).ar/7[0]
"""
)
+ "\n"
)
def test_build_synthdef_4_1():
synthdef = supriya.live.Send.build_synthdef(4, 1)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/4x1
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- Sum4.ar:
input_four: In.ar[3]
input_one: In.ar[0]
input_three: In.ar[2]
input_two: In.ar[1]
- BinaryOpUGen(FLOAT_DIVISION).ar:
left: Sum4.ar[0]
right: 4.0
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar:
left: BinaryOpUGen(FLOAT_DIVISION).ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar[0]
"""
)
+ "\n"
)
def test_build_synthdef_4_2():
synthdef = supriya.live.Send.build_synthdef(4, 2)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/4x2
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- PanAz.ar/0:
amplitude: 0.5
orientation: 0.5
position: -0.25
source: In.ar[0]
width: 1.0
- PanAz.ar/1:
amplitude: 0.5
orientation: 0.5
position: 0.25
source: In.ar[1]
width: 1.0
- PanAz.ar/2:
amplitude: 0.5
orientation: 0.5
position: 0.75
source: In.ar[2]
width: 1.0
- PanAz.ar/3:
amplitude: 0.5
orientation: 0.5
position: 1.25
source: In.ar[3]
width: 1.0
- Sum4.ar/0:
input_four: PanAz.ar/3[0]
input_one: PanAz.ar/0[0]
input_three: PanAz.ar/2[0]
input_two: PanAz.ar/1[0]
- Sum4.ar/1:
input_four: PanAz.ar/3[1]
input_one: PanAz.ar/0[1]
input_three: PanAz.ar/2[1]
input_two: PanAz.ar/1[1]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: Sum4.ar/0[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: Sum4.ar/1[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
"""
)
+ "\n"
)
def test_build_synthdef_4_4():
synthdef = supriya.live.Send.build_synthdef(4, 4)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/4x4
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: In.ar[1]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/2:
left: In.ar[2]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/3:
left: In.ar[3]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
source[2]: BinaryOpUGen(MULTIPLICATION).ar/2[0]
source[3]: BinaryOpUGen(MULTIPLICATION).ar/3[0]
"""
)
+ "\n"
)
def test_build_synthdef_4_8():
synthdef = supriya.live.Send.build_synthdef(4, 8)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/4x8
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- PanAz.ar/0:
amplitude: 1.0
orientation: 0.5
position: -0.25
source: In.ar[0]
width: 4.0
- PanAz.ar/1:
amplitude: 1.0
orientation: 0.5
position: 0.25
source: In.ar[1]
width: 4.0
- PanAz.ar/2:
amplitude: 1.0
orientation: 0.5
position: 0.75
source: In.ar[2]
width: 4.0
- PanAz.ar/3:
amplitude: 1.0
orientation: 0.5
position: 1.25
source: In.ar[3]
width: 4.0
- Sum4.ar/0:
input_four: PanAz.ar/3[0]
input_one: PanAz.ar/0[0]
input_three: PanAz.ar/2[0]
input_two: PanAz.ar/1[0]
- Sum4.ar/1:
input_four: PanAz.ar/3[1]
input_one: PanAz.ar/0[1]
input_three: PanAz.ar/2[1]
input_two: PanAz.ar/1[1]
- Sum4.ar/2:
input_four: PanAz.ar/3[2]
input_one: PanAz.ar/0[2]
input_three: PanAz.ar/2[2]
input_two: PanAz.ar/1[2]
- Sum4.ar/3:
input_four: PanAz.ar/3[3]
input_one: PanAz.ar/0[3]
input_three: PanAz.ar/2[3]
input_two: PanAz.ar/1[3]
- Sum4.ar/4:
input_four: PanAz.ar/3[4]
input_one: PanAz.ar/0[4]
input_three: PanAz.ar/2[4]
input_two: PanAz.ar/1[4]
- Sum4.ar/5:
input_four: PanAz.ar/3[5]
input_one: PanAz.ar/0[5]
input_three: PanAz.ar/2[5]
input_two: PanAz.ar/1[5]
- Sum4.ar/6:
input_four: PanAz.ar/3[6]
input_one: PanAz.ar/0[6]
input_three: PanAz.ar/2[6]
input_two: PanAz.ar/1[6]
- Sum4.ar/7:
input_four: PanAz.ar/3[7]
input_one: PanAz.ar/0[7]
input_three: PanAz.ar/2[7]
input_two: PanAz.ar/1[7]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: Sum4.ar/0[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: Sum4.ar/1[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/2:
left: Sum4.ar/2[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/3:
left: Sum4.ar/3[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/4:
left: Sum4.ar/4[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/5:
left: Sum4.ar/5[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/6:
left: Sum4.ar/6[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/7:
left: Sum4.ar/7[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
source[2]: BinaryOpUGen(MULTIPLICATION).ar/2[0]
source[3]: BinaryOpUGen(MULTIPLICATION).ar/3[0]
source[4]: BinaryOpUGen(MULTIPLICATION).ar/4[0]
source[5]: BinaryOpUGen(MULTIPLICATION).ar/5[0]
source[6]: BinaryOpUGen(MULTIPLICATION).ar/6[0]
source[7]: BinaryOpUGen(MULTIPLICATION).ar/7[0]
"""
)
+ "\n"
)
def test_build_synthdef_8_1():
synthdef = supriya.live.Send.build_synthdef(8, 1)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/8x1
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- Sum4.ar/0:
input_four: In.ar[3]
input_one: In.ar[0]
input_three: In.ar[2]
input_two: In.ar[1]
- Sum4.ar/1:
input_four: In.ar[7]
input_one: In.ar[4]
input_three: In.ar[6]
input_two: In.ar[5]
- BinaryOpUGen(ADDITION).ar:
left: Sum4.ar/0[0]
right: Sum4.ar/1[0]
- BinaryOpUGen(FLOAT_DIVISION).ar:
left: BinaryOpUGen(ADDITION).ar[0]
right: 8.0
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar:
left: BinaryOpUGen(FLOAT_DIVISION).ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar[0]
"""
)
+ "\n"
)
def test_build_synthdef_8_2():
synthdef = supriya.live.Send.build_synthdef(8, 2)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/8x2
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- PanAz.ar/0:
amplitude: 0.25
orientation: 0.5
position: -0.125
source: In.ar[0]
width: 0.5
- PanAz.ar/1:
amplitude: 0.25
orientation: 0.5
position: 0.125
source: In.ar[1]
width: 0.5
- PanAz.ar/2:
amplitude: 0.25
orientation: 0.5
position: 0.375
source: In.ar[2]
width: 0.5
- PanAz.ar/3:
amplitude: 0.25
orientation: 0.5
position: 0.625
source: In.ar[3]
width: 0.5
- Sum4.ar/0:
input_four: PanAz.ar/3[0]
input_one: PanAz.ar/0[0]
input_three: PanAz.ar/2[0]
input_two: PanAz.ar/1[0]
- Sum4.ar/1:
input_four: PanAz.ar/3[1]
input_one: PanAz.ar/0[1]
input_three: PanAz.ar/2[1]
input_two: PanAz.ar/1[1]
- PanAz.ar/4:
amplitude: 0.25
orientation: 0.5
position: 0.875
source: In.ar[4]
width: 0.5
- PanAz.ar/5:
amplitude: 0.25
orientation: 0.5
position: 1.125
source: In.ar[5]
width: 0.5
- PanAz.ar/6:
amplitude: 0.25
orientation: 0.5
position: 1.375
source: In.ar[6]
width: 0.5
- PanAz.ar/7:
amplitude: 0.25
orientation: 0.5
position: 1.625
source: In.ar[7]
width: 0.5
- Sum4.ar/2:
input_four: PanAz.ar/7[0]
input_one: PanAz.ar/4[0]
input_three: PanAz.ar/6[0]
input_two: PanAz.ar/5[0]
- BinaryOpUGen(ADDITION).ar/0:
left: Sum4.ar/0[0]
right: Sum4.ar/2[0]
- Sum4.ar/3:
input_four: PanAz.ar/7[1]
input_one: PanAz.ar/4[1]
input_three: PanAz.ar/6[1]
input_two: PanAz.ar/5[1]
- BinaryOpUGen(ADDITION).ar/1:
left: Sum4.ar/1[0]
right: Sum4.ar/3[0]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: BinaryOpUGen(ADDITION).ar/0[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: BinaryOpUGen(ADDITION).ar/1[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
"""
)
+ "\n"
)
def test_build_synthdef_8_4():
synthdef = supriya.live.Send.build_synthdef(8, 4)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/8x4
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- PanAz.ar/0:
amplitude: 0.5
orientation: 0.5
position: -0.125
source: In.ar[0]
width: 1.0
- PanAz.ar/1:
amplitude: 0.5
orientation: 0.5
position: 0.125
source: In.ar[1]
width: 1.0
- PanAz.ar/2:
amplitude: 0.5
orientation: 0.5
position: 0.375
source: In.ar[2]
width: 1.0
- PanAz.ar/3:
amplitude: 0.5
orientation: 0.5
position: 0.625
source: In.ar[3]
width: 1.0
- Sum4.ar/0:
input_four: PanAz.ar/3[0]
input_one: PanAz.ar/0[0]
input_three: PanAz.ar/2[0]
input_two: PanAz.ar/1[0]
- Sum4.ar/1:
input_four: PanAz.ar/3[1]
input_one: PanAz.ar/0[1]
input_three: PanAz.ar/2[1]
input_two: PanAz.ar/1[1]
- Sum4.ar/2:
input_four: PanAz.ar/3[2]
input_one: PanAz.ar/0[2]
input_three: PanAz.ar/2[2]
input_two: PanAz.ar/1[2]
- Sum4.ar/3:
input_four: PanAz.ar/3[3]
input_one: PanAz.ar/0[3]
input_three: PanAz.ar/2[3]
input_two: PanAz.ar/1[3]
- PanAz.ar/4:
amplitude: 0.5
orientation: 0.5
position: 0.875
source: In.ar[4]
width: 1.0
- PanAz.ar/5:
amplitude: 0.5
orientation: 0.5
position: 1.125
source: In.ar[5]
width: 1.0
- PanAz.ar/6:
amplitude: 0.5
orientation: 0.5
position: 1.375
source: In.ar[6]
width: 1.0
- PanAz.ar/7:
amplitude: 0.5
orientation: 0.5
position: 1.625
source: In.ar[7]
width: 1.0
- Sum4.ar/4:
input_four: PanAz.ar/7[0]
input_one: PanAz.ar/4[0]
input_three: PanAz.ar/6[0]
input_two: PanAz.ar/5[0]
- BinaryOpUGen(ADDITION).ar/0:
left: Sum4.ar/0[0]
right: Sum4.ar/4[0]
- Sum4.ar/5:
input_four: PanAz.ar/7[1]
input_one: PanAz.ar/4[1]
input_three: PanAz.ar/6[1]
input_two: PanAz.ar/5[1]
- BinaryOpUGen(ADDITION).ar/1:
left: Sum4.ar/1[0]
right: Sum4.ar/5[0]
- Sum4.ar/6:
input_four: PanAz.ar/7[2]
input_one: PanAz.ar/4[2]
input_three: PanAz.ar/6[2]
input_two: PanAz.ar/5[2]
- BinaryOpUGen(ADDITION).ar/2:
left: Sum4.ar/2[0]
right: Sum4.ar/6[0]
- Sum4.ar/7:
input_four: PanAz.ar/7[3]
input_one: PanAz.ar/4[3]
input_three: PanAz.ar/6[3]
input_two: PanAz.ar/5[3]
- BinaryOpUGen(ADDITION).ar/3:
left: Sum4.ar/3[0]
right: Sum4.ar/7[0]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: BinaryOpUGen(ADDITION).ar/0[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: BinaryOpUGen(ADDITION).ar/1[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/2:
left: BinaryOpUGen(ADDITION).ar/2[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/3:
left: BinaryOpUGen(ADDITION).ar/3[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
source[2]: BinaryOpUGen(MULTIPLICATION).ar/2[0]
source[3]: BinaryOpUGen(MULTIPLICATION).ar/3[0]
"""
)
+ "\n"
)
def test_build_synthdef_8_8():
synthdef = supriya.live.Send.build_synthdef(8, 8)
assert (
str(synthdef)
== uqbar.strings.normalize(
"""
synthdef:
name: mixer/send/8x8
ugens:
- Control.ir: null
- In.ar:
bus: Control.ir[0:in_]
- Control.kr: null
- Linen.kr/0:
attack_time: Control.kr[3:lag]
done_action: 2.0
gate: Control.kr[2:gate]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- Linen.kr/1:
attack_time: Control.kr[3:lag]
done_action: 0.0
gate: Control.kr[0:active]
release_time: Control.kr[3:lag]
sustain_level: 1.0
- BinaryOpUGen(MULTIPLICATION).kr/0:
left: Linen.kr/0[0]
right: Linen.kr/1[0]
- UnaryOpUGen(DB_TO_AMPLITUDE).kr:
source: Control.kr[1:gain]
- BinaryOpUGen(GREATER_THAN).kr:
left: Control.kr[1:gain]
right: -96.0
- BinaryOpUGen(MULTIPLICATION).kr/1:
left: UnaryOpUGen(DB_TO_AMPLITUDE).kr[0]
right: BinaryOpUGen(GREATER_THAN).kr[0]
- Lag.kr:
lag_time: Control.kr[3:lag]
source: BinaryOpUGen(MULTIPLICATION).kr/1[0]
- BinaryOpUGen(MULTIPLICATION).kr/2:
left: BinaryOpUGen(MULTIPLICATION).kr/0[0]
right: Lag.kr[0]
- BinaryOpUGen(MULTIPLICATION).ar/0:
left: In.ar[0]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/1:
left: In.ar[1]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/2:
left: In.ar[2]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/3:
left: In.ar[3]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/4:
left: In.ar[4]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/5:
left: In.ar[5]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/6:
left: In.ar[6]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- BinaryOpUGen(MULTIPLICATION).ar/7:
left: In.ar[7]
right: BinaryOpUGen(MULTIPLICATION).kr/2[0]
- Out.ar:
bus: Control.ir[1:out]
source[0]: BinaryOpUGen(MULTIPLICATION).ar/0[0]
source[1]: BinaryOpUGen(MULTIPLICATION).ar/1[0]
source[2]: BinaryOpUGen(MULTIPLICATION).ar/2[0]
source[3]: BinaryOpUGen(MULTIPLICATION).ar/3[0]
source[4]: BinaryOpUGen(MULTIPLICATION).ar/4[0]
source[5]: BinaryOpUGen(MULTIPLICATION).ar/5[0]
source[6]: BinaryOpUGen(MULTIPLICATION).ar/6[0]
source[7]: BinaryOpUGen(MULTIPLICATION).ar/7[0]
"""
)
+ "\n"
)
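# A complementary sketch (not part of the original test module): a single
# parametrized test that only checks the generated synthdef name for every
# channel-count pair covered above, relying on the same str(synthdef) YAML
# rendering used by the explicit expectations.
import pytest

@pytest.mark.parametrize("inputs", [1, 2, 4, 8])
@pytest.mark.parametrize("outputs", [1, 2, 4, 8])
def test_build_synthdef_names(inputs, outputs):
    synthdef = supriya.live.Send.build_synthdef(inputs, outputs)
    assert "name: mixer/send/{}x{}".format(inputs, outputs) in str(synthdef)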
|
|
#!/usr/bin/env python
"""
BuildconfigurationsetsApi.py
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
import sys
import os
import urllib
from models import *
class BuildconfigurationsetsApi(object):
def __init__(self, apiClient):
self.apiClient = apiClient
def getAll(self, **kwargs):
"""Gets all Build Configuration Sets
Args:
pageIndex, int: Page index (required)
pageSize, int: Pagination size (required)
sort, str: Sorting RSQL (required)
q, str: RSQL query (required)
Returns:
"""
allParams = ['pageIndex', 'pageSize', 'sort', 'q']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getAll" % key)
params[key] = val
del params['kwargs']
resourcePath = '/build-configuration-sets'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
if ('pageIndex' in params):
queryParams['pageIndex'] = self.apiClient.toPathValue(params['pageIndex'])
if ('pageSize' in params):
queryParams['pageSize'] = self.apiClient.toPathValue(params['pageSize'])
if ('sort' in params):
queryParams['sort'] = self.apiClient.toPathValue(params['sort'])
if ('q' in params):
queryParams['q'] = self.apiClient.toPathValue(params['q'])
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
return response
def createNew(self, **kwargs):
"""Creates a new Build Configuration Set
Args:
body, BuildConfigurationSet: (required)
Returns:
"""
allParams = ['body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method createNew" % key)
params[key] = val
del params['kwargs']
resourcePath = '/build-configuration-sets'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
if ('body' in params):
bodyParam = params['body']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
return response
def getSpecific(self, **kwargs):
"""Gets a specific Build Configuration Set
Args:
id, int: Build Configuration Set id (required)
Returns:
"""
allParams = ['id']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getSpecific" % key)
params[key] = val
del params['kwargs']
resourcePath = '/build-configuration-sets/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
return response
def update(self, **kwargs):
"""Updates an existing Build Configuration Set
Args:
id, int: Build Configuration Set id (required)
body, BuildConfigurationSet: (required)
Returns:
"""
allParams = ['id', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method update" % key)
params[key] = val
del params['kwargs']
resourcePath = '/build-configuration-sets/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'PUT'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
if ('body' in params):
bodyParam = params['body']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
return response
def deleteSpecific(self, **kwargs):
"""Removes a specific Build Configuration Set
Args:
id, int: Build Configuration Set id (required)
Returns:
"""
allParams = ['id']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method deleteSpecific" % key)
params[key] = val
del params['kwargs']
resourcePath = '/build-configuration-sets/{id}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
return response
def build(self, **kwargs):
"""Builds the Configurations for the Specified Set
Args:
id, int: Build Configuration Set id (required)
Returns:
"""
allParams = ['id']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method build" % key)
params[key] = val
del params['kwargs']
resourcePath = '/build-configuration-sets/{id}/build'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
return response
def getConfigurations(self, **kwargs):
"""Gets the Configurations for the Specified Set
Args:
id, int: Build Configuration Set id (required)
Returns:
"""
allParams = ['id']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getConfigurations" % key)
params[key] = val
del params['kwargs']
resourcePath = '/build-configuration-sets/{id}/build-configurations'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
return response
def addConfiguration(self, **kwargs):
"""Adds a configuration to the Specified Set
Args:
id, int: Build Configuration Set id (required)
body, Configuration: (required)
Returns:
"""
allParams = ['id', 'body']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method addConfiguration" % key)
params[key] = val
del params['kwargs']
resourcePath = '/build-configuration-sets/{id}/build-configurations'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'POST'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
if ('body' in params):
bodyParam = params['body']
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
return response
    def removeConfiguration(self, **kwargs):
"""Removes a configuration from the specified config set
Args:
id, int: Build configuration set id (required)
configId, int: Build configuration id (required)
Returns:
"""
allParams = ['id', 'configId']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
                raise TypeError("Got an unexpected keyword argument '%s' to method removeConfiguration" % key)
params[key] = val
del params['kwargs']
resourcePath = '/build-configuration-sets/{id}/build-configurations/{configId}'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'DELETE'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
if ('configId' in params):
replacement = str(self.apiClient.toPathValue(params['configId']))
replacement = urllib.quote(replacement)
resourcePath = resourcePath.replace('{' + 'configId' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
return response
def getBuildRecords(self, **kwargs):
"""Gets all build records associated with the contained build configurations
Args:
id, int: Build configuration set id (required)
pageIndex, int: Page index (required)
pageSize, int: Pagination size (required)
sort, str: Sorting RSQL (required)
q, str: RSQL query (required)
Returns:
"""
allParams = ['id', 'pageIndex', 'pageSize', 'sort', 'q']
params = locals()
for (key, val) in params['kwargs'].iteritems():
if key not in allParams:
raise TypeError("Got an unexpected keyword argument '%s' to method getBuildRecords" % key)
params[key] = val
del params['kwargs']
resourcePath = '/build-configuration-sets/{id}/build-records'
resourcePath = resourcePath.replace('{format}', 'json')
method = 'GET'
queryParams = {}
headerParams = {}
formParams = {}
files = {}
bodyParam = None
if ('pageIndex' in params):
queryParams['pageIndex'] = self.apiClient.toPathValue(params['pageIndex'])
if ('pageSize' in params):
queryParams['pageSize'] = self.apiClient.toPathValue(params['pageSize'])
if ('sort' in params):
queryParams['sort'] = self.apiClient.toPathValue(params['sort'])
if ('q' in params):
queryParams['q'] = self.apiClient.toPathValue(params['q'])
if ('id' in params):
replacement = str(self.apiClient.toPathValue(params['id']))
replacement = urllib.quote(replacement)
resourcePath = resourcePath.replace('{' + 'id' + '}',
replacement)
postData = (formParams if formParams else bodyParam)
response = self.apiClient.callAPI(resourcePath, method, queryParams,
postData, headerParams, files=files)
return response
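# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the generated
# client). The enclosing class name `BuildconfigurationsetsApi` and the way the
# `apiClient` helper is constructed are assumptions for illustration only; the
# method names and keyword arguments mirror the definitions above.
#
#     api = BuildconfigurationsetsApi(apiClient)
#     configs = api.getConfigurations(id=42)
#     records = api.getBuildRecords(id=42, pageIndex=0, pageSize=50,
#                                   sort='', q='')
#     api.removeConfiguration(id=42, configId=7)
# ---------------------------------------------------------------------------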
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for matrix factorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
_factorization_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_factorization_ops.so"))
class WALSModel(object):
r"""A model for Weighted Alternating Least Squares matrix factorization.
It minimizes the following loss function over U, V:
$$
\|\sqrt W \odot (A - U V^T)\|_F^2 + \lambda (\|U\|_F^2 + \|V\|_F^2)
$$
where,
A: input matrix,
W: weight matrix. Note that the (element-wise) square root of the weights
is used in the objective function.
U, V: row_factors and column_factors matrices,
  \\(\lambda\\): regularization.
Also we assume that W is of the following special form:
\\( W_{ij} = W_0 + R_i * C_j \\) if \\(A_{ij} \ne 0\\),
\\(W_{ij} = W_0\\) otherwise.
where,
\\(W_0\\): unobserved_weight,
\\(R_i\\): row_weights,
\\(C_j\\): col_weights.
  Note that the current implementation supports two operation modes: in the
  default mode, row_factors and col_factors are cached on each worker and must
  therefore individually fit in worker memory. When that condition cannot be
  met, setting use_factors_weights_cache to False supports larger problem
  sizes at a slight performance penalty: no worker caches are created, and the
  relevant weight and factor values are looked up from parameter servers at
  each step.
Loss computation: The loss can be computed efficiently by decomposing it into
a sparse term and a Gramian term, see wals.md.
The loss is returned by the update_{col, row}_factors(sp_input), and is
normalized as follows:
_, _, unregularized_loss, regularization, sum_weights =
update_row_factors(sp_input)
if sp_input contains the rows \\({A_i, i \in I}\\), and the input matrix A
has n total rows, then the minibatch loss = unregularized_loss +
regularization is
$$
(\|\sqrt W_I \odot (A_I - U_I V^T)\|_F^2 + \lambda \|U_I\|_F^2) * n / |I| +
\lambda \|V\|_F^2
$$
The sum_weights tensor contains the normalized sum of weights
\\(sum(W_I) * n / |I|\\).
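  For example, with n = 100 total rows and a minibatch containing |I| = 10
  rows, both the sparse error term and \\(\lambda \|U_I\|_F^2\\) are scaled by
  n / |I| = 10, while \\(\lambda \|V\|_F^2\\) is added unscaled.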
A typical usage example (pseudocode):
with tf.Graph().as_default():
# Set up the model object.
model = tf.contrib.factorization.WALSModel(....)
# To be run only once as part of session initialization. In distributed
# training setting, this should only be run by the chief trainer and all
# other trainers should block until this is done.
model_init_op = model.initialize_op
    # To be run once per worker after the session is available, and before
    # the prep_gramian_op for the row (column) update can be run.
worker_init_op = model.worker_init
# To be run once per iteration sweep before the row(column) update
# initialize ops can be run. Note that in the distributed training
# situations, this should only be run by the chief trainer. All other
# trainers need to block until this is done.
row_update_prep_gramian_op = model.row_update_prep_gramian_op
col_update_prep_gramian_op = model.col_update_prep_gramian_op
# To be run once per worker per iteration sweep. Must be run before
# any actual update ops can be run.
init_row_update_op = model.initialize_row_update_op
init_col_update_op = model.initialize_col_update_op
# Ops to update row(column). This can either take the entire sparse
# tensor or slices of sparse tensor. For distributed trainer, each
# trainer handles just part of the matrix.
_, row_update_op, unreg_row_loss, row_reg, _ = model.update_row_factors(
sp_input=matrix_slices_from_queue_for_worker_shard)
row_loss = unreg_row_loss + row_reg
_, col_update_op, unreg_col_loss, col_reg, _ = model.update_col_factors(
sp_input=transposed_matrix_slices_from_queue_for_worker_shard,
transpose_input=True)
col_loss = unreg_col_loss + col_reg
...
# model_init_op is passed to Supervisor. Chief trainer runs it. Other
# trainers wait.
sv = tf.train.Supervisor(is_chief=is_chief,
...,
init_op=tf.group(..., model_init_op, ...), ...)
...
with sv.managed_session(...) as sess:
# All workers/trainers run it after session becomes available.
worker_init_op.run(session=sess)
...
      for i in iterations:
# All trainers need to sync up here.
while not_all_ready:
wait
# Row update sweep.
if is_chief:
row_update_prep_gramian_op.run(session=sess)
else:
wait_for_chief
        # All workers run update initialization.
init_row_update_op.run(session=sess)
# Go through the matrix.
reset_matrix_slices_queue_for_worker_shard
while_matrix_slices:
row_update_op.run(session=sess)
# All trainers need to sync up here.
while not_all_ready:
wait
# Column update sweep.
if is_chief:
col_update_prep_gramian_op.run(session=sess)
else:
wait_for_chief
        # All workers run update initialization.
init_col_update_op.run(session=sess)
# Go through the matrix.
reset_transposed_matrix_slices_queue_for_worker_shard
while_transposed_matrix_slices:
col_update_op.run(session=sess)
"""
def __init__(self,
input_rows,
input_cols,
n_components,
unobserved_weight=0.1,
regularization=None,
row_init="random",
col_init="random",
num_row_shards=1,
num_col_shards=1,
row_weights=1,
col_weights=1,
use_factors_weights_cache=True,
use_gramian_cache=True):
"""Creates model for WALS matrix factorization.
Args:
input_rows: total number of rows for input matrix.
input_cols: total number of cols for input matrix.
n_components: number of dimensions to use for the factors.
unobserved_weight: weight given to unobserved entries of matrix.
regularization: weight of L2 regularization term. If None, no
regularization is done.
row_init: initializer for row factor. Can be a tensor or numpy constant.
If set to "random", the value is initialized randomly.
col_init: initializer for column factor. See row_init for details.
num_row_shards: number of shards to use for row factors.
num_col_shards: number of shards to use for column factors.
row_weights: Must be in one of the following three formats: None, a list
of lists of non-negative real numbers (or equivalent iterables) or a
single non-negative real number.
- When set to None, w_ij = unobserved_weight, which simplifies to ALS.
Note that col_weights must also be set to "None" in this case.
- If it is a list of lists of non-negative real numbers, it needs to be
in the form of [[w_0, w_1, ...], [w_k, ... ], [...]], with the number of
inner lists matching the number of row factor shards and the elements in
each inner list are the weights for the rows of the corresponding row
factor shard. In this case, w_ij = unobserved_weight +
row_weights[i] * col_weights[j].
- If this is a single non-negative real number, this value is used for
all row weights and \\(w_ij\\) = unobserved_weight + row_weights *
col_weights[j].
Note that it is allowed to have row_weights as a list while col_weights
a single number or vice versa.
col_weights: See row_weights.
use_factors_weights_cache: When True, the factors and weights will be
cached on the workers before the updates start. Defaults to True. Note
that the weights cache is initialized through `worker_init`, and the
row/col factors cache is initialized through
`initialize_{col/row}_update_op`. In the case where the weights are
computed outside and set before the training iterations start, it is
important to ensure the `worker_init` op is run afterwards for the
weights cache to take effect.
use_gramian_cache: When True, the Gramians will be cached on the workers
before the updates start. Defaults to True.
"""
self._input_rows = input_rows
self._input_cols = input_cols
self._num_row_shards = num_row_shards
self._num_col_shards = num_col_shards
self._n_components = n_components
self._unobserved_weight = unobserved_weight
self._regularization = regularization
self._regularization_matrix = (
regularization * linalg_ops.eye(self._n_components)
if regularization is not None else None)
assert (row_weights is None) == (col_weights is None)
self._row_weights = WALSModel._create_weights(
row_weights, self._input_rows, self._num_row_shards, "row_weights")
self._col_weights = WALSModel._create_weights(
col_weights, self._input_cols, self._num_col_shards, "col_weights")
self._use_factors_weights_cache = use_factors_weights_cache
self._use_gramian_cache = use_gramian_cache
self._row_factors = self._create_factors(
self._input_rows, self._n_components, self._num_row_shards, row_init,
"row_factors")
self._col_factors = self._create_factors(
self._input_cols, self._n_components, self._num_col_shards, col_init,
"col_factors")
self._row_gramian = self._create_gramian(self._n_components, "row_gramian")
self._col_gramian = self._create_gramian(self._n_components, "col_gramian")
with ops.name_scope("row_prepare_gramian"):
self._row_update_prep_gramian = self._prepare_gramian(
self._col_factors, self._col_gramian)
with ops.name_scope("col_prepare_gramian"):
self._col_update_prep_gramian = self._prepare_gramian(
self._row_factors, self._row_gramian)
with ops.name_scope("transient_vars"):
self._create_transient_vars()
@property
def row_factors(self):
"""Returns a list of tensors corresponding to row factor shards."""
return self._row_factors
@property
def col_factors(self):
"""Returns a list of tensors corresponding to column factor shards."""
return self._col_factors
@property
def row_weights(self):
"""Returns a list of tensors corresponding to row weight shards."""
return self._row_weights
@property
def col_weights(self):
"""Returns a list of tensors corresponding to col weight shards."""
return self._col_weights
@property
def initialize_op(self):
"""Returns an op for initializing tensorflow variables."""
all_vars = self._row_factors + self._col_factors
all_vars.extend([self._row_gramian, self._col_gramian])
if self._row_weights is not None:
assert self._col_weights is not None
all_vars.extend(self._row_weights + self._col_weights)
return variables.variables_initializer(all_vars)
@classmethod
def _shard_sizes(cls, dims, num_shards):
"""Helper function to split dims values into num_shards."""
shard_size, residual = divmod(dims, num_shards)
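    # The first `residual` shards get one extra element, e.g.
    # _shard_sizes(10, 3) == [4, 3, 3].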
return [shard_size + 1] * residual + [shard_size] * (num_shards - residual)
@classmethod
def _create_factors(cls, rows, cols, num_shards, init, name):
"""Helper function to create row and column factors."""
with ops.name_scope(name):
if callable(init):
init = init()
if isinstance(init, list):
assert len(init) == num_shards
elif isinstance(init, str) and init == "random":
pass
elif num_shards == 1:
init = [init]
sharded_matrix = []
sizes = cls._shard_sizes(rows, num_shards)
assert len(sizes) == num_shards
def make_initializer(i, size):
def initializer():
if init == "random":
return random_ops.random_normal([size, cols])
else:
return init[i]
return initializer
for i, size in enumerate(sizes):
var_name = "%s_shard_%d" % (name, i)
var_init = make_initializer(i, size)
sharded_matrix.append(
variable_scope.variable(
var_init, dtype=dtypes.float32, name=var_name))
return sharded_matrix
@classmethod
def _create_weights(cls, wt_init, num_wts, num_shards, name):
"""Helper function to create sharded weight vector.
Args:
      wt_init: init value for the weights. If None, no weights are created.
        Otherwise this can be a list of non-negative real numbers (or
        equivalent iterables) or a single non-negative real number.
num_wts: total size of all the weight shards
num_shards: number of shards for the weights
name: name for the new Variables.
Returns:
A list of weight shard Tensors.
Raises:
ValueError: If wt_init is not the right format.
"""
if wt_init is None:
return None
init_mode = "list"
if isinstance(wt_init, collections.Iterable):
if num_shards == 1 and len(wt_init) == num_wts:
wt_init = [wt_init]
assert len(wt_init) == num_shards
elif isinstance(wt_init, numbers.Real) and wt_init >= 0:
init_mode = "scalar"
else:
raise ValueError(
"Invalid weight initialization argument. Must be one of these: "
"None, a real non-negative real number, or a list of lists of "
"non-negative real numbers (or equivalent iterables) corresponding "
"to sharded factors.")
sizes = cls._shard_sizes(num_wts, num_shards)
assert len(sizes) == num_shards
with ops.name_scope(name):
def make_wt_initializer(i, size):
def initializer():
if init_mode == "scalar":
return wt_init * array_ops.ones([size])
else:
return wt_init[i]
return initializer
sharded_weight = []
for i, size in enumerate(sizes):
var_name = "%s_shard_%d" % (name, i)
var_init = make_wt_initializer(i, size)
sharded_weight.append(
variable_scope.variable(
var_init, dtype=dtypes.float32, name=var_name))
return sharded_weight
@staticmethod
def _create_gramian(n_components, name):
"""Helper function to create the gramian variable.
Args:
n_components: number of dimensions of the factors from which the gramian
will be calculated.
name: name for the new Variables.
Returns:
A gramian Tensor with shape of [n_components, n_components].
"""
return variable_scope.variable(
array_ops.zeros([n_components, n_components]),
dtype=dtypes.float32,
name=name)
@staticmethod
def _transient_var(name):
"""Helper function to create a Variable."""
return variable_scope.variable(
1.0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=False,
name=name)
def _prepare_gramian(self, factors, gramian):
"""Helper function to create ops to prepare/calculate gramian.
Args:
factors: Variable or list of Variable representing (sharded) factors.
Used to compute the updated corresponding gramian value.
gramian: Variable storing the gramian calculated from the factors.
Returns:
An op that updates the gramian with the calculated value from the factors.
"""
partial_gramians = []
for f in factors:
with ops.colocate_with(f):
partial_gramians.append(math_ops.matmul(f, f, transpose_a=True))
with ops.colocate_with(gramian):
prep_gramian = state_ops.assign(gramian,
math_ops.add_n(partial_gramians)).op
return prep_gramian
def _cached_copy(self, var, name, pass_through=False):
"""Helper function to create a worker cached copy of a Variable.
This assigns the var (either a single Variable or a list of Variables) to
local transient cache Variable(s). Note that if var is a list of Variables,
the assignment is done sequentially to minimize the memory overheads.
Also note that if pass_through is set to True, this does not create new
    Variables but simply returns the input back.
Args:
var: A Variable or a list of Variables to cache.
name: name of cached Variable.
      pass_through: when set to True, this simply passes var back and does not
        actually create a cache.
Returns:
Tuple consisting of following three entries:
cache: the new transient Variable or list of transient Variables
corresponding one-to-one with var.
cache_init: op to initialize the Variable or the list of Variables.
cache_reset: op to reset the Variable or the list of Variables to some
default value.
"""
if var is None:
return None, None, None
elif pass_through:
cache = var
cache_init = control_flow_ops.no_op()
cache_reset = control_flow_ops.no_op()
elif isinstance(var, variables.Variable):
cache = WALSModel._transient_var(name=name)
with ops.colocate_with(cache):
cache_init = state_ops.assign(cache, var, validate_shape=False)
cache_reset = state_ops.assign(cache, 1.0, validate_shape=False)
else:
assert isinstance(var, list)
assert var
cache = [
WALSModel._transient_var(name="%s_shard_%d" % (name, i))
for i in xrange(len(var))
]
reset_ops = []
for i, c in enumerate(cache):
with ops.colocate_with(c):
if i == 0:
cache_init = state_ops.assign(c, var[i], validate_shape=False)
else:
with ops.control_dependencies([cache_init]):
cache_init = state_ops.assign(c, var[i], validate_shape=False)
reset_ops.append(state_ops.assign(c, 1.0, validate_shape=False))
cache_reset = control_flow_ops.group(*reset_ops)
return cache, cache_init, cache_reset
def _create_transient_vars(self):
"""Creates local cache of factors, weights and gramian for rows and columns.
Note that currently the caching strategy is as follows:
When initiating a row (resp. column) update:
- The column (resp. row) gramian is computed.
- Optionally, if use_gramian_cache is True, the column (resp. row) Gramian
is cached, while the row (resp. column) gramian is reset.
- Optionally, if use_factors_weights_cache is True, the column (resp. row)
factors and weights are cached, while the row (resp. column) factors and
weights are reset.
"""
(self._row_factors_cache, row_factors_cache_init,
row_factors_cache_reset) = self._cached_copy(
self._row_factors,
"row_factors_cache",
pass_through=not self._use_factors_weights_cache)
(self._col_factors_cache, col_factors_cache_init,
col_factors_cache_reset) = self._cached_copy(
self._col_factors,
"col_factors_cache",
pass_through=not self._use_factors_weights_cache)
(self._row_wt_cache, row_wt_cache_init, _) = self._cached_copy(
self._row_weights,
"row_wt_cache",
pass_through=not self._use_factors_weights_cache)
(self._col_wt_cache, col_wt_cache_init, _) = self._cached_copy(
self._col_weights,
"col_wt_cache",
pass_through=not self._use_factors_weights_cache)
(self._row_gramian_cache, row_gramian_cache_init,
row_gramian_cache_reset) = self._cached_copy(
self._row_gramian,
"row_gramian_cache",
pass_through=not self._use_gramian_cache)
(self._col_gramian_cache, col_gramian_cache_init,
col_gramian_cache_reset) = self._cached_copy(
self._col_gramian,
"col_gramian_cache",
pass_through=not self._use_gramian_cache)
self._row_updates_init = control_flow_ops.group(
col_factors_cache_init, row_factors_cache_reset, col_gramian_cache_init,
row_gramian_cache_reset)
self._col_updates_init = control_flow_ops.group(
row_factors_cache_init, col_factors_cache_reset, row_gramian_cache_init,
col_gramian_cache_reset)
if self._row_wt_cache is not None:
assert self._col_wt_cache is not None
self._worker_init = control_flow_ops.group(
row_wt_cache_init, col_wt_cache_init, name="worker_init")
else:
self._worker_init = control_flow_ops.no_op(name="worker_init")
@property
def worker_init(self):
"""Op to initialize worker state once before starting any updates.
Note that specifically this initializes the cache of the row and column
weights on workers when `use_factors_weights_cache` is True. In this case,
if these weights are being calculated and reset after the object is created,
    it is important to ensure this op is run afterwards so the cache reflects
the correct values.
"""
return self._worker_init
@property
def row_update_prep_gramian_op(self):
"""Op to form the gramian before starting row updates.
Must be run before initialize_row_update_op and should only be run by one
trainer (usually the chief) when doing distributed training.
Returns:
Op to form the gramian.
"""
return self._row_update_prep_gramian
@property
def col_update_prep_gramian_op(self):
"""Op to form the gramian before starting col updates.
Must be run before initialize_col_update_op and should only be run by one
trainer (usually the chief) when doing distributed training.
Returns:
Op to form the gramian.
"""
return self._col_update_prep_gramian
@property
def initialize_row_update_op(self):
"""Op to initialize worker state before starting row updates."""
return self._row_updates_init
@property
def initialize_col_update_op(self):
"""Op to initialize worker state before starting column updates."""
return self._col_updates_init
@staticmethod
def _get_sharding_func(size, num_shards):
"""Create sharding function for scatter update."""
def func(ids):
if num_shards == 1:
return None, ids
else:
ids_per_shard = size // num_shards
extras = size % num_shards
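        # The first `extras` shards hold (ids_per_shard + 1) ids each and the
        # remaining shards hold ids_per_shard ids, mirroring _shard_sizes.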
assignments = math_ops.maximum(ids // (ids_per_shard + 1),
(ids - extras) // ids_per_shard)
new_ids = array_ops.where(assignments < extras,
ids % (ids_per_shard + 1),
(ids - extras) % ids_per_shard)
return assignments, new_ids
return func
@classmethod
def scatter_update(cls, factor, indices, values, sharding_func, name=None):
"""Helper function for doing sharded scatter update."""
assert isinstance(factor, list)
if len(factor) == 1:
with ops.colocate_with(factor[0]):
# TODO(agarwal): assign instead of scatter update for full batch update.
return state_ops.scatter_update(
factor[0], indices, values, name=name).op
else:
num_shards = len(factor)
assignments, new_ids = sharding_func(indices)
assert assignments is not None
assignments = math_ops.cast(assignments, dtypes.int32)
sharded_ids = data_flow_ops.dynamic_partition(new_ids, assignments,
num_shards)
sharded_values = data_flow_ops.dynamic_partition(values, assignments,
num_shards)
updates = []
for i in xrange(num_shards):
updates.append(
state_ops.scatter_update(factor[i], sharded_ids[i], sharded_values[
i]))
return control_flow_ops.group(*updates, name=name)
def update_row_factors(self, sp_input=None, transpose_input=False):
r"""Updates the row factors.
Args:
sp_input: A SparseTensor representing a subset of rows of the full input
in any order. Please note that this SparseTensor must retain the
indexing as the original input.
transpose_input: If true, the input will be logically transposed and the
rows corresponding to the transposed input are updated.
Returns:
A tuple consisting of the following elements:
new_values: New values for the row factors.
update_op: An op that assigns the newly computed values to the row
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. If sp_input contains the rows \\({A_{i, :}, i \in I}\\), and the
input matrix A has n total rows, then the unregularized loss is:
\\(\|\sqrt W_I \odot (A_I - U_I V^T)\|_F^2 * n / |I|\\)
The total loss is unregularized_loss + regularization.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
If sp_input contains the rows \\({A_{i, :}, i \in I}\\), and the input
matrix A has n total rows, then the regularization term is:
          \\(\lambda \|U_I\|_F^2 * n / |I| + \lambda \|V\|_F^2\\).
sum_weights: The sum of the weights W_I corresponding to sp_input,
normalized by a factor of \\(n / |I|\\). The root weighted squared
error is: \sqrt(unregularized_loss / sum_weights).
"""
return self._process_input_helper(
True, sp_input=sp_input, transpose_input=transpose_input)
def update_col_factors(self, sp_input=None, transpose_input=False):
r"""Updates the column factors.
Args:
sp_input: A SparseTensor representing a subset of columns of the full
input. Please refer to comments for update_row_factors for
restrictions.
transpose_input: If true, the input will be logically transposed and the
columns corresponding to the transposed input are updated.
Returns:
A tuple consisting of the following elements:
new_values: New values for the column factors.
update_op: An op that assigns the newly computed values to the column
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. If sp_input contains the columns \\({A_{:, j}, j \in J}\\), and
the input matrix A has m total columns, then the unregularized loss is:
          \\(\|\sqrt W_J \odot (A_J - U V_J^T)\|_F^2 * m / |J|\\)
The total loss is unregularized_loss + regularization.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
If sp_input contains the columns \\({A_{:, j}, j \in J}\\), and the
input matrix A has m total columns, then the regularization term is:
          \\(\lambda \|V_J\|_F^2 * m / |J| + \lambda \|U\|_F^2\\).
sum_weights: The sum of the weights W_J corresponding to sp_input,
normalized by a factor of \\(m / |J|\\). The root weighted squared
error is: \sqrt(unregularized_loss / sum_weights).
"""
return self._process_input_helper(
False, sp_input=sp_input, transpose_input=transpose_input)
def project_row_factors(self,
sp_input=None,
transpose_input=False,
projection_weights=None):
"""Projects the row factors.
This computes the row embedding \\(u_i\\) for an observed row \\(a_i\\) by
solving one iteration of the update equations.
Args:
sp_input: A SparseTensor representing a set of rows. Please note that the
column indices of this SparseTensor must match the model column feature
indexing while the row indices are ignored. The returned results will be
in the same ordering as the input rows.
transpose_input: If true, the input will be logically transposed and the
rows corresponding to the transposed input are projected.
projection_weights: The row weights to be used for the projection. If None
        then 1.0 is used. This can be either a scalar or a rank-1 tensor with
the number of elements matching the number of rows to be projected.
Note that the column weights will be determined by the underlying WALS
model.
Returns:
Projected row factors.
"""
if projection_weights is None:
projection_weights = 1
return self._process_input_helper(
True,
sp_input=sp_input,
transpose_input=transpose_input,
row_weights=projection_weights)[0]
def project_col_factors(self,
sp_input=None,
transpose_input=False,
projection_weights=None):
"""Projects the column factors.
This computes the column embedding \\(v_j\\) for an observed column
\\(a_j\\) by solving one iteration of the update equations.
Args:
sp_input: A SparseTensor representing a set of columns. Please note that
the row indices of this SparseTensor must match the model row feature
indexing while the column indices are ignored. The returned results will
be in the same ordering as the input columns.
transpose_input: If true, the input will be logically transposed and the
columns corresponding to the transposed input are projected.
projection_weights: The column weights to be used for the projection. If
        None then 1.0 is used. This can be either a scalar or a rank-1 tensor
with the number of elements matching the number of columns to be
projected. Note that the row weights will be determined by the
underlying WALS model.
Returns:
Projected column factors.
"""
if projection_weights is None:
projection_weights = 1
return self._process_input_helper(
False,
sp_input=sp_input,
transpose_input=transpose_input,
row_weights=projection_weights)[0]
def _process_input_helper(self,
update_row_factors,
sp_input=None,
transpose_input=False,
row_weights=None):
"""Creates the graph for processing a sparse slice of input.
Args:
update_row_factors: if True, update or project the row_factors, else
update or project the column factors.
sp_input: Please refer to comments for update_row_factors,
update_col_factors, project_row_factors, and project_col_factors for
restrictions.
transpose_input: If True, the input is logically transposed and then the
corresponding rows/columns of the transposed input are updated.
row_weights: If not None, this is the row/column weights to be used for
the update or projection. If None, use the corresponding weights from
the model. Note that the feature (column/row) weights will be
determined by the model. When not None, it can either be a scalar or
a rank-1 tensor with the same number of elements as the number of rows
        or columns to be updated/projected.
Returns:
A tuple consisting of the following elements:
new_values: New values for the row/column factors.
update_op: An op that assigns the newly computed values to the row/column
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. Add the regularization term below to yield the loss.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
sum_weights: The sum of the weights corresponding to sp_input. This
can be used with unregularized loss to calculate the root weighted
squared error.
"""
assert isinstance(sp_input, sparse_tensor.SparseTensor)
if update_row_factors:
left = self._row_factors
right_factors = self._col_factors_cache
row_wt = self._row_wt_cache
col_wt = self._col_wt_cache
total_rows = self._input_rows
total_cols = self._input_cols
sharding_func = WALSModel._get_sharding_func(self._input_rows,
self._num_row_shards)
gramian = self._col_gramian_cache
else:
left = self._col_factors
right_factors = self._row_factors_cache
row_wt = self._col_wt_cache
col_wt = self._row_wt_cache
total_rows = self._input_cols
total_cols = self._input_rows
sharding_func = WALSModel._get_sharding_func(self._input_cols,
self._num_col_shards)
gramian = self._row_gramian_cache
transpose_input = not transpose_input
# Note that the row indices of sp_input are based on the original full input
# Here we reindex the rows and give them contiguous ids starting at 0.
# We use tf.unique to achieve this reindexing. Note that this is done so
# that the downstream kernel can assume that the input is "dense" along the
# row dimension.
row_ids, col_ids = array_ops.split(
value=sp_input.indices, num_or_size_splits=2, axis=1)
update_row_indices, all_row_ids = array_ops.unique(row_ids[:, 0])
update_col_indices, all_col_ids = array_ops.unique(col_ids[:, 0])
col_ids = array_ops.expand_dims(math_ops.cast(all_col_ids, dtypes.int64), 1)
row_ids = array_ops.expand_dims(math_ops.cast(all_row_ids, dtypes.int64), 1)
if transpose_input:
update_indices = update_col_indices
row_shape = [
math_ops.cast(array_ops.shape(update_row_indices)[0], dtypes.int64)
]
gather_indices = update_row_indices
else:
update_indices = update_row_indices
row_shape = [
math_ops.cast(array_ops.shape(update_col_indices)[0], dtypes.int64)
]
gather_indices = update_col_indices
num_rows = math_ops.cast(array_ops.shape(update_indices)[0], dtypes.int64)
col_shape = [num_rows]
right = embedding_ops.embedding_lookup(
right_factors, gather_indices, partition_strategy="div")
new_sp_indices = array_ops.concat([row_ids, col_ids], 1)
new_sp_shape = (array_ops.concat([row_shape, col_shape], 0)
if transpose_input else
array_ops.concat([col_shape, row_shape], 0))
new_sp_input = sparse_tensor.SparseTensor(
indices=new_sp_indices,
values=sp_input.values,
dense_shape=new_sp_shape)
# Compute lhs and rhs of the normal equations
total_lhs = (self._unobserved_weight * gramian)
if self._regularization_matrix is not None:
total_lhs += self._regularization_matrix
if self._row_weights is None:
# Special case of ALS. Use a much simpler update rule.
total_rhs = (
self._unobserved_weight * sparse_ops.sparse_tensor_dense_matmul(
new_sp_input, right, adjoint_a=transpose_input))
# TODO(rmlarsen): handle transposing in tf.matrix_solve instead of
# transposing explicitly.
# TODO(rmlarsen): multi-thread tf.matrix_solve.
new_left_values = array_ops.transpose(
linalg_ops.matrix_solve(total_lhs, array_ops.transpose(total_rhs)))
else:
if row_weights is None:
# TODO(yifanchen): Add special handling for single shard without using
# embedding_lookup and perform benchmarks for those cases. Same for
# col_weights lookup below.
row_weights_slice = embedding_ops.embedding_lookup(
row_wt, update_indices, partition_strategy="div")
else:
num_indices = array_ops.shape(update_indices)[0]
with ops.control_dependencies(
[check_ops.assert_less_equal(array_ops.rank(row_weights), 1)]):
row_weights_slice = control_flow_ops.cond(
math_ops.equal(array_ops.rank(row_weights), 0),
lambda: (array_ops.ones([num_indices]) * row_weights),
lambda: math_ops.cast(row_weights, dtypes.float32))
col_weights = embedding_ops.embedding_lookup(
col_wt, gather_indices, partition_strategy="div")
partial_lhs, total_rhs = (
gen_factorization_ops.wals_compute_partial_lhs_and_rhs(
right,
col_weights,
self._unobserved_weight,
row_weights_slice,
new_sp_input.indices,
new_sp_input.values,
num_rows,
transpose_input,
name="wals_compute_partial_lhs_rhs"))
total_lhs = array_ops.expand_dims(total_lhs, 0) + partial_lhs
total_rhs = array_ops.expand_dims(total_rhs, -1)
new_left_values = array_ops.squeeze(
linalg_ops.matrix_solve(total_lhs, total_rhs), [2])
update_op_name = "row_update" if update_row_factors else "col_update"
update_op = self.scatter_update(
left,
update_indices,
new_left_values,
sharding_func,
name=update_op_name)
# Create the loss subgraph
loss_sp_input = (sparse_ops.sparse_transpose(new_sp_input)
if transpose_input else new_sp_input)
# sp_approx is the low rank estimate of the input matrix, formed by
# computing the product <\\(u_i, v_j\\)> for (i, j) in loss_sp_input.indices.
sp_approx_vals = gen_factorization_ops.masked_matmul(
new_left_values,
right,
loss_sp_input.indices,
transpose_a=False,
transpose_b=True)
sp_approx = sparse_tensor.SparseTensor(
loss_sp_input.indices, sp_approx_vals, loss_sp_input.dense_shape)
sp_approx_sq = math_ops.square(sp_approx)
sp_residual = sparse_ops.sparse_add(loss_sp_input, sp_approx * (-1))
sp_residual_sq = math_ops.square(sp_residual)
row_wt_mat = (constant_op.constant(0.)
if self._row_weights is None else array_ops.expand_dims(
row_weights_slice, 1))
col_wt_mat = (constant_op.constant(0.)
if self._col_weights is None else array_ops.expand_dims(
col_weights, 0))
# We return the normalized loss
partial_row_gramian = math_ops.matmul(
new_left_values, new_left_values, transpose_a=True)
normalization_factor = total_rows / math_ops.cast(num_rows, dtypes.float32)
unregularized_loss = (
self._unobserved_weight * ( # pyformat line break
sparse_ops.sparse_reduce_sum(sp_residual_sq) - # pyformat break
sparse_ops.sparse_reduce_sum(sp_approx_sq) + # pyformat break
math_ops.trace(math_ops.matmul(partial_row_gramian, gramian))) +
sparse_ops.sparse_reduce_sum(row_wt_mat * (sp_residual_sq * col_wt_mat))
) * normalization_factor
if self._regularization is not None:
regularization = self._regularization * (
math_ops.trace(partial_row_gramian) * normalization_factor +
math_ops.trace(gramian))
else:
regularization = constant_op.constant(0.)
sum_weights = self._unobserved_weight * math_ops.cast(
total_rows * total_cols, dtypes.float32)
if self._row_weights is not None and self._col_weights is not None:
ones = sparse_tensor.SparseTensor(
indices=loss_sp_input.indices,
values=array_ops.ones(array_ops.shape(loss_sp_input.values)),
dense_shape=loss_sp_input.dense_shape)
sum_weights += sparse_ops.sparse_reduce_sum(row_wt_mat * (
ones * col_wt_mat)) * normalization_factor
return (new_left_values, update_op, unregularized_loss, regularization,
sum_weights)
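# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# factorization_ops.py). A minimal, single-process example of one row sweep and
# one column sweep in plain ALS mode (row_weights=col_weights=None), following
# the pseudocode in the WALSModel docstring. It assumes a TensorFlow 1.x
# environment where this contrib module and its _factorization_ops.so kernels
# load; the toy 4x3 matrix and all literal values are illustrative only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
  from tensorflow.python.client import session as _session_lib

  with ops.Graph().as_default():
    # A sparse 4x3 input with three observed entries.
    sp_mat = sparse_tensor.SparseTensor(
        indices=[[0, 0], [1, 2], [3, 1]],
        values=[1.0, 2.0, 3.0],
        dense_shape=[4, 3])
    model = WALSModel(
        input_rows=4,
        input_cols=3,
        n_components=2,
        regularization=1e-3,
        row_weights=None,
        col_weights=None)
    _, row_update_op, unreg_row_loss, row_reg, _ = model.update_row_factors(
        sp_input=sp_mat)
    _, col_update_op, _, _, _ = model.update_col_factors(sp_input=sp_mat)

    with _session_lib.Session() as sess:
      sess.run(model.initialize_op)
      sess.run(model.worker_init)
      # Row sweep: prepare the column gramian, initialize caches, then update.
      sess.run(model.row_update_prep_gramian_op)
      sess.run(model.initialize_row_update_op)
      _, unreg, reg = sess.run([row_update_op, unreg_row_loss, row_reg])
      print("row sweep loss:", unreg + reg)
      # Column sweep, symmetric to the row sweep.
      sess.run(model.col_update_prep_gramian_op)
      sess.run(model.initialize_col_update_op)
      sess.run(col_update_op)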
|
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
from ambari_commons import OSCheck
from resource_management import get_bare_principal
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.default import default
# Local Imports
from status_params import *
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions.setup_ranger_plugin_xml import generate_ranger_service_config
def configs_for_ha(atlas_hosts, metadata_port, is_atlas_ha_enabled, metadata_protocol):
"""
Return a dictionary of additional configs to merge if Atlas HA is enabled.
:param atlas_hosts: List of hostnames that contain Atlas
:param metadata_port: Port number
:param is_atlas_ha_enabled: None, True, or False
:param metadata_protocol: http or https
:return: Dictionary with additional configs to merge to application-properties if HA is enabled.
"""
additional_props = {}
if atlas_hosts is None or len(atlas_hosts) == 0 or metadata_port is None:
return additional_props
# Sort to guarantee each host sees the same values, assuming restarted at the same time.
atlas_hosts = sorted(atlas_hosts)
# E.g., id1,id2,id3,...,idn
_server_id_list = ["id" + str(i) for i in range(1, len(atlas_hosts) + 1)]
atlas_server_ids = ",".join(_server_id_list)
additional_props["atlas.server.ids"] = atlas_server_ids
i = 0
for curr_hostname in atlas_hosts:
id = _server_id_list[i]
prop_name = "atlas.server.address." + id
prop_value = curr_hostname + ":" + metadata_port
additional_props[prop_name] = prop_value
if "atlas.rest.address" in additional_props:
additional_props["atlas.rest.address"] += "," + metadata_protocol + "://" + prop_value
else:
additional_props["atlas.rest.address"] = metadata_protocol + "://" + prop_value
i += 1
# This may override the existing property
if i == 1 or (i > 1 and is_atlas_ha_enabled is False):
additional_props["atlas.server.ha.enabled"] = "false"
elif i > 1:
additional_props["atlas.server.ha.enabled"] = "true"
return additional_props
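# Illustrative sketch (editor's addition): expected shape of the dictionary
# returned by configs_for_ha() for a two-host Atlas deployment. The host names
# below are placeholders chosen only for illustration.
#
#   configs_for_ha(["atlas1.example.com", "atlas2.example.com"], "21000", None, "http")
#   => {"atlas.server.ids": "id1,id2",
#       "atlas.server.address.id1": "atlas1.example.com:21000",
#       "atlas.server.address.id2": "atlas2.example.com:21000",
#       "atlas.rest.address": "http://atlas1.example.com:21000,http://atlas2.example.com:21000",
#       "atlas.server.ha.enabled": "true"}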
# server configurations
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()
# Needed since this is an Atlas Hook service.
cluster_name = config['clusterName']
java_version = expect("/hostLevelParams/java_version", int)
zk_root = default('/configurations/application-properties/atlas.server.ha.zookeeper.zkroot', '/apache_atlas')
stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
atlas_kafka_group_id = default('/configurations/application-properties/atlas.kafka.hook.group.id', None)
if security_enabled:
_hostname_lowercase = config['hostname'].lower()
_atlas_principal_name = config['configurations']['application-properties']['atlas.authentication.principal']
atlas_jaas_principal = _atlas_principal_name.replace('_HOST',_hostname_lowercase)
atlas_keytab_path = config['configurations']['application-properties']['atlas.authentication.keytab']
# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)
# stack version
stack_version_unformatted = config['hostLevelParams']['stack_version']
stack_version_formatted = format_stack_version(stack_version_unformatted)
metadata_home = format('{stack_root}/current/atlas-server')
metadata_bin = format("{metadata_home}/bin")
python_binary = os.environ['PYTHON_EXE'] if 'PYTHON_EXE' in os.environ else sys.executable
metadata_start_script = format("{metadata_bin}/atlas_start.py")
metadata_stop_script = format("{metadata_bin}/atlas_stop.py")
# metadata local directory structure
log_dir = config['configurations']['atlas-env']['metadata_log_dir']
# service locations
hadoop_conf_dir = os.path.join(os.environ["HADOOP_HOME"], "conf") if 'HADOOP_HOME' in os.environ else '/etc/hadoop/conf'
# some commands may need to supply the JAAS location when running as atlas
atlas_jaas_file = format("{conf_dir}/atlas_jaas.conf")
# user
user_group = config['configurations']['cluster-env']['user_group']
# metadata env
java64_home = config['hostLevelParams']['java_home']
java_exec = format("{java64_home}/bin/java")
env_sh_template = config['configurations']['atlas-env']['content']
# credential provider
credential_provider = format( "jceks://file@{conf_dir}/atlas-site.jceks")
# command line args
ssl_enabled = default("/configurations/application-properties/atlas.enableTLS", False)
http_port = default("/configurations/application-properties/atlas.server.http.port", "21000")
https_port = default("/configurations/application-properties/atlas.server.https.port", "21443")
if ssl_enabled:
metadata_port = https_port
metadata_protocol = 'https'
else:
metadata_port = http_port
metadata_protocol = 'http'
metadata_host = config['hostname']
atlas_hosts = sorted(default('/clusterHostInfo/atlas_server_hosts', []))
metadata_server_host = atlas_hosts[0] if len(atlas_hosts) > 0 else "UNKNOWN_HOST"
# application properties
application_properties = dict(config['configurations']['application-properties'])
application_properties["atlas.server.bind.address"] = metadata_host
# trimming knox_key
if 'atlas.sso.knox.publicKey' in application_properties:
knox_key = application_properties['atlas.sso.knox.publicKey']
knox_key_without_new_line = knox_key.replace("\n","")
application_properties['atlas.sso.knox.publicKey'] = knox_key_without_new_line
if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
metadata_server_url = application_properties["atlas.rest.address"]
else:
# In HDP 2.3 and 2.4 the property was computed and saved to the local config but did not exist in the database.
metadata_server_url = format('{metadata_protocol}://{metadata_server_host}:{metadata_port}')
application_properties["atlas.rest.address"] = metadata_server_url
# Atlas HA should populate
# atlas.server.ids = id1,id2,...,idn
# atlas.server.address.id# = host#:port
# User should not have to modify this property, but still allow overriding it to False if multiple Atlas servers exist
# This can be None, True, or False
is_atlas_ha_enabled = default("/configurations/application-properties/atlas.server.ha.enabled", None)
additional_ha_props = configs_for_ha(atlas_hosts, metadata_port, is_atlas_ha_enabled, metadata_protocol)
for k,v in additional_ha_props.iteritems():
application_properties[k] = v
metadata_env_content = config['configurations']['atlas-env']['content']
metadata_opts = config['configurations']['atlas-env']['metadata_opts']
metadata_classpath = config['configurations']['atlas-env']['metadata_classpath']
data_dir = format("{stack_root}/current/atlas-server/data")
expanded_war_dir = os.environ['METADATA_EXPANDED_WEBAPP_DIR'] if 'METADATA_EXPANDED_WEBAPP_DIR' in os.environ else format("{stack_root}/current/atlas-server/server/webapp")
metadata_log4j_content = config['configurations']['atlas-log4j']['content']
metadata_solrconfig_content = default("/configurations/atlas-solrconfig/content", None)
atlas_log_level = config['configurations']['atlas-log4j']['atlas_log_level']
audit_log_level = config['configurations']['atlas-log4j']['audit_log_level']
atlas_log_max_backup_size = default("/configurations/atlas-log4j/atlas_log_max_backup_size", 256)
atlas_log_number_of_backup_files = default("/configurations/atlas-log4j/atlas_log_number_of_backup_files", 20)
# smoke test
smoke_test_user = config['configurations']['cluster-env']['smokeuser']
smoke_test_password = 'smoke'
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
security_check_status_file = format('{log_dir}/security_check.status')
# hbase
hbase_conf_dir = "/etc/hbase/conf"
atlas_search_backend = default("/configurations/application-properties/atlas.graph.index.search.backend", "")
search_backend_solr = atlas_search_backend.startswith('solr')
# infra solr
infra_solr_znode = default("/configurations/infra-solr-env/infra_solr_znode", None)
infra_solr_hosts = default("/clusterHostInfo/infra_solr_hosts", [])
infra_solr_replication_factor = 2 if len(infra_solr_hosts) > 1 else 1
if 'atlas_solr_replication_factor' in config['configurations']['atlas-env']:
infra_solr_replication_factor = int(default("/configurations/atlas-env/atlas_solr_replication_factor", 1))
atlas_solr_shards = default("/configurations/atlas-env/atlas_solr_shards", 1)
has_infra_solr = len(infra_solr_hosts) > 0
infra_solr_role_atlas = default('configurations/infra-solr-security-json/infra_solr_role_atlas', 'atlas_user')
infra_solr_role_dev = default('configurations/infra-solr-security-json/infra_solr_role_dev', 'dev')
infra_solr_role_ranger_audit = default('configurations/infra-solr-security-json/infra_solr_role_ranger_audit', 'ranger_audit_user')
# zookeeper
zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
# build a comma-separated zookeeper quorum (host:port pairs) from clusterHostInfo
index = 0
zookeeper_quorum = ""
for host in zookeeper_hosts:
zookeeper_host = host
if zookeeper_port is not None:
zookeeper_host = host + ":" + str(zookeeper_port)
zookeeper_quorum += zookeeper_host
index += 1
if index < len(zookeeper_hosts):
zookeeper_quorum += ","
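# After the loop, zookeeper_quorum looks like "host1:2181,host2:2181,..."
# (the port suffix is omitted when clientPort is not configured).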
stack_supports_atlas_hdfs_site_on_namenode_ha = check_stack_feature(StackFeature.ATLAS_HDFS_SITE_ON_NAMENODE_HA, version_for_stack_feature_checks)
atlas_server_xmx = default("configurations/atlas-env/atlas_server_xmx", 2048)
atlas_server_max_new_size = default("configurations/atlas-env/atlas_server_max_new_size", 614)
hbase_master_hosts = default('/clusterHostInfo/hbase_master_hosts', [])
has_hbase_master = not len(hbase_master_hosts) == 0
atlas_hbase_setup = format("{exec_tmp_dir}/atlas_hbase_setup.rb")
atlas_kafka_setup = format("{exec_tmp_dir}/atlas_kafka_acl.sh")
atlas_graph_storage_hbase_table = default('/configurations/application-properties/atlas.graph.storage.hbase.table', None)
atlas_audit_hbase_tablename = default('/configurations/application-properties/atlas.audit.hbase.tablename', None)
hbase_user_keytab = default('/configurations/hbase-env/hbase_user_keytab', None)
hbase_principal_name = default('/configurations/hbase-env/hbase_principal_name', None)
# ToDo: Kafka port to Atlas
# Used while upgrading the stack in a kerberized cluster and running kafka-acls.sh
hosts_with_kafka = default('/clusterHostInfo/kafka_broker_hosts', [])
host_with_kafka = hostname in hosts_with_kafka
ranger_tagsync_hosts = default("/clusterHostInfo/ranger_tagsync_hosts", [])
has_ranger_tagsync = len(ranger_tagsync_hosts) > 0
rangertagsync_user = "rangertagsync"
kafka_keytab = default('/configurations/kafka-env/kafka_keytab', None)
kafka_principal_name = default('/configurations/kafka-env/kafka_principal_name', None)
default_replication_factor = default('/configurations/application-properties/atlas.notification.replicas', None)
if check_stack_feature(StackFeature.ATLAS_UPGRADE_SUPPORT, version_for_stack_feature_checks):
default_replication_factor = default('/configurations/application-properties/atlas.notification.replicas', None)
kafka_env_sh_template = config['configurations']['kafka-env']['content']
kafka_home = os.path.join(stack_root, "current", "kafka-broker")
kafka_conf_dir = os.path.join(kafka_home, "config")
kafka_zk_endpoint = default("/configurations/kafka-broker/zookeeper.connect", None)
kafka_kerberos_enabled = (('security.inter.broker.protocol' in config['configurations']['kafka-broker']) and
((config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "PLAINTEXTSASL") or
(config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "SASL_PLAINTEXT")))
if security_enabled and stack_version_formatted != "" and 'kafka_principal_name' in config['configurations']['kafka-env'] \
and check_stack_feature(StackFeature.KAFKA_KERBEROS, stack_version_formatted):
_hostname_lowercase = config['hostname'].lower()
_kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
kafka_jaas_principal = _kafka_principal_name.replace('_HOST', _hostname_lowercase)
kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab']
kafka_bare_jaas_principal = get_bare_principal(_kafka_principal_name)
kafka_kerberos_params = "-Djava.security.auth.login.config={0}/kafka_jaas.conf".format(kafka_conf_dir)
else:
kafka_kerberos_params = ''
kafka_jaas_principal = None
kafka_keytab_path = None
namenode_host = set(default("/clusterHostInfo/namenode_host", []))
has_namenode = not len(namenode_host) == 0
# ranger atlas plugin section start
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = not len(ranger_admin_hosts) == 0
retry_enabled = default("/commandParams/command_retry_enabled", False)
stack_supports_atlas_ranger_plugin = check_stack_feature(StackFeature.ATLAS_RANGER_PLUGIN_SUPPORT, version_for_stack_feature_checks)
stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks)
# ranger supports the xml_configuration flag; instead of depending on ranger-env's xml_configurations_supported, use the stack feature
xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks)
# ranger atlas plugin enabled property
enable_ranger_atlas = default("/configurations/ranger-atlas-plugin-properties/ranger-atlas-plugin-enabled", "No")
enable_ranger_atlas = True if enable_ranger_atlas.lower() == "yes" else False
# ranger hbase plugin enabled property
enable_ranger_hbase = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled", "No")
enable_ranger_hbase = True if enable_ranger_hbase.lower() == 'yes' else False
if stack_supports_atlas_ranger_plugin and enable_ranger_atlas:
# for create_hdfs_directory
hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] if has_namenode else None
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if has_namenode else None
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
import functools
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
  # Create a partial function with common arguments for every HdfsResource call.
  # To create an HDFS directory we need to call params.HdfsResource in code.
HdfsResource = functools.partial(
HdfsResource,
user = hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources(),
dfs_type = dfs_type
)
# ranger atlas service/repository name
repo_name = str(config['clusterName']) + '_atlas'
repo_name_value = config['configurations']['ranger-atlas-security']['ranger.plugin.atlas.service.name']
if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}":
repo_name = repo_name_value
ssl_keystore_password = config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']
ssl_truststore_password = config['configurations']['ranger-atlas-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']
credential_file = format('/etc/ranger/{repo_name}/cred.jceks')
xa_audit_hdfs_is_enabled = default('/configurations/ranger-atlas-audit/xasecure.audit.destination.hdfs', False)
# get ranger policy url
policymgr_mgr_url = config['configurations']['ranger-atlas-security']['ranger.plugin.atlas.policy.rest.url']
if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'):
policymgr_mgr_url = policymgr_mgr_url.rstrip('/')
downloaded_custom_connector = None
driver_curl_source = None
driver_curl_target = None
ranger_env = config['configurations']['ranger-env']
# create ranger-env config having external ranger credential properties
if not has_ranger_admin and enable_ranger_atlas:
external_admin_username = default('/configurations/ranger-atlas-plugin-properties/external_admin_username', 'admin')
external_admin_password = default('/configurations/ranger-atlas-plugin-properties/external_admin_password', 'admin')
external_ranger_admin_username = default('/configurations/ranger-atlas-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin')
external_ranger_admin_password = default('/configurations/ranger-atlas-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin')
ranger_env = {}
ranger_env['admin_username'] = external_admin_username
ranger_env['admin_password'] = external_admin_password
ranger_env['ranger_admin_username'] = external_ranger_admin_username
ranger_env['ranger_admin_password'] = external_ranger_admin_password
ranger_plugin_properties = config['configurations']['ranger-atlas-plugin-properties']
ranger_atlas_audit = config['configurations']['ranger-atlas-audit']
ranger_atlas_audit_attrs = config['configuration_attributes']['ranger-atlas-audit']
ranger_atlas_security = config['configurations']['ranger-atlas-security']
ranger_atlas_security_attrs = config['configuration_attributes']['ranger-atlas-security']
ranger_atlas_policymgr_ssl = config['configurations']['ranger-atlas-policymgr-ssl']
ranger_atlas_policymgr_ssl_attrs = config['configuration_attributes']['ranger-atlas-policymgr-ssl']
policy_user = config['configurations']['ranger-atlas-plugin-properties']['policy_user']
atlas_repository_configuration = {
'username' : config['configurations']['ranger-atlas-plugin-properties']['REPOSITORY_CONFIG_USERNAME'],
'password' : unicode(config['configurations']['ranger-atlas-plugin-properties']['REPOSITORY_CONFIG_PASSWORD']),
'atlas.rest.address' : metadata_server_url,
'commonNameForCertificate' : config['configurations']['ranger-atlas-plugin-properties']['common.name.for.certificate'],
'ambari.service.check.user' : policy_user
}
custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties)
if len(custom_ranger_service_config) > 0:
atlas_repository_configuration.update(custom_ranger_service_config)
if security_enabled:
atlas_repository_configuration['policy.download.auth.users'] = metadata_user
atlas_repository_configuration['tag.download.auth.users'] = metadata_user
atlas_ranger_plugin_repo = {
'isEnabled': 'true',
'configs': atlas_repository_configuration,
'description': 'atlas repo',
'name': repo_name,
'type': 'atlas',
}
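# Editor's note (hedged): atlas_ranger_plugin_repo above is the service/repository
# definition that the Ranger plugin setup code is expected to send to the Ranger
# Admin REST API when the Atlas plugin is enabled; the exact consumer is outside
# this fragment.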
# ranger atlas plugin section end
# atlas admin login username password
atlas_admin_username = config['configurations']['atlas-env']['atlas.admin.username']
atlas_admin_password = config['configurations']['atlas-env']['atlas.admin.password']
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by an ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
from prjxray.segmaker import Segmaker
import os
import os.path
def bitfilter(frame, word):
if frame < 25 or frame > 29:
return False
return True
def merge_lr_wires(wire):
wire = wire.replace('CMT_L_LOWER_B', 'CMT_LRMAP_LOWER_B')
wire = wire.replace('CMT_R_LOWER_B', 'CMT_LRMAP_LOWER_B')
return wire
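# Editor's note (hedged): merge_lr_wires() canonicalizes the left/right wire name
# prefixes so that otherwise identical wires from the L and R tile variants share
# one tag name, e.g. (illustrative wire name):
#   merge_lr_wires('CMT_L_LOWER_B_CLK_IN0') -> 'CMT_LRMAP_LOWER_B_CLK_IN0'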
def main():
segmk = Segmaker("design.bits")
designdata = {}
tiledata = {}
pipdata = {}
ppipdata = {}
ignpip = set()
all_clks = {}
piplists = ['cmt_top_l_lower_b.txt', 'cmt_top_r_lower_b.txt']
wirelists = ['cmt_top_l_lower_b_wires.txt', 'cmt_top_r_lower_b_wires.txt']
ppiplists = ['ppips_cmt_top_l_lower_b.db', 'ppips_cmt_top_r_lower_b.db']
# Load PIP lists
print("Loading PIP lists...")
for piplist in piplists:
with open(os.path.join(os.getenv('FUZDIR'), '..', 'piplist', 'build',
'cmt_top_lower', piplist)) as f:
for l in f:
pip, is_directional = l.strip().split(' ')
tile_type, dst, src = pip.split('.')
if tile_type not in pipdata:
pipdata[tile_type] = []
all_clks[tile_type] = set()
pipdata[tile_type].append((src, dst))
if dst.split('_')[-1].startswith('CLK'):
all_clks[tile_type].add(src)
if not int(is_directional):
pipdata[tile_type].append((dst, src))
if src.split('_')[-1].startswith('CLK'):
all_clks[tile_type].add(dst)
wiredata = {}
for wirelist in wirelists:
with open(os.path.join(os.getenv('FUZDIR'), '..', 'piplist', 'build',
'cmt_top_lower', wirelist)) as f:
for l in f:
tile_type, wire = l.strip().split()
if tile_type not in wiredata:
wiredata[tile_type] = set()
wiredata[tile_type].add(wire)
# Load PPIP lists (to exclude them)
print("Loading PPIP lists...")
for ppiplist in ppiplists:
fname = os.path.join(
os.getenv('FUZDIR'), '..', '071-ppips', 'build', ppiplist)
with open(fname, 'r') as f:
for l in f:
pip_data, pip_type = l.strip().split()
if pip_type != 'always':
continue
tile_type, dst, src = pip_data.split('.')
if tile_type not in ppipdata:
ppipdata[tile_type] = []
ppipdata[tile_type].append((src, dst))
    # Load design data
print("Loading design data...")
with open("design.txt", "r") as f:
for line in f:
fields = line.strip().split(",")
designdata[fields[0]] = fields[1:]
with open("design_pips.txt", "r") as f:
for line in f:
tile, pip, src, dst, pnum, pdir = line.split()
if not tile.startswith('CMT_TOP'):
continue
if 'UPPER_B' in tile:
continue
if 'LOWER_T' in tile:
continue
pip_prefix, _ = pip.split(".")
tile_from_pip, tile_type = pip_prefix.split('/')
assert tile == tile_from_pip
_, src = src.split("/")
_, dst = dst.split("/")
pnum = int(pnum)
pdir = int(pdir)
if tile not in tiledata:
tiledata[tile] = {
"type": tile_type,
"pips": set(),
"srcs": set(),
"dsts": set(),
}
tiledata[tile]["pips"].add((src, dst))
tiledata[tile]["srcs"].add(src)
tiledata[tile]["dsts"].add(dst)
if pdir == 0:
tiledata[tile]["srcs"].add(dst)
tiledata[tile]["dsts"].add(src)
#if dst.startswith('CMT_TOP_R_LOWER_B_CLK') or \
# dst.startswith('CMT_TOP_L_LOWER_B_CLK'):
# ignpip.add((src, dst))
active_wires = {}
with open("design_wires.txt", "r") as f:
for l in f:
tile, wire = l.strip().split('/')
if tile not in active_wires:
active_wires[tile] = set()
active_wires[tile].add(wire)
tags = {}
# Populate IN_USE tags
for tile, (site, in_use) in designdata.items():
if tile not in tags:
tags[tile] = {}
tile_type = tile.rsplit("_", maxsplit=1)[0]
tags[tile]["IN_USE"] = int(in_use)
# Populate PIPs
active_clks = {}
for tile in tags.keys():
tile_type = tile.rsplit("_", maxsplit=1)[0]
in_use = tags[tile]["IN_USE"]
if not in_use:
active_pips = []
else:
active_pips = tiledata[tile]["pips"]
for src, dst in pipdata[tile_type]:
if (src, dst) in ignpip:
continue
if (src, dst) in ppipdata[tile_type]:
continue
tag = "{}.{}".format(merge_lr_wires(dst), merge_lr_wires(src))
val = in_use if (src, dst) in active_pips else False
if not (in_use and not val):
if tile not in active_clks:
active_clks[tile] = set()
active_clks[tile].add(src)
tags[tile][tag] = int(val)
for wire in wiredata[tile_type]:
if 'CLK' not in wire:
continue
if 'CLKOUT' in wire:
continue
if 'CLKFB' in wire:
continue
if 'REBUF' in wire:
continue
wire = merge_lr_wires(wire)
if tile not in active_wires:
active_wires[tile] = set()
segmk.add_tile_tag(
tile, '{}_ACTIVE'.format(wire), wire in active_wires[tile])
# Output tags
for tile, tile_tags in tags.items():
for t, v in tile_tags.items():
segmk.add_tile_tag(tile, t, v)
segmk.compile(bitfilter=bitfilter)
segmk.write()
if __name__ == "__main__":
main()
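# Editor's note (hedged): the input formats assumed by main() above, inferred from
# the parsing code rather than from project documentation:
#   design.txt       - "tile,site,in_use" per line (comma separated)
#   design_pips.txt  - "tile pip src dst pnum pdir" per line (whitespace separated)
#   design_wires.txt - "tile/wire" per line
# The Segmaker calls in main() follow the same pattern as this minimal sketch
# (the tile name is illustrative):
#   segmk = Segmaker("design.bits")
#   segmk.add_tile_tag("CMT_TOP_L_LOWER_B_X8Y104", "IN_USE", 1)
#   segmk.compile(bitfilter=bitfilter)
#   segmk.write()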
|
|
"""This will be the home for the policy that hooks in the new
code that adds all the email6 features.
"""
from email._policybase import Policy, Compat32, compat32, _extend_docstrings
from email.utils import _has_surrogates
from email.headerregistry import HeaderRegistry as HeaderRegistry
from email.contentmanager import raw_data_manager
__all__ = [
'Compat32',
'compat32',
'Policy',
'EmailPolicy',
'default',
'strict',
'SMTP',
'HTTP',
]
@_extend_docstrings
class EmailPolicy(Policy):
"""+
PROVISIONAL
The API extensions enabled by this policy are currently provisional.
Refer to the documentation for details.
This policy adds new header parsing and folding algorithms. Instead of
simple strings, headers are custom objects with custom attributes
depending on the type of the field. The folding algorithm fully
implements RFCs 2047 and 5322.
In addition to the settable attributes listed above that apply to
all Policies, this policy adds the following additional attributes:
refold_source -- if the value for a header in the Message object
came from the parsing of some source, this attribute
indicates whether or not a generator should refold
that value when transforming the message back into
stream form. The possible values are:
none -- all source values use original folding
long -- source values that have any line that is
longer than max_line_length will be
refolded
all -- all values are refolded.
The default is 'long'.
header_factory -- a callable that takes two arguments, 'name' and
'value', where 'name' is a header field name and
'value' is an unfolded header field value, and
returns a string-like object that represents that
header. A default header_factory is provided that
understands some of the RFC5322 header field types.
(Currently address fields and date fields have
special treatment, while all other fields are
treated as unstructured. This list will be
completed before the extension is marked stable.)
content_manager -- an object with at least two methods: get_content
and set_content. When the get_content or
set_content method of a Message object is called,
it calls the corresponding method of this object,
passing it the message object as its first argument,
and any arguments or keywords that were passed to
it as additional arguments. The default
content_manager is
:data:`~email.contentmanager.raw_data_manager`.
"""
refold_source = 'long'
header_factory = HeaderRegistry()
content_manager = raw_data_manager
def __init__(self, **kw):
# Ensure that each new instance gets a unique header factory
# (as opposed to clones, which share the factory).
if 'header_factory' not in kw:
object.__setattr__(self, 'header_factory', HeaderRegistry())
super().__init__(**kw)
def header_max_count(self, name):
"""+
The implementation for this class returns the max_count attribute from
the specialized header class that would be used to construct a header
of type 'name'.
"""
return self.header_factory[name].max_count
# The logic of the next three methods is chosen such that it is possible to
# switch a Message object between a Compat32 policy and a policy derived
# from this class and have the results stay consistent. This allows a
# Message object constructed with this policy to be passed to a library
# that only handles Compat32 objects, or to receive such an object and
# convert it to use the newer style by just changing its policy. It is
# also chosen because it postpones the relatively expensive full rfc5322
# parse until as late as possible when parsing from source, since in many
# applications only a few headers will actually be inspected.
def header_source_parse(self, sourcelines):
"""+
The name is parsed as everything up to the ':' and returned unmodified.
The value is determined by stripping leading whitespace off the
remainder of the first line, joining all subsequent lines together, and
stripping any trailing carriage return or linefeed characters. (This
is the same as Compat32).
"""
name, value = sourcelines[0].split(':', 1)
value = value.lstrip(' \t') + ''.join(sourcelines[1:])
return (name, value.rstrip('\r\n'))
def header_store_parse(self, name, value):
"""+
The name is returned unchanged. If the input value has a 'name'
attribute and it matches the name ignoring case, the value is returned
unchanged. Otherwise the name and value are passed to header_factory
method, and the resulting custom header object is returned as the
value. In this case a ValueError is raised if the input value contains
CR or LF characters.
"""
if hasattr(value, 'name') and value.name.lower() == name.lower():
return (name, value)
if isinstance(value, str) and len(value.splitlines())>1:
raise ValueError("Header values may not contain linefeed "
"or carriage return characters")
return (name, self.header_factory(name, value))
def header_fetch_parse(self, name, value):
"""+
        If the value has a 'name' attribute, it is returned unmodified.
Otherwise the name and the value with any linesep characters removed
are passed to the header_factory method, and the resulting custom
header object is returned. Any surrogateescaped bytes get turned
into the unicode unknown-character glyph.
"""
if hasattr(value, 'name'):
return value
return self.header_factory(name, ''.join(value.splitlines()))
def fold(self, name, value):
"""+
Header folding is controlled by the refold_source policy setting. A
value is considered to be a 'source value' if and only if it does not
have a 'name' attribute (having a 'name' attribute means it is a header
object of some sort). If a source value needs to be refolded according
to the policy, it is converted into a custom header object by passing
the name and the value with any linesep characters removed to the
header_factory method. Folding of a custom header object is done by
calling its fold method with the current policy.
Source values are split into lines using splitlines. If the value is
not to be refolded, the lines are rejoined using the linesep from the
policy and returned. The exception is lines containing non-ascii
binary data. In that case the value is refolded regardless of the
refold_source setting, which causes the binary data to be CTE encoded
using the unknown-8bit charset.
"""
return self._fold(name, value, refold_binary=True)
def fold_binary(self, name, value):
"""+
The same as fold if cte_type is 7bit, except that the returned value is
bytes.
If cte_type is 8bit, non-ASCII binary data is converted back into
bytes. Headers with binary data are not refolded, regardless of the
        refold_source setting, since there is no way to know whether the binary
data consists of single byte characters or multibyte characters.
"""
folded = self._fold(name, value, refold_binary=self.cte_type=='7bit')
return folded.encode('ascii', 'surrogateescape')
def _fold(self, name, value, refold_binary=False):
if hasattr(value, 'name'):
return value.fold(policy=self)
maxlen = self.max_line_length if self.max_line_length else float('inf')
lines = value.splitlines()
refold = (self.refold_source == 'all' or
self.refold_source == 'long' and
(lines and len(lines[0])+len(name)+2 > maxlen or
any(len(x) > maxlen for x in lines[1:])))
if refold or refold_binary and _has_surrogates(value):
return self.header_factory(name, ''.join(lines)).fold(policy=self)
return name + ': ' + self.linesep.join(lines) + self.linesep
default = EmailPolicy()
# Make the default policy use the class default header_factory
del default.header_factory
strict = default.clone(raise_on_defect=True)
SMTP = default.clone(linesep='\r\n')
HTTP = default.clone(linesep='\r\n', max_line_length=None)
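# Editor's usage sketch (hedged, not part of the original module): shows how the
# policy objects defined above affect header construction and folding.  The
# address used here is purely illustrative.
if __name__ == '__main__':
    # header_factory turns a (name, unfolded value) pair into a structured
    # header object; address headers expose their parsed addresses.
    to_header = default.header_factory('To', 'User Name <user@example.com>')
    print(to_header.addresses[0].addr_spec)   # user@example.com
    # Folding goes through the policy; the SMTP clone folds with CRLF.
    print(repr(SMTP.fold('To', to_header)))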
|
|
# -*- coding: utf-8 -*-
"""
Export of textX based models and metamodels to dot file.
"""
from __future__ import unicode_literals
from arpeggio import Match, OrderedChoice, Sequence, OneOrMore, ZeroOrMore,\
Optional
from textx.const import MULT_ZEROORMORE, MULT_ONEORMORE, MULT_ONE, \
RULE_ABSTRACT, RULE_COMMON, RULE_MATCH
from textx.lang import PRIMITIVE_PYTHON_TYPES, BASE_TYPE_NAMES, ALL_TYPE_NAMES
import codecs
import sys
if sys.version < '3':
text = unicode # noqa
else:
text = str
HEADER = '''
digraph textX {
fontname = "Bitstream Vera Sans"
fontsize = 8
node[
shape=record,
style=filled,
fillcolor=aliceblue
]
nodesep = 0.3
edge[dir=black,arrowtail=empty]
'''
def dot_match_str(cls, other_match_rules=None):
"""
    For a given match rule meta-class, return a nice string representation of
    its body.
"""
def r(s):
# print("==>" + str(s) + " " + s.rule_name)
if s.root:
# breakpoint()
if s in visited or s.rule_name in ALL_TYPE_NAMES or \
(hasattr(s, '_tx_class')
and (s._tx_class._tx_type is not RULE_MATCH
or (s._tx_class in other_match_rules
and s._tx_class is not cls))):
# print("==> NAME " + s.rule_name)
return s.rule_name
visited.add(s)
if isinstance(s, Match):
result = text(s)
elif isinstance(s, OrderedChoice):
result = "|".join([r(x) for x in s.nodes])
elif isinstance(s, Sequence):
result = " ".join([r(x) for x in s.nodes])
elif isinstance(s, ZeroOrMore):
result = "({})*".format(r(s.nodes[0]))
elif isinstance(s, OneOrMore):
result = "({})+".format(r(s.nodes[0]))
elif isinstance(s, Optional):
result = "{}?".format(r(s.nodes[0]))
else:
# breakpoint()
# print("#### {}".format(s.__class__.__name__))
result = "{}({})".format(
s.__class__.__name__,
','.join([r(x) for x in s.nodes]))
return "{}{}".format(result, "-" if s.suppress else "")
mstr = ""
# print("---------- "+str(cls))
if not (cls._tx_type is RULE_ABSTRACT
and cls.__name__ != cls._tx_peg_rule.rule_name):
e = cls._tx_peg_rule
visited = set()
if other_match_rules is None:
other_match_rules = set()
if not isinstance(e, Match):
visited.add(e)
if isinstance(e, OrderedChoice):
mstr = "|".join([r(x) for x in e.nodes
if x.rule_name in BASE_TYPE_NAMES or not x.root])
elif isinstance(e, Sequence):
mstr = " ".join([r(x) for x in e.nodes])
else:
mstr = r(e)
return mstr
def dot_escape(s):
return s.replace('\n', r'\n')\
.replace('\\', '\\\\')\
.replace('"', r'\"')\
.replace('|', r'\|')\
.replace('{', r'\{')\
.replace('}', r'\}')\
.replace('>', r'\>')\
.replace('<', r'\<')\
.replace('?', r'\?')
if sys.version < '3':
def html_escape(s):
return s.replace("<", "<").replace(">", ">")
else:
def html_escape(s):
from html import escape
return escape(s)
def dot_repr(o):
if type(o) is text:
escaped = dot_escape(text(o))
if len(escaped) > 20:
return "'{}...'".format(escaped[:20])
else:
return "'{}'".format(escaped)
else:
return text(o)
class DotRenderer(object):
def __init__(self):
self.match_rules = set()
def get_header(self):
return HEADER
def get_match_rules_table(self):
trailer = ''
if self.match_rules:
trailer = '<table>\n'
for cls in sorted(self.match_rules, key=lambda x: x._tx_fqn):
trailer += '\t<tr>\n'
attrs = dot_match_str(cls, self.match_rules)
trailer += '\t\t<td><b>{}</b></td><td>{}</td>\n'.format(
cls.__name__, html_escape(attrs))
trailer += '\t</tr>\n'
trailer += '</table>'
return trailer
def get_trailer(self):
trailer = ''
if self.match_rules:
trailer = 'match_rules [ shape=plaintext, label=< {} >]\n\n'.format(
self.get_match_rules_table()
)
return trailer + '\n}\n'
def render_class(self, cls):
name = cls.__name__
attrs = ""
if cls._tx_type is RULE_MATCH:
if cls.__name__ not in BASE_TYPE_NAMES:
self.match_rules.add(cls)
return ''
elif cls._tx_type is not RULE_ABSTRACT:
for attr in cls._tx_attrs.values():
required = attr.mult in [MULT_ONE, MULT_ONEORMORE]
mult_list = attr.mult in [MULT_ZEROORMORE, MULT_ONEORMORE]
attr_type = "list[{}]".format(attr.cls.__name__) \
if mult_list else attr.cls.__name__
if attr.ref and attr.cls.__name__ != 'OBJECT':
pass
else:
# If it is plain type
attrs += '{}: {}\\l'.format(
attr.name, attr_type
if required else r'optional\<{}\>'.format(attr_type))
return '{}[ label="{{{}|{}}}"]\n\n'.format(
id(cls), "*{}".format(name)
if cls._tx_type is RULE_ABSTRACT else name, attrs)
def render_attr_link(self, cls, attr):
arrowtail = "arrowtail=diamond, dir=both, " \
if attr.cont else ""
if attr.ref and attr.cls.__name__ != 'OBJECT':
# If attribute is a reference
mult = attr.mult if not attr.mult == MULT_ONE else ""
return '{} -> {}[{}headlabel="{} {}"]\n'\
.format(id(cls), id(attr.cls), arrowtail,
attr.name, mult)
def render_inherited_by(self, base, special):
return '{} -> {} [dir=back]\n'\
.format(id(base), id(special))
class PlantUmlRenderer(object):
def __init__(self):
self.match_rules = set()
def get_header(self):
return '''@startuml
set namespaceSeparator .
'''
def get_trailer(self):
trailer = ''
if self.match_rules:
trailer += '\nlegend\n'
trailer += ' Match rules:\n'
trailer += ' |= Name |= Rule details |\n'
for cls in self.match_rules:
# print("-*-> " + cls.__name__)
trailer += ' | {} | {} |\n'.format(
cls.__name__,
dot_escape(dot_match_str(cls, self.match_rules)) # reuse
)
trailer += "end legend\n\n"
trailer += '@enduml\n'
return trailer
def render_class(self, cls):
attrs = ""
stereotype = ""
if cls._tx_type is RULE_MATCH:
if cls.__name__ not in BASE_TYPE_NAMES:
self.match_rules.add(cls)
return ''
elif cls._tx_type is not RULE_COMMON:
stereotype += cls._tx_type
else:
for attr in cls._tx_attrs.values():
required = attr.mult in [MULT_ONE, MULT_ONEORMORE]
mult_list = attr.mult in [MULT_ZEROORMORE, MULT_ONEORMORE]
attr_type = "list[{}]".format(attr.cls.__name__) \
if mult_list else attr.cls.__name__
if attr.ref and attr.cls.__name__ != 'OBJECT':
pass
else:
if required:
attrs += " {} : {}\n".format(attr.name, attr_type)
else:
attrs += " {} : optional<{}>\n".format(attr.name,
attr_type)
if len(stereotype) > 0:
stereotype = "<<" + stereotype + ">>"
return '\n\nclass {} {} {{\n{}}}\n'.format(
cls._tx_fqn, stereotype, attrs)
def render_attr_link(self, cls, attr):
if attr.ref and attr.cls.__name__ != 'OBJECT':
# If attribute is a reference
# mult = attr.mult if not attr.mult == MULT_ONE else ""
if attr.cont:
arr = "*--"
else:
arr = "o--"
if attr.mult == MULT_ZEROORMORE:
arr = arr + ' "0..*"'
elif attr.mult == MULT_ONEORMORE:
arr = arr + ' "1..*"'
return '{} {} {}\n'.format(
cls._tx_fqn, arr, attr.cls._tx_fqn)
def render_inherited_by(self, base, special):
return '{} <|-- {}\n'.format(base._tx_fqn, special._tx_fqn)
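# Editor's note (hedged): DotRenderer and PlantUmlRenderer implement the same
# informal interface (get_header, render_class, render_attr_link,
# render_inherited_by, get_trailer), so metamodel_export_tofile() below can drive
# either of them interchangeably.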
def metamodel_export(metamodel, file_name, renderer=None):
with codecs.open(file_name, 'w', encoding="utf-8") as f:
metamodel_export_tofile(metamodel, f, renderer)
def metamodel_export_tofile(metamodel, f, renderer=None):
if renderer is None:
renderer = DotRenderer()
f.write(renderer.get_header())
classes = [c for c in metamodel if c._tx_fqn not in ALL_TYPE_NAMES]
for cls in classes:
f.write(renderer.render_class(cls))
f.write("\n\n")
for cls in classes:
if cls._tx_type is not RULE_COMMON:
pass
else:
for attr in cls._tx_attrs.values():
if attr.ref and attr.cls.__name__ != 'OBJECT':
f.write(renderer.render_attr_link(cls, attr))
if attr.cls not in classes:
f.write(renderer.render_class(attr.cls))
for inherited_by in cls._tx_inh_by:
f.write(renderer.render_inherited_by(cls, inherited_by))
f.write("{}".format(renderer.get_trailer()))
def model_export(model, file_name, repo=None):
"""
Args:
model: the model to be exported (may be None if repo is not None)
file_name: the output file name
repo: the model repo (alternative to model input) to be exported
Returns:
Nothing
"""
with codecs.open(file_name, 'w', encoding="utf-8") as f:
model_export_to_file(f, model, repo)
def model_export_to_file(f, model=None, repo=None):
"""
Args:
f: the file object to be used as output.
model: the model to be exported (alternative to repo)
repo: the repo to be exported (alternative to model)
Returns:
Nothing
"""
if not model and not repo:
raise Exception("specity either a model or a repo")
if model and repo:
raise Exception("specity either a model or a repo")
processed_set = set()
f.write(HEADER)
def _export(obj):
if obj is None or id(obj) in processed_set or type(obj) \
in PRIMITIVE_PYTHON_TYPES:
return
processed_set.add(id(obj))
attrs = ""
obj_cls = obj.__class__
name = ""
for attr_name, attr in obj_cls._tx_attrs.items():
attr_value = getattr(obj, attr_name)
if attr_value is None:
continue
endmark = 'arrowtail=diamond dir=both' if attr.cont else ""
required = "+" if attr.mult in \
[MULT_ONE, MULT_ONEORMORE] else ""
if attr.mult in [MULT_ONEORMORE, MULT_ZEROORMORE]:
if all([type(x) in PRIMITIVE_PYTHON_TYPES
for x in attr_value]):
attrs += "{}{}:list=[".format(required, attr_name)
attrs += ",".join([dot_repr(x) for x in attr_value])
attrs += "]\\l"
else:
for idx, list_obj in enumerate(attr_value):
if list_obj is not None:
if type(list_obj) in PRIMITIVE_PYTHON_TYPES:
f.write(
'{} -> "{}:{}" [label="{}:{}" {}]\n'
.format(id(obj), list_obj,
type(list_obj).__name__,
attr_name, idx, endmark))
else:
f.write('{} -> {} [label="{}:{}" {}]\n'
.format(id(obj), id(list_obj),
attr_name, idx, endmark))
_export(list_obj)
else:
# Plain attributes
if type(attr_value) is text and attr_name != 'name':
attr_value = dot_repr(attr_value)
if type(attr_value) in PRIMITIVE_PYTHON_TYPES:
if attr_name == 'name':
name = attr_value
else:
attrs += "{}{}:{}={}\\l".format(
required, attr_name, type(attr_value)
.__name__, attr_value)
else:
# Object references
if attr_value is not None:
f.write('{} -> {} [label="{}" {}]\n'.format(
id(obj), id(attr_value),
attr_name, endmark))
_export(attr_value)
name = "{}:{}".format(name, obj_cls.__name__)
f.write('{}[label="{{{}|{}}}"]\n'.format(id(obj), name, attrs))
def _export_subgraph(m):
from textx import get_children
f.write('subgraph "cluster_{}" {{\n'.format(m._tx_filename))
f.write('''
penwidth=2.0
color=darkorange4;
label = "{}";
'''.format(m._tx_filename))
for obj in get_children(lambda _: True, m):
f.write('{};\n'.format(id(obj)))
f.write('\n}\n')
if repo or hasattr(model, "_tx_model_repository"):
if not repo:
repo = model._tx_model_repository.all_models
if not repo:
_export(model)
for m in repo:
_export_subgraph(m)
_export(m)
else:
_export(model)
f.write('\n}\n')
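# Editor's usage sketch (hedged, not part of the original module): shows how the
# exporters above are typically driven.  The grammar, model text and output file
# names are illustrative.
if __name__ == '__main__':
    from textx import metamodel_from_str
    mm = metamodel_from_str("Model: entities+=Entity; Entity: 'entity' name=ID;")
    metamodel_export(mm, 'metamodel.dot')                              # GraphViz
    metamodel_export(mm, 'metamodel.pu', renderer=PlantUmlRenderer())  # PlantUML
    model = mm.model_from_str('entity First entity Second')
    model_export(model, 'model.dot')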
|
|
"""Collection of useful functions for the HomeKit component."""
import io
import ipaddress
import logging
import os
import re
import secrets
import socket
import pyqrcode
import voluptuous as vol
from homeassistant.components import binary_sensor, media_player, sensor
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.components.lock import DOMAIN as LOCK_DOMAIN
from homeassistant.components.media_player import (
DEVICE_CLASS_TV,
DOMAIN as MEDIA_PLAYER_DOMAIN,
)
from homeassistant.components.remote import DOMAIN as REMOTE_DOMAIN, SUPPORT_ACTIVITY
from homeassistant.const import (
ATTR_CODE,
ATTR_DEVICE_CLASS,
ATTR_SUPPORTED_FEATURES,
CONF_NAME,
CONF_PORT,
CONF_TYPE,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant, split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.storage import STORAGE_DIR
import homeassistant.util.temperature as temp_util
from .const import (
AUDIO_CODEC_COPY,
AUDIO_CODEC_OPUS,
CONF_AUDIO_CODEC,
CONF_AUDIO_MAP,
CONF_AUDIO_PACKET_SIZE,
CONF_FEATURE,
CONF_FEATURE_LIST,
CONF_LINKED_BATTERY_CHARGING_SENSOR,
CONF_LINKED_BATTERY_SENSOR,
CONF_LINKED_DOORBELL_SENSOR,
CONF_LINKED_HUMIDITY_SENSOR,
CONF_LINKED_MOTION_SENSOR,
CONF_LINKED_OBSTRUCTION_SENSOR,
CONF_LOW_BATTERY_THRESHOLD,
CONF_MAX_FPS,
CONF_MAX_HEIGHT,
CONF_MAX_WIDTH,
CONF_STREAM_ADDRESS,
CONF_STREAM_COUNT,
CONF_STREAM_SOURCE,
CONF_SUPPORT_AUDIO,
CONF_VIDEO_CODEC,
CONF_VIDEO_MAP,
CONF_VIDEO_PACKET_SIZE,
DEFAULT_AUDIO_CODEC,
DEFAULT_AUDIO_MAP,
DEFAULT_AUDIO_PACKET_SIZE,
DEFAULT_LOW_BATTERY_THRESHOLD,
DEFAULT_MAX_FPS,
DEFAULT_MAX_HEIGHT,
DEFAULT_MAX_WIDTH,
DEFAULT_STREAM_COUNT,
DEFAULT_SUPPORT_AUDIO,
DEFAULT_VIDEO_CODEC,
DEFAULT_VIDEO_MAP,
DEFAULT_VIDEO_PACKET_SIZE,
DOMAIN,
FEATURE_ON_OFF,
FEATURE_PLAY_PAUSE,
FEATURE_PLAY_STOP,
FEATURE_TOGGLE_MUTE,
HOMEKIT_PAIRING_QR,
HOMEKIT_PAIRING_QR_SECRET,
TYPE_FAUCET,
TYPE_OUTLET,
TYPE_SHOWER,
TYPE_SPRINKLER,
TYPE_SWITCH,
TYPE_VALVE,
VIDEO_CODEC_COPY,
VIDEO_CODEC_H264_OMX,
VIDEO_CODEC_LIBX264,
)
_LOGGER = logging.getLogger(__name__)
MAX_PORT = 65535
VALID_VIDEO_CODECS = [VIDEO_CODEC_LIBX264, VIDEO_CODEC_H264_OMX, AUDIO_CODEC_COPY]
VALID_AUDIO_CODECS = [AUDIO_CODEC_OPUS, VIDEO_CODEC_COPY]
BASIC_INFO_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_LINKED_BATTERY_SENSOR): cv.entity_domain(sensor.DOMAIN),
vol.Optional(CONF_LINKED_BATTERY_CHARGING_SENSOR): cv.entity_domain(
binary_sensor.DOMAIN
),
vol.Optional(
CONF_LOW_BATTERY_THRESHOLD, default=DEFAULT_LOW_BATTERY_THRESHOLD
): cv.positive_int,
}
)
FEATURE_SCHEMA = BASIC_INFO_SCHEMA.extend(
{vol.Optional(CONF_FEATURE_LIST, default=None): cv.ensure_list}
)
CAMERA_SCHEMA = BASIC_INFO_SCHEMA.extend(
{
vol.Optional(CONF_STREAM_ADDRESS): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_STREAM_SOURCE): cv.string,
vol.Optional(CONF_AUDIO_CODEC, default=DEFAULT_AUDIO_CODEC): vol.In(
VALID_AUDIO_CODECS
),
vol.Optional(CONF_SUPPORT_AUDIO, default=DEFAULT_SUPPORT_AUDIO): cv.boolean,
vol.Optional(CONF_MAX_WIDTH, default=DEFAULT_MAX_WIDTH): cv.positive_int,
vol.Optional(CONF_MAX_HEIGHT, default=DEFAULT_MAX_HEIGHT): cv.positive_int,
vol.Optional(CONF_MAX_FPS, default=DEFAULT_MAX_FPS): cv.positive_int,
vol.Optional(CONF_AUDIO_MAP, default=DEFAULT_AUDIO_MAP): cv.string,
vol.Optional(CONF_VIDEO_MAP, default=DEFAULT_VIDEO_MAP): cv.string,
vol.Optional(CONF_STREAM_COUNT, default=DEFAULT_STREAM_COUNT): vol.All(
vol.Coerce(int), vol.Range(min=1, max=10)
),
vol.Optional(CONF_VIDEO_CODEC, default=DEFAULT_VIDEO_CODEC): vol.In(
VALID_VIDEO_CODECS
),
vol.Optional(
CONF_AUDIO_PACKET_SIZE, default=DEFAULT_AUDIO_PACKET_SIZE
): cv.positive_int,
vol.Optional(
CONF_VIDEO_PACKET_SIZE, default=DEFAULT_VIDEO_PACKET_SIZE
): cv.positive_int,
vol.Optional(CONF_LINKED_MOTION_SENSOR): cv.entity_domain(binary_sensor.DOMAIN),
vol.Optional(CONF_LINKED_DOORBELL_SENSOR): cv.entity_domain(
binary_sensor.DOMAIN
),
}
)
HUMIDIFIER_SCHEMA = BASIC_INFO_SCHEMA.extend(
{vol.Optional(CONF_LINKED_HUMIDITY_SENSOR): cv.entity_domain(sensor.DOMAIN)}
)
COVER_SCHEMA = BASIC_INFO_SCHEMA.extend(
{
vol.Optional(CONF_LINKED_OBSTRUCTION_SENSOR): cv.entity_domain(
binary_sensor.DOMAIN
)
}
)
CODE_SCHEMA = BASIC_INFO_SCHEMA.extend(
{vol.Optional(ATTR_CODE, default=None): vol.Any(None, cv.string)}
)
MEDIA_PLAYER_SCHEMA = vol.Schema(
{
vol.Required(CONF_FEATURE): vol.All(
cv.string,
vol.In(
(
FEATURE_ON_OFF,
FEATURE_PLAY_PAUSE,
FEATURE_PLAY_STOP,
FEATURE_TOGGLE_MUTE,
)
),
)
}
)
SWITCH_TYPE_SCHEMA = BASIC_INFO_SCHEMA.extend(
{
vol.Optional(CONF_TYPE, default=TYPE_SWITCH): vol.All(
cv.string,
vol.In(
(
TYPE_FAUCET,
TYPE_OUTLET,
TYPE_SHOWER,
TYPE_SPRINKLER,
TYPE_SWITCH,
TYPE_VALVE,
)
),
)
}
)
HOMEKIT_CHAR_TRANSLATIONS = {
0: " ", # nul
10: " ", # nl
13: " ", # cr
33: "-", # !
34: " ", # "
36: "-", # $
37: "-", # %
40: "-", # (
41: "-", # )
42: "-", # *
43: "-", # +
47: "-", # /
58: "-", # :
59: "-", # ;
60: "-", # <
61: "-", # =
62: "-", # >
63: "-", # ?
64: "-", # @
91: "-", # [
92: "-", # \
93: "-", # ]
94: "-", # ^
95: " ", # _
96: "-", # `
123: "-", # {
124: "-", # |
125: "-", # }
126: "-", # ~
127: "-", # del
}
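# Editor's note (hedged): the table above is an str.translate() mapping of code
# points that are known to cause problems in HomeKit accessory names;
# cleanup_name_for_homekit() below applies it, turning most punctuation into "-"
# and control characters into " ".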
def validate_entity_config(values):
"""Validate config entry for CONF_ENTITY."""
if not isinstance(values, dict):
raise vol.Invalid("expected a dictionary")
entities = {}
for entity_id, config in values.items():
entity = cv.entity_id(entity_id)
domain, _ = split_entity_id(entity)
if not isinstance(config, dict):
raise vol.Invalid(f"The configuration for {entity} must be a dictionary.")
if domain in ("alarm_control_panel", "lock"):
config = CODE_SCHEMA(config)
elif domain == media_player.const.DOMAIN:
config = FEATURE_SCHEMA(config)
feature_list = {}
for feature in config[CONF_FEATURE_LIST]:
params = MEDIA_PLAYER_SCHEMA(feature)
key = params.pop(CONF_FEATURE)
if key in feature_list:
raise vol.Invalid(f"A feature can be added only once for {entity}")
feature_list[key] = params
config[CONF_FEATURE_LIST] = feature_list
elif domain == "camera":
config = CAMERA_SCHEMA(config)
elif domain == "switch":
config = SWITCH_TYPE_SCHEMA(config)
elif domain == "humidifier":
config = HUMIDIFIER_SCHEMA(config)
elif domain == "cover":
config = COVER_SCHEMA(config)
else:
config = BASIC_INFO_SCHEMA(config)
entities[entity] = config
return entities
def get_media_player_features(state):
"""Determine features for media players."""
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
supported_modes = []
if features & (
media_player.const.SUPPORT_TURN_ON | media_player.const.SUPPORT_TURN_OFF
):
supported_modes.append(FEATURE_ON_OFF)
if features & (media_player.const.SUPPORT_PLAY | media_player.const.SUPPORT_PAUSE):
supported_modes.append(FEATURE_PLAY_PAUSE)
if features & (media_player.const.SUPPORT_PLAY | media_player.const.SUPPORT_STOP):
supported_modes.append(FEATURE_PLAY_STOP)
if features & media_player.const.SUPPORT_VOLUME_MUTE:
supported_modes.append(FEATURE_TOGGLE_MUTE)
return supported_modes
def validate_media_player_features(state, feature_list):
"""Validate features for media players."""
supported_modes = get_media_player_features(state)
if not supported_modes:
_LOGGER.error("%s does not support any media_player features", state.entity_id)
return False
if not feature_list:
# Auto detected
return True
error_list = []
for feature in feature_list:
if feature not in supported_modes:
error_list.append(feature)
if error_list:
_LOGGER.error(
"%s does not support media_player features: %s", state.entity_id, error_list
)
return False
return True
def show_setup_message(hass, entry_id, bridge_name, pincode, uri):
"""Display persistent notification with setup information."""
pin = pincode.decode()
_LOGGER.info("Pincode: %s", pin)
buffer = io.BytesIO()
url = pyqrcode.create(uri)
url.svg(buffer, scale=5, module_color="#000", background="#FFF")
pairing_secret = secrets.token_hex(32)
hass.data[DOMAIN][entry_id][HOMEKIT_PAIRING_QR] = buffer.getvalue()
hass.data[DOMAIN][entry_id][HOMEKIT_PAIRING_QR_SECRET] = pairing_secret
message = (
f"To set up {bridge_name} in the Home App, "
f"scan the QR code or enter the following code:\n"
f"### {pin}\n"
f""
)
hass.components.persistent_notification.create(message, "HomeKit Pairing", entry_id)
def dismiss_setup_message(hass, entry_id):
"""Dismiss persistent notification and remove QR code."""
hass.components.persistent_notification.dismiss(entry_id)
def convert_to_float(state):
"""Return float of state, catch errors."""
try:
return float(state)
except (ValueError, TypeError):
return None
def cleanup_name_for_homekit(name):
"""Ensure the name of the device will not crash homekit."""
#
# This is not a security measure.
#
# UNICODE_EMOJI is also not allowed but that
# likely isn't a problem
return name.translate(HOMEKIT_CHAR_TRANSLATIONS)
def temperature_to_homekit(temperature, unit):
"""Convert temperature to Celsius for HomeKit."""
return round(temp_util.convert(temperature, unit, TEMP_CELSIUS), 1)
def temperature_to_states(temperature, unit):
"""Convert temperature back from Celsius to Home Assistant unit."""
return round(temp_util.convert(temperature, TEMP_CELSIUS, unit) * 2) / 2
def density_to_air_quality(density):
"""Map PM2.5 density to HomeKit AirQuality level."""
if density <= 35:
return 1
if density <= 75:
return 2
if density <= 115:
return 3
if density <= 150:
return 4
return 5
def get_persist_filename_for_entry_id(entry_id: str):
"""Determine the filename of the homekit state file."""
return f"{DOMAIN}.{entry_id}.state"
def get_aid_storage_filename_for_entry_id(entry_id: str):
"""Determine the ilename of homekit aid storage file."""
return f"{DOMAIN}.{entry_id}.aids"
def get_persist_fullpath_for_entry_id(hass: HomeAssistant, entry_id: str):
"""Determine the path to the homekit state file."""
return hass.config.path(STORAGE_DIR, get_persist_filename_for_entry_id(entry_id))
def get_aid_storage_fullpath_for_entry_id(hass: HomeAssistant, entry_id: str):
"""Determine the path to the homekit aid storage file."""
return hass.config.path(
STORAGE_DIR, get_aid_storage_filename_for_entry_id(entry_id)
)
def format_sw_version(version):
"""Extract the version string in a format homekit can consume."""
match = re.search(r"([0-9]+)(\.[0-9]+)?(\.[0-9]+)?", str(version).replace("-", "."))
if match:
return match.group(0)
return None
def remove_state_files_for_entry_id(hass: HomeAssistant, entry_id: str):
"""Remove the state files from disk."""
persist_file_path = get_persist_fullpath_for_entry_id(hass, entry_id)
aid_storage_path = get_aid_storage_fullpath_for_entry_id(hass, entry_id)
os.unlink(persist_file_path)
if os.path.exists(aid_storage_path):
os.unlink(aid_storage_path)
return True
def _get_test_socket():
"""Create a socket to test binding ports."""
test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
test_socket.setblocking(False)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return test_socket
def port_is_available(port: int) -> bool:
"""Check to see if a port is available."""
test_socket = _get_test_socket()
try:
test_socket.bind(("", port))
except OSError:
return False
return True
async def async_find_next_available_port(hass: HomeAssistant, start_port: int) -> int:
"""Find the next available port not assigned to a config entry."""
exclude_ports = {
entry.data[CONF_PORT]
for entry in hass.config_entries.async_entries(DOMAIN)
if CONF_PORT in entry.data
}
return await hass.async_add_executor_job(
_find_next_available_port, start_port, exclude_ports
)
def _find_next_available_port(start_port: int, exclude_ports: set) -> int:
"""Find the next available port starting with the given port."""
test_socket = _get_test_socket()
for port in range(start_port, MAX_PORT):
if port in exclude_ports:
continue
try:
test_socket.bind(("", port))
return port
except OSError:
if port == MAX_PORT:
raise
continue
def pid_is_alive(pid) -> bool:
"""Check to see if a process is alive."""
try:
os.kill(pid, 0)
return True
except OSError:
pass
return False
def accessory_friendly_name(hass_name, accessory):
"""Return the combined name for the accessory.
    The mDNS name and the Home Assistant config entry
    name are usually different, which means users need
    to see both to identify the accessory.
"""
accessory_mdns_name = accessory.display_name
if hass_name.casefold().startswith(accessory_mdns_name.casefold()):
return hass_name
if accessory_mdns_name.casefold().startswith(hass_name.casefold()):
return accessory_mdns_name
return f"{hass_name} ({accessory_mdns_name})"
def state_needs_accessory_mode(state):
"""Return if the entity represented by the state must be paired in accessory mode."""
if state.domain == CAMERA_DOMAIN:
return True
return (
state.domain == LOCK_DOMAIN
or state.domain == MEDIA_PLAYER_DOMAIN
and state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TV
or state.domain == REMOTE_DOMAIN
and state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) & SUPPORT_ACTIVITY
)
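# Editor's usage sketch (hedged, not part of the original module): exercises a few
# of the pure helpers defined above; all input values are illustrative.
if __name__ == '__main__':
    print(cleanup_name_for_homekit('Living Room: Lamp'))   # ':' becomes '-'
    print(density_to_air_quality(40))                      # 2 (PM2.5 band <= 75)
    print(format_sw_version('2021.1.0-dev'))               # '2021.1.0'
    print(convert_to_float('21.5'))                        # 21.5
    print(convert_to_float('unavailable'))                 # None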
|
|
from __future__ import division, absolute_import, print_function
import numbers
import operator
import sys
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises
PY2 = sys.version_info.major < 3
# NOTE: This class should be kept as an exact copy of the example from the
# docstring for NDArrayOperatorsMixin.
class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
def __init__(self, value):
self.value = np.asarray(value)
# One might also consider adding the built-in list type to this
# list, to support operations like np.add(array_like, list)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.get('out', ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use ArrayLike instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle ArrayLike objects.
if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x.value if isinstance(x, ArrayLike) else x
for x in inputs)
if out:
kwargs['out'] = tuple(
x.value if isinstance(x, ArrayLike) else x
for x in out)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple:
# multiple return values
return tuple(type(self)(x) for x in result)
elif method == 'at':
# no return value
return None
else:
# one return value
return type(self)(result)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.value)
def wrap_array_like(result):
if type(result) is tuple:
return tuple(ArrayLike(r) for r in result)
else:
return ArrayLike(result)
def _assert_equal_type_and_value(result, expected, err_msg=None):
assert_equal(type(result), type(expected), err_msg=err_msg)
if isinstance(result, tuple):
assert_equal(len(result), len(expected), err_msg=err_msg)
for result_item, expected_item in zip(result, expected):
_assert_equal_type_and_value(result_item, expected_item, err_msg)
else:
assert_equal(result.value, expected.value, err_msg=err_msg)
assert_equal(getattr(result.value, 'dtype', None),
getattr(expected.value, 'dtype', None), err_msg=err_msg)
_ALL_BINARY_OPERATORS = [
operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
# TODO: test div on Python 2, only
operator.mod,
divmod,
pow,
operator.lshift,
operator.rshift,
operator.and_,
operator.xor,
operator.or_,
]
class TestNDArrayOperatorsMixin(object):
def test_array_like_add(self):
def check(result):
_assert_equal_type_and_value(result, ArrayLike(0))
check(ArrayLike(0) + 0)
check(0 + ArrayLike(0))
check(ArrayLike(0) + np.array(0))
check(np.array(0) + ArrayLike(0))
check(ArrayLike(np.array(0)) + 0)
check(0 + ArrayLike(np.array(0)))
check(ArrayLike(np.array(0)) + np.array(0))
check(np.array(0) + ArrayLike(np.array(0)))
def test_inplace(self):
array_like = ArrayLike(np.array([0]))
array_like += 1
_assert_equal_type_and_value(array_like, ArrayLike(np.array([1])))
array = np.array([0])
array += ArrayLike(1)
_assert_equal_type_and_value(array, ArrayLike(np.array([1])))
def test_opt_out(self):
class OptOut(object):
"""Object that opts out of __array_ufunc__."""
__array_ufunc__ = None
def __add__(self, other):
return self
def __radd__(self, other):
return self
array_like = ArrayLike(1)
opt_out = OptOut()
# supported operations
assert_(array_like + opt_out is opt_out)
assert_(opt_out + array_like is opt_out)
# not supported
with assert_raises(TypeError):
# don't use the Python default, array_like = array_like + opt_out
array_like += opt_out
with assert_raises(TypeError):
array_like - opt_out
with assert_raises(TypeError):
opt_out - array_like
def test_subclass(self):
class SubArrayLike(ArrayLike):
"""Should take precedence over ArrayLike."""
x = ArrayLike(0)
y = SubArrayLike(1)
_assert_equal_type_and_value(x + y, y)
_assert_equal_type_and_value(y + x, y)
def test_object(self):
x = ArrayLike(0)
obj = object()
with assert_raises(TypeError):
x + obj
with assert_raises(TypeError):
obj + x
with assert_raises(TypeError):
x += obj
def test_unary_methods(self):
array = np.array([-1, 0, 1, 2])
array_like = ArrayLike(array)
for op in [operator.neg,
operator.pos,
abs,
operator.invert]:
_assert_equal_type_and_value(op(array_like), ArrayLike(op(array)))
def test_forward_binary_methods(self):
array = np.array([-1, 0, 1, 2])
array_like = ArrayLike(array)
for op in _ALL_BINARY_OPERATORS:
expected = wrap_array_like(op(array, 1))
actual = op(array_like, 1)
err_msg = 'failed for operator {}'.format(op)
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
def test_reflected_binary_methods(self):
for op in _ALL_BINARY_OPERATORS:
expected = wrap_array_like(op(2, 1))
actual = op(2, ArrayLike(1))
err_msg = 'failed for operator {}'.format(op)
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
def test_ufunc_at(self):
array = ArrayLike(np.array([1, 2, 3, 4]))
assert_(np.negative.at(array, np.array([0, 1])) is None)
_assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4]))
def test_ufunc_two_outputs(self):
mantissa, exponent = np.frexp(2 ** -3)
expected = (ArrayLike(mantissa), ArrayLike(exponent))
_assert_equal_type_and_value(
np.frexp(ArrayLike(2 ** -3)), expected)
_assert_equal_type_and_value(
np.frexp(ArrayLike(np.array(2 ** -3))), expected)
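# Editor's usage sketch (hedged, not part of the original test module): shows the
# behaviour the tests above exercise; run directly to see the wrapped results.
if __name__ == '__main__':
    a = ArrayLike(np.array([1.0, 2.0, 3.0]))
    print(a + 1)              # ArrayLike(array([2., 3., 4.]))
    print(np.multiply(a, 2))  # ufuncs also return ArrayLike
    m, e = np.frexp(a)        # two-output ufuncs return a tuple of ArrayLike
    print(m, e)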
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Fedor Vompe <f.vompe () comptek.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_vm_info
short_description: Return basic info pertaining to a VMware machine guest
description:
- Return basic information pertaining to a vSphere or ESXi virtual machine guest.
- Cluster name as a fact was added in version 2.7.
- This module was called C(vmware_vm_facts) before Ansible 2.9. The usage did not change.
version_added: '2.0'
author:
- Joseph Callen (@jcpowermac)
- Abhijeet Kasurde (@Akasurde)
- Fedor Vompe (@sumkincpp)
notes:
- Tested on ESXi 6.7, vSphere 5.5 and vSphere 6.5
- From 2.8 onwards, information is returned as a list of dicts instead of a dict.
requirements:
- python >= 2.6
- PyVmomi
options:
vm_type:
description:
    - If set to C(vm), then information is gathered for virtual machines only.
    - If set to C(template), then information is gathered for virtual machine templates only.
    - If set to C(all), then information is gathered for all virtual machines and virtual machine templates.
required: False
default: 'all'
choices: [ all, vm, template ]
version_added: 2.5
type: str
show_attribute:
description:
    - Attributes related to the VM guest are shown in the information only when this is set to C(true).
default: no
type: bool
version_added: 2.8
folder:
description:
- Specify a folder location of VMs to gather information from.
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
type: str
version_added: 2.9
show_tag:
description:
    - Tags related to the virtual machine are shown if this is set to C(True).
default: False
type: bool
version_added: 2.9
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather all registered virtual machines
vmware_vm_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
delegate_to: localhost
register: vminfo
- debug:
var: vminfo.virtual_machines
- name: Gather only registered virtual machine templates
vmware_vm_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
vm_type: template
delegate_to: localhost
register: template_info
- debug:
var: template_info.virtual_machines
- name: Gather only registered virtual machines
vmware_vm_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
vm_type: vm
delegate_to: localhost
register: vm_info
- debug:
var: vm_info.virtual_machines
- name: Get UUID from given VM Name
block:
- name: Get virtual machine info
vmware_vm_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
folder: "/datacenter/vm/folder"
delegate_to: localhost
register: vm_info
- debug:
msg: "{{ item.uuid }}"
with_items:
- "{{ vm_info.virtual_machines | json_query(query) }}"
vars:
query: "[?guest_name=='DC0_H0_VM0']"
- name: Get Tags from given VM Name
block:
- name: Get virtual machine info
vmware_vm_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
folder: "/datacenter/vm/folder"
delegate_to: localhost
register: vm_info
- debug:
msg: "{{ item.tags }}"
with_items:
- "{{ vm_info.virtual_machines | json_query(query) }}"
vars:
query: "[?guest_name=='DC0_H0_VM0']"
'''
RETURN = r'''
virtual_machines:
    description: list of dictionaries of virtual machines and their information
returned: success
type: list
sample: [
{
"guest_name": "ubuntu_t",
"cluster": null,
"esxi_hostname": "10.76.33.226",
"guest_fullname": "Ubuntu Linux (64-bit)",
"ip_address": "",
"mac_address": [
"00:50:56:87:a5:9a"
],
"power_state": "poweredOff",
"uuid": "4207072c-edd8-3bd5-64dc-903fd3a0db04",
"vm_network": {
"00:50:56:87:a5:9a": {
"ipv4": [
"10.76.33.228"
],
"ipv6": []
}
},
"attributes": {
"job": "backup-prepare"
},
"tags": [
{
"category_id": "urn:vmomi:InventoryServiceCategory:b316cc45-f1a9-4277-811d-56c7e7975203:GLOBAL",
"category_name": "cat_0001",
"description": "",
"id": "urn:vmomi:InventoryServiceTag:43737ec0-b832-4abf-abb1-fd2448ce3b26:GLOBAL",
"name": "tag_0001"
}
]
}
]
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, get_all_objs, vmware_argument_spec, _get_vm_prop
from ansible.module_utils.vmware_rest_client import VmwareRestClient
class VmwareVmInfo(PyVmomi):
def __init__(self, module):
super(VmwareVmInfo, self).__init__(module)
def get_tag_info(self, vm_dynamic_obj):
vmware_client = VmwareRestClient(self.module)
return vmware_client.get_tags_for_vm(vm_mid=vm_dynamic_obj._moId)
def get_vm_attributes(self, vm):
return dict((x.name, v.value) for x in self.custom_field_mgr
for v in vm.customValue if x.key == v.key)
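    # Note: self.custom_field_mgr (exposed by the PyVmomi helper base class) is
    # assumed to hold the vCenter custom field definitions; a VM stores only
    # key/value pairs, so the dict comprehension above joins the two on the
    # field key to build the name -> value mapping returned as 'attributes'.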
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getallvms.py
def get_all_virtual_machines(self):
"""
Get all virtual machines and related configurations information
"""
folder = self.params.get('folder')
folder_obj = None
if folder:
folder_obj = self.content.searchIndex.FindByInventoryPath(folder)
if not folder_obj:
self.module.fail_json(msg="Failed to find folder specified by %(folder)s" % self.params)
virtual_machines = get_all_objs(self.content, [vim.VirtualMachine], folder=folder_obj)
_virtual_machines = []
for vm in virtual_machines:
_ip_address = ""
summary = vm.summary
if summary.guest is not None:
_ip_address = summary.guest.ipAddress
if _ip_address is None:
_ip_address = ""
_mac_address = []
all_devices = _get_vm_prop(vm, ('config', 'hardware', 'device'))
if all_devices:
for dev in all_devices:
if isinstance(dev, vim.vm.device.VirtualEthernetCard):
_mac_address.append(dev.macAddress)
net_dict = {}
vmnet = _get_vm_prop(vm, ('guest', 'net'))
if vmnet:
for device in vmnet:
net_dict[device.macAddress] = dict()
net_dict[device.macAddress]['ipv4'] = []
net_dict[device.macAddress]['ipv6'] = []
for ip_addr in device.ipAddress:
if "::" in ip_addr:
net_dict[device.macAddress]['ipv6'].append(ip_addr)
else:
net_dict[device.macAddress]['ipv4'].append(ip_addr)
esxi_hostname = None
esxi_parent = None
if summary.runtime.host:
esxi_hostname = summary.runtime.host.summary.config.name
esxi_parent = summary.runtime.host.parent
cluster_name = None
if esxi_parent and isinstance(esxi_parent, vim.ClusterComputeResource):
cluster_name = summary.runtime.host.parent.name
vm_attributes = dict()
if self.module.params.get('show_attribute'):
vm_attributes = self.get_vm_attributes(vm)
vm_tags = list()
if self.module.params.get('show_tag'):
vm_tags = self.get_tag_info(vm)
virtual_machine = {
"guest_name": summary.config.name,
"guest_fullname": summary.config.guestFullName,
"power_state": summary.runtime.powerState,
"ip_address": _ip_address, # Kept for backward compatibility
"mac_address": _mac_address, # Kept for backward compatibility
"uuid": summary.config.uuid,
"vm_network": net_dict,
"esxi_hostname": esxi_hostname,
"cluster": cluster_name,
"attributes": vm_attributes,
"tags": vm_tags
}
vm_type = self.module.params.get('vm_type')
is_template = _get_vm_prop(vm, ('config', 'template'))
if vm_type == 'vm' and not is_template:
_virtual_machines.append(virtual_machine)
elif vm_type == 'template' and is_template:
_virtual_machines.append(virtual_machine)
elif vm_type == 'all':
_virtual_machines.append(virtual_machine)
return _virtual_machines
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
vm_type=dict(type='str', choices=['vm', 'all', 'template'], default='all'),
show_attribute=dict(type='bool', default='no'),
show_tag=dict(type='bool', default=False),
folder=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if module._name == 'vmware_vm_facts':
module.deprecate("The 'vmware_vm_facts' module has been renamed to 'vmware_vm_info'", version='2.13')
vmware_vm_info = VmwareVmInfo(module)
_virtual_machines = vmware_vm_info.get_all_virtual_machines()
module.exit_json(changed=False, virtual_machines=_virtual_machines)
if __name__ == '__main__':
main()
|
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
# This file deals with the ifmap id handling for both vnc-user-visible entities
# and bgp-visible entities
import uuid
import re
import ssl
import StringIO
from lxml import etree
from ifmap.client import client, namespaces
from ifmap.request import NewSessionRequest, RenewSessionRequest, \
EndSessionRequest, PublishRequest, SearchRequest, \
SubscribeRequest, PurgeRequest, PollRequest
from ifmap.id import IPAddress, MACAddress, Device, AccessRequest, Identity, \
CustomIdentity
from ifmap.operations import PublishUpdateOperation, PublishNotifyOperation, \
PublishDeleteOperation, SubscribeUpdateOperation,\
SubscribeDeleteOperation
from ifmap.util import attr, link_ids
from ifmap.response import Response, newSessionResult
from ifmap.metadata import Metadata
_TENANT_GRP = "(?P<tenant_uuid>.*)"
_VPC_GRP = "(?P<vpc_name>.*)"
_VN_GRP = "(?P<vn_name>.*)"
_SG_GRP = "(?P<sg_name>.*)"
_POL_GRP = "(?P<pol_name>.*)"
_INST_GRP = "(?P<instance_uuid>.*)"
_PORT_GRP = "(?P<port_id>.*)"
_TENANT_ID_RE = "contrail:tenant:%s" % (_TENANT_GRP)
_VPC_NAME_RE = "contrail:network-group:%s:%s" % (_TENANT_GRP, _VPC_GRP)
_VN_NAME_RE = "contrail:virtual-network:%s:%s:%s" % (
_TENANT_GRP, _VPC_GRP, _VN_GRP)
_SG_NAME_RE = "contrail:security-group:%s:%s:%s" % (
_TENANT_GRP, _VPC_GRP, _SG_GRP)
_POL_NAME_RE = "contrail:policy:%s:%s:%s" % (_TENANT_GRP, _VPC_GRP, _POL_GRP)
_INST_ID_RE = "contrail:instance:%s:%s:%s:%s" \
% (_TENANT_GRP, _VPC_GRP, _VN_GRP, _INST_GRP)
_PORT_ID_RE = "contrail:port:%s:%s:%s:%s:%s" \
% (_TENANT_GRP, _VPC_GRP, _VN_GRP, _INST_GRP, _PORT_GRP)
_CT_NS = "contrail"
_ROOT_IMID = _CT_NS + ":config-root:root"
_SOAP_XSD = "http://www.w3.org/2003/05/soap-envelope"
_IFMAP_XSD = "http://www.trustedcomputinggroup.org/2010/IFMAP/2"
_IFMAP_META_XSD = "http://www.trustedcomputinggroup.org/2010/IFMAP-METADATA/2"
_CONTRAIL_XSD = "http://www.contrailsystems.com/vnc_cfg.xsd"
# Parse ifmap-server returned search results and create list of tuples
# of (ident-1, ident-2, link-attribs)
def parse_result_items(result_items, my_imid=None):
all_result_list = []
for r_item in result_items:
children = r_item.getchildren()
num_children = len(children)
if num_children == 1: # ignore ident-only result-items
continue
elif num_children == 2:
result_info = [children[0], None, children[1]]
elif num_children == 3:
result_info = [children[0], children[1], children[2]]
else:
raise Exception('Result item of length %s not handled!'
% (num_children))
all_result_list.append(result_info)
if not my_imid:
return all_result_list
# strip ones that don't originate from or to my_imid
filtered_result_list = []
for (ident_1, ident_2, meta) in all_result_list:
if (((ident_2 is not None) and (ident_2.attrib['name'] == my_imid)) or
(ident_1.attrib['name'] == my_imid)):
if meta is None:
filtered_result_list.append((ident_1, ident_2, None))
else:
# search gives all props under one metadata. expand it.
for m_elem in meta:
filtered_result_list.append((ident_1, ident_2, m_elem))
return filtered_result_list
# end parse_result_items
def get_ifmap_id_from_fq_name(type, fq_name):
my_fqn = ':' + ':'.join(fq_name)
my_imid = 'contrail:' + type + my_fqn
return my_imid
# end get_ifmap_id_from_fq_name
def get_type_from_ifmap_id(ifmap_id):
type = ifmap_id.split(':')[1]
return type
# end get_type_from_ifmap_id
def get_fq_name_str_from_ifmap_id(ifmap_id):
return re.sub(r'contrail:.*?:', '', ifmap_id)
# end get_fq_name_str_from_ifmap_id
def get_fq_name_from_ifmap_id(ifmap_id):
return ifmap_id.split(':')[2:]
# end get_fq_name_from_ifmap_id
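# Illustrative example (made-up fq_name values, not part of the original code):
# the helpers above compose as a simple round trip.
def _example_ifmap_id_round_trip():
    imid = get_ifmap_id_from_fq_name('virtual-network',
                                     ['default-domain', 'proj1', 'net1'])
    # imid == 'contrail:virtual-network:default-domain:proj1:net1'
    assert get_type_from_ifmap_id(imid) == 'virtual-network'
    assert get_fq_name_from_ifmap_id(imid) == ['default-domain', 'proj1', 'net1']
    assert get_fq_name_str_from_ifmap_id(imid) == 'default-domain:proj1:net1'
# end _example_ifmap_id_round_trip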
def subscribe_root(ssrc_mapc):
#self._ident_type_subscribe(_CLOUD_IMID, "ct:member-of")
ident = str(Identity(name=_ROOT_IMID, type="other",
other_type="extended"))
subreq = SubscribeRequest(
ssrc_mapc.get_session_id(),
operations=str(SubscribeUpdateOperation("root", ident,
{"max-depth": "255", })))
result = ssrc_mapc.call('subscribe', subreq)
# end subscribe_root
def ssrc_initialize(args):
ssrc_mapc = ifmap_server_connect(args)
result = ssrc_mapc.call('newSession', NewSessionRequest())
ssrc_mapc.set_session_id(newSessionResult(result).get_session_id())
ssrc_mapc.set_publisher_id(newSessionResult(result).get_publisher_id())
subscribe_root(ssrc_mapc)
return ssrc_mapc
# end ssrc_initialize
def arc_initialize(args, ssrc_mapc):
#
# Poll requests go on ARC channel which don't do newSession but
# share session-id with ssrc channel. so 2 connections to server but 1
# session/session-id in ifmap-server (mamma mia!)
#
arc_mapc = ifmap_server_connect(args)
arc_mapc.set_session_id(ssrc_mapc.get_session_id())
arc_mapc.set_publisher_id(ssrc_mapc.get_publisher_id())
return arc_mapc
# end arc_initialize
def ifmap_server_connect(args):
_CLIENT_NAMESPACES = {
'env': _SOAP_XSD,
'ifmap': _IFMAP_XSD,
'meta': _IFMAP_META_XSD,
_CT_NS: _CONTRAIL_XSD
}
ssl_options = None
if args.use_certs:
ssl_options = {
'keyfile': args.keyfile,
'certfile': args.certfile,
'ca_certs': args.ca_certs,
'cert_reqs': ssl.CERT_REQUIRED,
'ciphers': 'ALL'
}
return client(("%s" % (args.ifmap_server_ip),
"%s" % (args.ifmap_server_port)),
args.ifmap_username, args.ifmap_password,
_CLIENT_NAMESPACES, ssl_options)
# end ifmap_server_connect
def parse_poll_result(poll_result_str):
_XPATH_NAMESPACES = {
'a': _SOAP_XSD,
'b': _IFMAP_XSD,
'c': _CONTRAIL_XSD
}
soap_doc = etree.parse(StringIO.StringIO(poll_result_str))
#soap_doc.write(sys.stdout, pretty_print=True)
xpath_error = '/a:Envelope/a:Body/b:response/errorResult'
error_results = soap_doc.xpath(xpath_error,
namespaces=_XPATH_NAMESPACES)
if error_results:
raise Exception(etree.tostring(error_results[0]))
xpath_expr = '/a:Envelope/a:Body/b:response/pollResult'
poll_results = soap_doc.xpath(xpath_expr,
namespaces=_XPATH_NAMESPACES)
result_list = []
for result in poll_results:
children = result.getchildren()
for child in children:
result_type = child.tag
if result_type == 'errorResult':
raise Exception(etree.tostring(child))
result_items = child.getchildren()
item_list = parse_result_items(result_items)
for item in item_list:
ident1 = item[0]
ident2 = item[1]
meta = item[2]
idents = {}
ident1_imid = ident1.attrib['name']
ident1_type = get_type_from_ifmap_id(ident1_imid)
idents[ident1_type] = get_fq_name_str_from_ifmap_id(
ident1_imid)
if ident2 is not None:
ident2_imid = ident2.attrib['name']
ident2_type = get_type_from_ifmap_id(ident2_imid)
if ident1_type == ident2_type:
idents[ident1_type] = [
idents[ident1_type],
get_fq_name_str_from_ifmap_id(ident2_imid)]
else:
idents[ident2_type] = get_fq_name_str_from_ifmap_id(
ident2_imid)
result_list.append((result_type, idents, meta))
return result_list
# end parse_poll_result
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
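# Each command branch below prompts for its parameters on stdin and forwards
# them to the coin daemon's JSON-RPC interface on 127.0.0.1:46442 through the
# ServiceProxy created below.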
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:46442")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:46442")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Rescuecoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Rescuecoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
import os
from mutagen.flac import FLAC, Picture
from convert_music import find_inconsistent_tags
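# The tests below build FLAC files named
# '<artist> - <date> - <album> - <track> - <title>.flac' and expect
# find_inconsistent_tags() to report any mismatch between those filename fields
# and the corresponding Vorbis tags, plus missing album art and lyrics unless
# the two trailing boolean arguments ask it to ignore them.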
def test_invalid_filename(tmpdir):
"""Test when the FLAC filename is invalid."""
flac_dir = tmpdir.mkdir('flac')
flac = flac_dir.join('Artist2 - Album - 01 - Title.flac').ensure(file=True)
a_messages = find_inconsistent_tags([str(flac.realpath())])
e_messages = {str(flac.realpath()): ["Filename doesn't have five items."]}
assert e_messages == a_messages
def test_invalid_file(tmpdir):
"""Test when FLAC file isn't really a FLAC file."""
flac_dir = tmpdir.mkdir('flac')
flac = flac_dir.join('Artist2 - 2012 - Album - 01 - Title.flac').ensure(file=True)
a_messages = find_inconsistent_tags([str(flac.realpath())])
e_messages = {str(flac.realpath()): ["Invalid file."]}
assert e_messages == a_messages
def test_no_tags(tmpdir):
"""Test FLAC file with no tags."""
flac_dir = tmpdir.mkdir('flac')
flac = flac_dir.join('Artist2 - 2012 - Album - 01 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
a_messages = find_inconsistent_tags([str(flac.realpath())])
e_messages = {str(flac.realpath()): [
"Artist mismatch: Artist2 != ",
"Album mismatch: Album != ",
"Title mismatch: Title != ",
"Date mismatch: 2012 != ",
"Track number mismatch: 01 != ",
"No album art.",
"No lyrics."
]}
assert e_messages == a_messages
def test_basic_tags(tmpdir):
"""Test when artist, album, title are the only valid tags."""
flac_dir = tmpdir.mkdir('flac')
flac = flac_dir.join('Artist2 - 2012 - Album - 01 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
tags = FLAC(str(flac.realpath()))
tags.update(dict(artist='Artist2', album='Album', title='Title'))
tags.save()
a_messages = find_inconsistent_tags([str(flac.realpath())])
e_messages = {str(flac.realpath()): [
"Date mismatch: 2012 != ",
"Track number mismatch: 01 != ",
"No album art.",
"No lyrics."
]}
assert e_messages == a_messages
def test_basic_numeric_tags(tmpdir):
"""Test when everything but lyrics/art are valid."""
flac_dir = tmpdir.mkdir('flac')
flac = flac_dir.join('Artist2 - 2012 - Album - 01 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
tags = FLAC(str(flac.realpath()))
tags.update(dict(artist='Artist2', date='2012', album='Album', tracknumber='01', title='Title'))
tags.save()
a_messages = find_inconsistent_tags([str(flac.realpath())])
e_messages = {str(flac.realpath()): [
"No album art.",
"No lyrics."
]}
assert e_messages == a_messages
def test_basic_numeric_tags_ignore_lyrics_art(tmpdir):
"""Test when everything but lyrics/art are valid, while ignoring lyrics/art."""
flac_dir = tmpdir.mkdir('flac')
flac = flac_dir.join('Artist2 - 2012 - Album - 01 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
tags = FLAC(str(flac.realpath()))
tags.update(dict(artist='Artist2', date='2012', album='Album', tracknumber='01', title='Title'))
tags.save()
a_messages = find_inconsistent_tags([str(flac.realpath())], True, True)
assert {} == a_messages
def test_art_lyrics(tmpdir):
"""Test when everything is valid."""
flac_dir = tmpdir.mkdir('flac')
flac = flac_dir.join('Artist2 - 2012 - Album - 01 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
tags = FLAC(str(flac.realpath()))
tags.update(dict(artist='Artist2', date='2012', album='Album', tracknumber='01', title='Title', unsyncedlyrics='L'))
image = Picture()
image.type, image.mime = 3, 'image/jpeg'
with open(os.path.join(os.path.dirname(__file__), '1_album_art.jpg'), 'rb') as f:
image.data = f.read()
tags.add_picture(image)
tags.save()
a_messages = find_inconsistent_tags([str(flac.realpath())], False, False)
assert {} == a_messages
def test_tag_alpha_instead_of_numeric(tmpdir):
"""Test when track number and date tags aren't integers."""
flac_dir = tmpdir.mkdir('flac')
flac = flac_dir.join('Artist2 - 2012 - Album - 01 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
tags = FLAC(str(flac.realpath()))
tags.update(dict(artist='Artist2', date='2012 ', album='Album', tracknumber='01 ', title='Title'))
tags.save()
a_messages = find_inconsistent_tags([str(flac.realpath())], True, True)
e_messages = {str(flac.realpath()): [
"Date mismatch: 2012 != 2012 ",
"Track number mismatch: 01 != 01 "
]}
assert e_messages == a_messages
def test_file_name_alpha_instead_of_numeric(tmpdir):
"""Test when track number and date file names aren't integers."""
flac_dir = tmpdir.mkdir('flac')
flac = flac_dir.join('Artist2 - 2012 - Album - 0.1 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
tags = FLAC(str(flac.realpath()))
tags.update(dict(artist='Artist2', date='2012', album='Album', tracknumber='01 ', title='Title'))
tags.save()
a_messages = find_inconsistent_tags([str(flac.realpath())], True, True)
e_messages = {str(flac.realpath()): [
"Filename date not a number.",
"Filename track number not a number."
]}
assert e_messages == a_messages
def test_single_digit(tmpdir):
"""Test for single digit track numbers (should be 2) and dates (should be 4)."""
flac_dir = tmpdir.mkdir('flac')
flac = flac_dir.join('Artist2 - 1 - Album - 1 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
tags = FLAC(str(flac.realpath()))
tags.update(dict(artist='Artist2', date='1', album='Album', tracknumber='1', title='Title'))
tags.save()
a_messages = find_inconsistent_tags([str(flac.realpath())], True, True)
e_messages = {str(flac.realpath()): [
"Filename date not four digits.",
"Filename track number not two digits."
]}
assert e_messages == a_messages
def test_one_valid_two_invalid(tmpdir):
"""Test when one FLAC file is fully valid and another one isn't."""
flac_dir = tmpdir.mkdir('flac')
flac_files = []
# Valid.
flac = flac_dir.join('Artist2 - 2012 - Album - 01 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
tags = FLAC(str(flac.realpath()))
tags.update(dict(artist='Artist2', date='2012', album='Album', tracknumber='01', title='Title'))
tags.save()
flac_files.append(str(flac.realpath()))
# Invalid.
flac = flac_dir.join('Artist - 2014 - Album - 01 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
tags = FLAC(str(flac.realpath()))
tags.update(dict(artist='Artist2', date='2014', album='Album', tracknumber='01', title='Title'))
tags.save()
flac_files.append(str(flac.realpath()))
# Test.
a_messages = find_inconsistent_tags(flac_files, True, True)
e_messages = {flac_files[1]: [
"Artist mismatch: Artist != Artist2",
]}
assert e_messages == a_messages
def test_two_invalid(tmpdir):
"""Test when two FLAC files have invalid tags."""
flac_dir = tmpdir.mkdir('flac')
flac_files = []
# One.
flac = flac_dir.join('Artist2 - 202 - Album - 01 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
tags = FLAC(str(flac.realpath()))
tags.update(dict(artist='Artist2', date='2012', album='Album', tracknumber='01', title='Title'))
tags.save()
flac_files.append(str(flac.realpath()))
# Two.
flac = flac_dir.join('Artist - 2014 - Album - 01 - Title.flac').ensure(file=True)
with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:
flac.write(f.read(), 'wb')
tags = FLAC(str(flac.realpath()))
tags.update(dict(artist='Artist2', date='2014', album='Album', tracknumber='01', title='Title'))
tags.save()
flac_files.append(str(flac.realpath()))
# Test.
a_messages = find_inconsistent_tags(flac_files, True, True)
e_messages = {
flac_files[0]: ["Filename date not four digits."],
flac_files[1]: ["Artist mismatch: Artist != Artist2"],
}
assert e_messages == a_messages
|
|
"""Support for Voice mailboxes."""
import asyncio
from contextlib import suppress
from datetime import timedelta
import logging
from aiohttp import web
from aiohttp.web_exceptions import HTTPNotFound
import async_timeout
from homeassistant.components.http import HomeAssistantView
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import async_prepare_setup_platform
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'mailbox'
EVENT = 'mailbox_updated'
CONTENT_TYPE_MPEG = 'audio/mpeg'
CONTENT_TYPE_NONE = 'none'
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup(hass, config):
"""Track states and offer events for mailboxes."""
mailboxes = []
hass.components.frontend.async_register_built_in_panel(
'mailbox', 'mailbox', 'mdi:mailbox')
hass.http.register_view(MailboxPlatformsView(mailboxes))
hass.http.register_view(MailboxMessageView(mailboxes))
hass.http.register_view(MailboxMediaView(mailboxes))
hass.http.register_view(MailboxDeleteView(mailboxes))
async def async_setup_platform(p_type, p_config=None, discovery_info=None):
"""Set up a mailbox platform."""
if p_config is None:
p_config = {}
if discovery_info is None:
discovery_info = {}
platform = await async_prepare_setup_platform(
hass, config, DOMAIN, p_type)
if platform is None:
_LOGGER.error("Unknown mailbox platform specified")
return
_LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
mailbox = None
try:
if hasattr(platform, 'async_get_handler'):
mailbox = await \
platform.async_get_handler(hass, p_config, discovery_info)
elif hasattr(platform, 'get_handler'):
mailbox = await hass.async_add_executor_job(
platform.get_handler, hass, p_config, discovery_info)
else:
raise HomeAssistantError("Invalid mailbox platform.")
if mailbox is None:
_LOGGER.error(
"Failed to initialize mailbox platform %s", p_type)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error setting up platform %s', p_type)
return
mailboxes.append(mailbox)
mailbox_entity = MailboxEntity(mailbox)
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
await component.async_add_entities([mailbox_entity])
setup_tasks = [async_setup_platform(p_type, p_config) for p_type, p_config
in config_per_platform(config, DOMAIN)]
if setup_tasks:
await asyncio.wait(setup_tasks)
async def async_platform_discovered(platform, info):
"""Handle for discovered platform."""
await async_setup_platform(platform, discovery_info=info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
return True
class MailboxEntity(Entity):
"""Entity for each mailbox platform to provide a badge display."""
def __init__(self, mailbox):
"""Initialize mailbox entity."""
self.mailbox = mailbox
self.message_count = 0
async def async_added_to_hass(self):
"""Complete entity initialization."""
@callback
def _mailbox_updated(event):
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen(EVENT, _mailbox_updated)
self.async_schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the binary sensor."""
return str(self.message_count)
@property
def name(self):
"""Return the name of the entity."""
return self.mailbox.name
async def async_update(self):
"""Retrieve messages from platform."""
messages = await self.mailbox.async_get_messages()
self.message_count = len(messages)
class Mailbox:
"""Represent a mailbox device."""
def __init__(self, hass, name):
"""Initialize mailbox object."""
self.hass = hass
self.name = name
def async_update(self):
"""Send event notification of updated mailbox."""
self.hass.bus.async_fire(EVENT)
@property
def media_type(self):
"""Return the supported media type."""
raise NotImplementedError()
@property
def can_delete(self):
"""Return if messages can be deleted."""
return False
@property
def has_media(self):
"""Return if messages have attached media files."""
return False
async def async_get_media(self, msgid):
"""Return the media blob for the msgid."""
raise NotImplementedError()
async def async_get_messages(self):
"""Return a list of the current messages."""
raise NotImplementedError()
def async_delete(self, msgid):
"""Delete the specified messages."""
raise NotImplementedError()
class StreamError(Exception):
"""Media streaming exception."""
pass
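# Illustrative sketch (not part of this component): a minimal mailbox platform
# module would subclass Mailbox and expose an async_get_handler() coroutine,
# which async_setup_platform() above calls to obtain the handler. The names
# below are hypothetical:
#
#     class DemoMailbox(Mailbox):
#         @property
#         def media_type(self):
#             return CONTENT_TYPE_MPEG
#
#         async def async_get_messages(self):
#             return []
#
#         async def async_get_media(self, msgid):
#             raise StreamError("no media stored for %s" % msgid)
#
#     async def async_get_handler(hass, config, discovery_info=None):
#         return DemoMailbox(hass, 'demo')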
class MailboxView(HomeAssistantView):
"""Base mailbox view."""
def __init__(self, mailboxes):
"""Initialize a basic mailbox view."""
self.mailboxes = mailboxes
def get_mailbox(self, platform):
"""Retrieve the specified mailbox."""
for mailbox in self.mailboxes:
if mailbox.name == platform:
return mailbox
raise HTTPNotFound
class MailboxPlatformsView(MailboxView):
"""View to return the list of mailbox platforms."""
url = "/api/mailbox/platforms"
name = "api:mailbox:platforms"
async def get(self, request):
"""Retrieve list of platforms."""
platforms = []
for mailbox in self.mailboxes:
platforms.append(
{
'name': mailbox.name,
'has_media': mailbox.has_media,
'can_delete': mailbox.can_delete
})
return self.json(platforms)
class MailboxMessageView(MailboxView):
"""View to return the list of messages."""
url = "/api/mailbox/messages/{platform}"
name = "api:mailbox:messages"
async def get(self, request, platform):
"""Retrieve messages."""
mailbox = self.get_mailbox(platform)
messages = await mailbox.async_get_messages()
return self.json(messages)
class MailboxDeleteView(MailboxView):
"""View to delete selected messages."""
url = "/api/mailbox/delete/{platform}/{msgid}"
name = "api:mailbox:delete"
async def delete(self, request, platform, msgid):
"""Delete items."""
mailbox = self.get_mailbox(platform)
mailbox.async_delete(msgid)
class MailboxMediaView(MailboxView):
"""View to return a media file."""
url = r"/api/mailbox/media/{platform}/{msgid}"
name = "api:asteriskmbox:media"
async def get(self, request, platform, msgid):
"""Retrieve media."""
mailbox = self.get_mailbox(platform)
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
with async_timeout.timeout(10):
try:
stream = await mailbox.async_get_media(msgid)
except StreamError as err:
error_msg = "Error getting media: %s" % (err)
_LOGGER.error(error_msg)
return web.Response(status=500)
if stream:
return web.Response(body=stream,
content_type=mailbox.media_type)
return web.Response(status=500)
|
|
import time
from datetime import datetime
from distutils.version import LooseVersion
from threading import Event
from cassandra import ConsistencyLevel as CL
from cassandra import ReadFailure
from cassandra.query import SimpleStatement
from ccmlib.node import TimeoutError, Node
from nose.tools import timed
from assertions import assert_invalid
from dtest import Tester, debug
from tools import known_failure, no_vnodes, since
class NotificationWaiter(object):
"""
A helper class for waiting for pushed notifications from
Cassandra over the native protocol.
"""
def __init__(self, tester, node, notification_types, keyspace=None):
"""
        `node` should be a ccmlib.node.Node instance
`notification_types` should be a list of
"TOPOLOGY_CHANGE", "STATUS_CHANGE", and "SCHEMA_CHANGE".
"""
self.node = node
self.address = node.network_interfaces['binary'][0]
self.notification_types = notification_types
self.keyspace = keyspace
# get a single, new connection
session = tester.patient_exclusive_cql_connection(node)
connection = session.cluster.connection_factory(self.address, is_control_connection=True)
# coordinate with an Event
self.event = Event()
        # the pushed notifications
self.notifications = []
# register a callback for the notification type
for notification_type in notification_types:
connection.register_watcher(notification_type, self.handle_notification, register_timeout=5.0)
def handle_notification(self, notification):
"""
Called when a notification is pushed from Cassandra.
"""
debug("Got {} from {} at {}".format(notification, self.address, datetime.now()))
if self.keyspace and notification['keyspace'] and self.keyspace != notification['keyspace']:
return # we are not interested in this schema change
self.notifications.append(notification)
self.event.set()
def wait_for_notifications(self, timeout, num_notifications=1):
"""
Waits up to `timeout` seconds for notifications from Cassandra. If
passed `num_notifications`, stop waiting when that many notifications
are observed.
"""
deadline = time.time() + timeout
while time.time() < deadline:
self.event.wait(deadline - time.time())
self.event.clear()
if len(self.notifications) >= num_notifications:
break
return self.notifications
def clear_notifications(self):
debug("Clearing notifications...")
self.notifications = []
self.event.clear()
class TestPushedNotifications(Tester):
"""
Tests for pushed native protocol notification from Cassandra.
"""
@no_vnodes()
def move_single_node_test(self):
"""
@jira_ticket CASSANDRA-8516
Moving a token should result in MOVED_NODE notifications.
"""
self.cluster.populate(3).start(wait_for_binary_proto=True, wait_other_notice=True)
waiters = [NotificationWaiter(self, node, ["TOPOLOGY_CHANGE"])
for node in self.cluster.nodes.values()]
        # The first node sends NEW_NODE for the other 2 nodes during startup; in case
        # they are late due to network delays, let's block a bit longer
debug("Waiting for unwanted notifications....")
waiters[0].wait_for_notifications(timeout=30, num_notifications=2)
waiters[0].clear_notifications()
debug("Issuing move command....")
node1 = self.cluster.nodes.values()[0]
node1.move("123")
for waiter in waiters:
debug("Waiting for notification from {}".format(waiter.address,))
notifications = waiter.wait_for_notifications(60.0)
self.assertEquals(1, len(notifications), notifications)
notification = notifications[0]
change_type = notification["change_type"]
address, port = notification["address"]
self.assertEquals("MOVED_NODE", change_type)
self.assertEquals(self.get_ip_from_node(node1), address)
@no_vnodes()
def move_single_node_localhost_test(self):
"""
@jira_ticket CASSANDRA-10052
Test that we don't get NODE_MOVED notifications from nodes other than the local one,
when rpc_address is set to localhost (127.0.0.1).
        To set up this test we override the rpc_address to "localhost (127.0.0.1)" for all nodes, and
therefore we must change the rpc port or else processes won't start.
"""
cluster = self.cluster
cluster.populate(3)
self.change_rpc_address_to_localhost()
cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
waiters = [NotificationWaiter(self, node, ["TOPOLOGY_CHANGE"])
for node in self.cluster.nodes.values()]
        # The first node sends NEW_NODE for the other 2 nodes during startup; in case
        # they are late due to network delays, let's block a bit longer
debug("Waiting for unwanted notifications...")
waiters[0].wait_for_notifications(timeout=30, num_notifications=2)
waiters[0].clear_notifications()
debug("Issuing move command....")
node1 = self.cluster.nodes.values()[0]
node1.move("123")
for waiter in waiters:
debug("Waiting for notification from {}".format(waiter.address,))
notifications = waiter.wait_for_notifications(30.0)
self.assertEquals(1 if waiter.node is node1 else 0, len(notifications), notifications)
@known_failure(failure_source='cassandra',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11360',
flaky=True,
notes='2 different failures on 2.1 offheap memtables jobs. Fails on main 2.1 as well')
def restart_node_test(self):
"""
@jira_ticket CASSANDRA-7816
Restarting a node should generate exactly one DOWN and one UP notification
"""
self.cluster.populate(2).start(wait_for_binary_proto=True, wait_other_notice=True)
node1, node2 = self.cluster.nodelist()
waiter = NotificationWaiter(self, node1, ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])
# need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
# don't confuse the state below.
debug("Waiting for unwanted notifications...")
waiter.wait_for_notifications(timeout=30, num_notifications=2)
waiter.clear_notifications()
# On versions prior to 2.2, an additional NEW_NODE notification is sent when a node
# is restarted. This bug was fixed in CASSANDRA-11038 (see also CASSANDRA-11360)
version = LooseVersion(self.cluster.cassandra_version())
expected_notifications = 2 if version >= '2.2' else 3
for i in range(5):
debug("Restarting second node...")
node2.stop(wait_other_notice=True)
node2.start(wait_other_notice=True)
debug("Waiting for notifications from {}".format(waiter.address))
notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=expected_notifications)
self.assertEquals(expected_notifications, len(notifications), notifications)
for notification in notifications:
self.assertEquals(self.get_ip_from_node(node2), notification["address"][0])
self.assertEquals("DOWN", notifications[0]["change_type"])
self.assertEquals("UP", notifications[1]["change_type"])
if version < '2.2':
self.assertEquals("NEW_NODE", notifications[2]["change_type"])
waiter.clear_notifications()
def restart_node_localhost_test(self):
"""
Test that we don't get client notifications when rpc_address is set to localhost.
@jira_ticket CASSANDRA-10052
        To set up this test we override the rpc_address to "localhost" for all nodes, and
therefore we must change the rpc port or else processes won't start.
"""
cluster = self.cluster
cluster.populate(2)
node1, node2 = cluster.nodelist()
self.change_rpc_address_to_localhost()
cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
# register for notification with node1
waiter = NotificationWaiter(self, node1, ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])
# restart node 2
debug("Restarting second node...")
node2.stop(wait_other_notice=True)
node2.start(wait_other_notice=True)
# check that node1 did not send UP or DOWN notification for node2
debug("Waiting for notifications from {}".format(waiter.address,))
notifications = waiter.wait_for_notifications(timeout=30.0, num_notifications=2)
self.assertEquals(0, len(notifications), notifications)
@since("2.2")
def add_and_remove_node_test(self):
"""
Test that NEW_NODE and REMOVED_NODE are sent correctly as nodes join and leave.
@jira_ticket CASSANDRA-11038
"""
self.cluster.populate(1).start(wait_for_binary_proto=True)
node1 = self.cluster.nodelist()[0]
waiter = NotificationWaiter(self, node1, ["STATUS_CHANGE", "TOPOLOGY_CHANGE"])
# need to block for up to 2 notifications (NEW_NODE and UP) so that these notifications
# don't confuse the state below
debug("Waiting for unwanted notifications...")
waiter.wait_for_notifications(timeout=30, num_notifications=2)
waiter.clear_notifications()
debug("Adding second node...")
node2 = Node('node2', self.cluster, True, ('127.0.0.2', 9160), ('127.0.0.2', 7000), '7200', '0', None, ('127.0.0.2', 9042))
self.cluster.add(node2, False)
node2.start(wait_other_notice=True)
debug("Waiting for notifications from {}".format(waiter.address))
notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
self.assertEquals(2, len(notifications), notifications)
for notification in notifications:
self.assertEquals(self.get_ip_from_node(node2), notification["address"][0])
self.assertEquals("NEW_NODE", notifications[0]["change_type"])
self.assertEquals("UP", notifications[1]["change_type"])
debug("Removing second node...")
waiter.clear_notifications()
node2.decommission()
node2.stop(gently=False)
debug("Waiting for notifications from {}".format(waiter.address))
notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=2)
self.assertEquals(2, len(notifications), notifications)
for notification in notifications:
self.assertEquals(self.get_ip_from_node(node2), notification["address"][0])
self.assertEquals("REMOVED_NODE", notifications[0]["change_type"])
self.assertEquals("DOWN", notifications[1]["change_type"])
def change_rpc_address_to_localhost(self):
"""
change node's 'rpc_address' from '127.0.0.x' to 'localhost (127.0.0.1)', increase port numbers
"""
cluster = self.cluster
i = 0
for node in cluster.nodelist():
debug('Set 127.0.0.1 to prevent IPv6 java prefs, set rpc_address: localhost in cassandra.yaml')
node.network_interfaces['thrift'] = ('127.0.0.1', node.network_interfaces['thrift'][1] + i)
node.network_interfaces['binary'] = ('127.0.0.1', node.network_interfaces['thrift'][1] + 1)
node.import_config_files() # this regenerates the yaml file and sets 'rpc_address' to the 'thrift' address
node.set_configuration_options(values={'rpc_address': 'localhost'})
debug(node.show())
i += 2
@since("3.0")
def schema_changes_test(self):
"""
@jira_ticket CASSANDRA-10328
Creating, updating and dropping a keyspace, a table and a materialized view
will generate the correct schema change notifications.
"""
self.cluster.populate(2).start(wait_for_binary_proto=True)
node1, node2 = self.cluster.nodelist()
session = self.patient_cql_connection(node1)
waiter = NotificationWaiter(self, node2, ["SCHEMA_CHANGE"], keyspace='ks')
self.create_ks(session, 'ks', 3)
session.execute("create TABLE t (k int PRIMARY KEY , v int)")
session.execute("alter TABLE t add v1 int;")
session.execute("create MATERIALIZED VIEW mv as select * from t WHERE v IS NOT NULL AND v1 IS NOT NULL PRIMARY KEY (v, k)")
session.execute(" alter materialized view mv with min_index_interval = 100")
session.execute("drop MATERIALIZED VIEW mv")
session.execute("drop TABLE t")
session.execute("drop KEYSPACE ks")
debug("Waiting for notifications from {}".format(waiter.address,))
notifications = waiter.wait_for_notifications(timeout=60.0, num_notifications=8)
self.assertEquals(8, len(notifications), notifications)
self.assertDictContainsSubset({'change_type': u'CREATED', 'target_type': u'KEYSPACE'}, notifications[0])
self.assertDictContainsSubset({'change_type': u'CREATED', 'target_type': u'TABLE', u'table': u't'}, notifications[1])
self.assertDictContainsSubset({'change_type': u'UPDATED', 'target_type': u'TABLE', u'table': u't'}, notifications[2])
self.assertDictContainsSubset({'change_type': u'CREATED', 'target_type': u'TABLE', u'table': u'mv'}, notifications[3])
self.assertDictContainsSubset({'change_type': u'UPDATED', 'target_type': u'TABLE', u'table': u'mv'}, notifications[4])
self.assertDictContainsSubset({'change_type': u'DROPPED', 'target_type': u'TABLE', u'table': u'mv'}, notifications[5])
self.assertDictContainsSubset({'change_type': u'DROPPED', 'target_type': u'TABLE', u'table': u't'}, notifications[6])
self.assertDictContainsSubset({'change_type': u'DROPPED', 'target_type': u'KEYSPACE'}, notifications[7])
class TestVariousNotifications(Tester):
"""
Tests for various notifications/messages from Cassandra.
"""
@since('2.2')
def tombstone_failure_threshold_message_test(self):
"""
Ensure nodes return an error message in case of TombstoneOverwhelmingExceptions rather
        than dropping the request. A drop makes the coordinator wait for the specified
read_request_timeout_in_ms.
@jira_ticket CASSANDRA-7886
"""
self.allow_log_errors = True
self.cluster.set_configuration_options(
values={
'tombstone_failure_threshold': 500,
'read_request_timeout_in_ms': 30000, # 30 seconds
'range_request_timeout_in_ms': 40000
}
)
self.cluster.populate(3).start()
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_cql_connection(node1)
self.create_ks(session, 'test', 3)
session.execute(
"CREATE TABLE test ( "
"id int, mytext text, col1 int, col2 int, col3 int, "
"PRIMARY KEY (id, mytext) )"
)
# Add data with tombstones
values = map(lambda i: str(i), range(1000))
for value in values:
session.execute(SimpleStatement(
"insert into test (id, mytext, col1) values (1, '{}', null) ".format(
value
),
consistency_level=CL.ALL
))
failure_msg = ("Scanned over.* tombstones.* query aborted")
@timed(25)
def read_failure_query():
assert_invalid(
session, SimpleStatement("select * from test where id in (1,2,3,4,5)", consistency_level=CL.ALL),
expected=ReadFailure
)
read_failure_query()
# In almost all cases, we should find the failure message on node1 within a few seconds.
# If it is not on node1, we grep all logs, as it *absolutely* should be somewhere.
# If we still cannot find it then, we fail the test, as this is a problem.
try:
node1.watch_log_for(failure_msg, timeout=5)
except TimeoutError:
failure = (node1.grep_log(failure_msg) or
node2.grep_log(failure_msg) or
node3.grep_log(failure_msg))
self.assertTrue(failure, ("Cannot find tombstone failure threshold error in log "
"after failed query"))
mark1 = node1.mark_log()
mark2 = node2.mark_log()
mark3 = node3.mark_log()
@timed(35)
def range_request_failure_query():
assert_invalid(
session, SimpleStatement("select * from test", consistency_level=CL.ALL),
expected=ReadFailure
)
range_request_failure_query()
# In almost all cases, we should find the failure message on node1 within a few seconds.
# If it is not on node1, we grep all logs, as it *absolutely* should be somewhere.
# If we still cannot find it then, we fail the test, as this is a problem.
try:
node1.watch_log_for(failure_msg, from_mark=mark1, timeout=5)
except TimeoutError:
failure = (node1.grep_log(failure_msg, from_mark=mark1) or
node2.grep_log(failure_msg, from_mark=mark2) or
node3.grep_log(failure_msg, from_mark=mark3))
self.assertTrue(failure, ("Cannot find tombstone failure threshold error in log "
"after range_request_timeout_query"))
|
|
# (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Cisco Zone Driver is responsible for managing access control using FC zoning
for Cisco FC fabrics.
This is a concrete implementation of the FCZoneDriver interface, implementing
the add_connection and delete_connection interfaces.
**Related Flags**
:zone_activate: Used by: class: 'FCZoneDriver'. Defaults to True
:zone_name_prefix: Used by: class: 'FCZoneDriver'. Defaults to 'openstack'
"""
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import importutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts
from cinder.zonemanager.drivers import fc_zone_driver
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
cisco_opts = [
cfg.StrOpt('cisco_sb_connector',
default='cinder.zonemanager.drivers.cisco'
'.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI',
help='Southbound connector for zoning operation'),
]
CONF = cfg.CONF
CONF.register_opts(cisco_opts, 'fc-zone-manager')
class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver):
"""Cisco FC zone driver implementation.
OpenStack Fibre Channel zone driver to manage FC zoning in
Cisco SAN fabrics.
Version history:
1.0 - Initial Cisco FC zone driver
"""
VERSION = "1.0.0"
def __init__(self, **kwargs):
super(CiscoFCZoneDriver, self).__init__(**kwargs)
self.configuration = kwargs.get('configuration', None)
if self.configuration:
self.configuration.append_config_values(cisco_opts)
# Adding a hack to handle parameters from super classes
# in case configured with multi backends.
fabric_names = self.configuration.safe_get('fc_fabric_names')
activate = self.configuration.safe_get('cisco_zone_activate')
prefix = self.configuration.safe_get('cisco_zone_name_prefix')
base_san_opts = []
if not fabric_names:
base_san_opts.append(
cfg.StrOpt('fc_fabric_names', default=None,
help='Comma separated list of fibre channel '
'fabric names. This list of names is used to'
' retrieve other SAN credentials for connecting'
' to each SAN fabric'
))
if not activate:
base_san_opts.append(
cfg.BoolOpt('cisco_zone_activate',
default=True,
help='Indicates whether zone should '
'be activated or not'))
if not prefix:
base_san_opts.append(
cfg.StrOpt('cisco_zone_name_prefix',
default="openstack",
help="A prefix to be used when naming zone"))
if len(base_san_opts) > 0:
CONF.register_opts(base_san_opts)
self.configuration.append_config_values(base_san_opts)
fabric_names = [x.strip() for x in self.
configuration.fc_fabric_names.split(',')]
# There can be more than one SAN in the network and we need to
# get credentials for each SAN.
if fabric_names:
self.fabric_configs = fabric_opts.load_fabric_configurations(
fabric_names)
@lockutils.synchronized('cisco', 'fcfabric-', True)
def add_connection(self, fabric, initiator_target_map):
"""Concrete implementation of add_connection.
        Based on the zoning policy and the state of each I-T pair, a list of
        zone members is created and pushed to the fabric to add zones. The
        new or updated zones are then activated, based on the isActivate
        flag set in cinder.conf and returned by the volume driver after the
        attach operation.
:param fabric: Fabric name from cinder.conf file
:param initiator_target_map: Mapping of initiator to list of targets
"""
LOG.debug("Add connection for Fabric:%s", fabric)
LOG.info(_LI("CiscoFCZoneDriver - Add connection "
"for I-T map: %s"), initiator_target_map)
fabric_ip = self.fabric_configs[fabric].safe_get(
'cisco_fc_fabric_address')
fabric_user = self.fabric_configs[fabric].safe_get(
'cisco_fc_fabric_user')
fabric_pwd = self.fabric_configs[fabric].safe_get(
'cisco_fc_fabric_password')
fabric_port = self.fabric_configs[fabric].safe_get(
'cisco_fc_fabric_port')
zoning_policy = self.configuration.zoning_policy
zoning_policy_fab = self.fabric_configs[fabric].safe_get(
'cisco_zoning_policy')
if zoning_policy_fab:
zoning_policy = zoning_policy_fab
zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan')
LOG.info(_LI("Zoning policy for Fabric %s"), zoning_policy)
statusmap_from_fabric = self.get_zoning_status(
fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
if statusmap_from_fabric.get('session') == 'none':
cfgmap_from_fabric = self.get_active_zone_set(
fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
zone_names = []
if cfgmap_from_fabric.get('zones'):
zone_names = cfgmap_from_fabric['zones'].keys()
# based on zoning policy, create zone member list and
# push changes to fabric.
for initiator_key in initiator_target_map.keys():
zone_map = {}
initiator = initiator_key.lower()
t_list = initiator_target_map[initiator_key]
if zoning_policy == 'initiator-target':
for t in t_list:
target = t.lower()
zone_members = [
zm_utils.get_formatted_wwn(initiator),
zm_utils.get_formatted_wwn(target)]
                        zone_name = (
                            self.configuration.cisco_zone_name_prefix
                            + initiator.replace(':', '')
                            + target.replace(':', ''))
if (len(cfgmap_from_fabric) == 0 or (
zone_name not in zone_names)):
zone_map[zone_name] = zone_members
else:
# This is I-T zoning, skip if zone exists.
LOG.info(_LI("Zone exists in I-T mode. "
"Skipping zone creation %s"),
zone_name)
elif zoning_policy == 'initiator':
zone_members = [
zm_utils.get_formatted_wwn(initiator)]
for t in t_list:
target = t.lower()
zone_members.append(
zm_utils.get_formatted_wwn(target))
zone_name = self.configuration.cisco_zone_name_prefix \
+ initiator.replace(':', '')
if len(zone_names) > 0 and (zone_name in zone_names):
                        zone_members = zone_members + [
                            x for x in cfgmap_from_fabric['zones'][zone_name]
                            if x not in zone_members]
zone_map[zone_name] = zone_members
else:
msg = _("Zoning Policy: %s, not"
" recognized") % zoning_policy
LOG.error(msg)
raise exception.FCZoneDriverException(msg)
LOG.info(_LI("Zone map to add: %s"), zone_map)
if len(zone_map) > 0:
conn = None
try:
conn = importutils.import_object(
self.configuration.cisco_sb_connector,
ipaddress=fabric_ip,
username=fabric_user,
password=fabric_pwd,
port=fabric_port,
vsan=zoning_vsan)
conn.add_zones(
zone_map, self.configuration.cisco_zone_activate,
zoning_vsan, cfgmap_from_fabric,
statusmap_from_fabric)
conn.cleanup()
except exception.CiscoZoningCliException as cisco_ex:
msg = _("Exception: %s") % six.text_type(cisco_ex)
raise exception.FCZoneDriverException(msg)
except Exception as e:
LOG.error(_LE("Exception: %s") % six.text_type(e))
msg = (_("Failed to add zoning configuration %s") %
six.text_type(e))
raise exception.FCZoneDriverException(msg)
LOG.debug("Zones added successfully: %s", zone_map)
else:
LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
@lockutils.synchronized('cisco', 'fcfabric-', True)
def delete_connection(self, fabric, initiator_target_map):
"""Concrete implementation of delete_connection.
        Based on the zoning policy and the state of each I-T pair, a list of
        zones is created for deletion. The zones are either updated or
        deleted based on the policy and the attach/detach state of each
        I-T pair.
:param fabric: Fabric name from cinder.conf file
:param initiator_target_map: Mapping of initiator to list of targets
"""
LOG.debug("Delete connection for fabric:%s", fabric)
LOG.info(_LI("CiscoFCZoneDriver - Delete connection for I-T map: %s"),
initiator_target_map)
fabric_ip = self.fabric_configs[fabric].safe_get(
'cisco_fc_fabric_address')
fabric_user = self.fabric_configs[fabric].safe_get(
'cisco_fc_fabric_user')
fabric_pwd = self.fabric_configs[fabric].safe_get(
'cisco_fc_fabric_password')
fabric_port = self.fabric_configs[fabric].safe_get(
'cisco_fc_fabric_port')
zoning_policy = self.configuration.zoning_policy
zoning_policy_fab = self.fabric_configs[fabric].safe_get(
'cisco_zoning_policy')
if zoning_policy_fab:
zoning_policy = zoning_policy_fab
zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan')
LOG.info(_LI("Zoning policy for fabric %s"), zoning_policy)
statusmap_from_fabric = self.get_zoning_status(
fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
if statusmap_from_fabric.get('session') == 'none':
cfgmap_from_fabric = self.get_active_zone_set(
fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan)
zone_names = []
if cfgmap_from_fabric.get('zones'):
zone_names = cfgmap_from_fabric['zones'].keys()
# Based on zoning policy, get zone member list and push
# changes to fabric. This operation could result in an update
# for zone config with new member list or deleting zones from
# active cfg.
LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric)
for initiator_key in initiator_target_map.keys():
initiator = initiator_key.lower()
formatted_initiator = zm_utils.get_formatted_wwn(initiator)
zone_map = {}
zones_to_delete = []
t_list = initiator_target_map[initiator_key]
if zoning_policy == 'initiator-target':
# In this case, zone needs to be deleted.
for t in t_list:
target = t.lower()
zone_name = (
self.configuration.cisco_zone_name_prefix
+ initiator.replace(':', '')
+ target.replace(':', ''))
LOG.debug("Zone name to del: %s", zone_name)
if (len(zone_names) > 0 and (zone_name in zone_names)):
# delete zone.
LOG.debug("Added zone to delete to list: %s",
zone_name)
zones_to_delete.append(zone_name)
elif zoning_policy == 'initiator':
zone_members = [formatted_initiator]
for t in t_list:
target = t.lower()
zone_members.append(
zm_utils.get_formatted_wwn(target))
zone_name = self.configuration.cisco_zone_name_prefix \
+ initiator.replace(':', '')
if (zone_names and (zone_name in zone_names)):
                        filtered_members = [
                            x for x in cfgmap_from_fabric['zones'][zone_name]
                            if x not in zone_members]
                        # The assumption here is that the initiator is always
                        # present in the zone, since this is 'initiator'
                        # policy. We compute the filtered member list; if it
                        # is non-empty, we add the initiator back and update
                        # the zone. If the filtered list is empty, we delete
                        # the zone instead.
LOG.debug("Zone delete - I mode: filtered targets:%s",
filtered_members)
if filtered_members:
filtered_members.append(formatted_initiator)
LOG.debug("Filtered zone members to update: %s",
filtered_members)
zone_map[zone_name] = filtered_members
LOG.debug("Filtered zone Map to update: %s",
zone_map)
else:
zones_to_delete.append(zone_name)
else:
LOG.info(_LI("Zoning Policy: %s, not recognized"),
zoning_policy)
LOG.debug("Final Zone map to update: %s", zone_map)
LOG.debug("Final Zone list to delete: %s", zones_to_delete)
conn = None
try:
conn = importutils.import_object(
self.configuration.cisco_sb_connector,
ipaddress=fabric_ip,
username=fabric_user,
password=fabric_pwd,
port=fabric_port,
vsan=zoning_vsan)
# Update zone membership.
if zone_map:
conn.add_zones(
zone_map, self.configuration.cisco_zone_activate,
zoning_vsan, cfgmap_from_fabric,
statusmap_from_fabric)
                    # Delete zones.
                    if zones_to_delete:
                        # Build the semicolon-separated zone name string
                        # expected by the southbound connector.
                        zone_name_string = ';'.join(zones_to_delete)
conn.delete_zones(zone_name_string,
self.configuration.
cisco_zone_activate,
zoning_vsan, cfgmap_from_fabric,
statusmap_from_fabric)
conn.cleanup()
except Exception as e:
msg = _("Exception: %s") % six.text_type(e)
LOG.error(msg)
msg = _("Failed to update or delete zoning configuration")
raise exception.FCZoneDriverException(msg)
LOG.debug("Zones deleted successfully: %s", zone_map)
else:
LOG.debug("Zoning session exists VSAN: %s", zoning_vsan)
def get_san_context(self, target_wwn_list):
"""Lookup SAN context for visible end devices.
        Look up each configured SAN and return a map of fabric name to the
        list of target WWNs visible to that fabric.
"""
formatted_target_list = []
fabric_map = {}
        fabrics = [x.strip() for x in
                   self.configuration.fc_fabric_names.split(',')]
LOG.debug("Fabric List: %s", fabrics)
LOG.debug("Target wwn List: %s", target_wwn_list)
if len(fabrics) > 0:
for t in target_wwn_list:
formatted_target_list.append(
zm_utils.get_formatted_wwn(t.lower()))
LOG.debug("Formatted Target wwn List: %s", formatted_target_list)
for fabric_name in fabrics:
fabric_ip = self.fabric_configs[fabric_name].safe_get(
'cisco_fc_fabric_address')
fabric_user = self.fabric_configs[fabric_name].safe_get(
'cisco_fc_fabric_user')
fabric_pwd = self.fabric_configs[fabric_name].safe_get(
'cisco_fc_fabric_password')
fabric_port = self.fabric_configs[fabric_name].safe_get(
'cisco_fc_fabric_port')
zoning_vsan = self.fabric_configs[fabric_name].safe_get(
'cisco_zoning_vsan')
# Get name server data from fabric and get the targets
# logged in.
nsinfo = None
try:
conn = importutils.import_object(
self.configuration.cisco_sb_connector,
ipaddress=fabric_ip,
username=fabric_user,
password=fabric_pwd, port=fabric_port,
vsan=zoning_vsan)
nsinfo = conn.get_nameserver_info()
LOG.debug("show fcns database info from fabric:%s", nsinfo)
conn.cleanup()
except exception.CiscoZoningCliException as ex:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error getting show fcns database "
"info: %s"), six.text_type(ex))
except Exception as e:
msg = (_("Failed to get show fcns database info:%s") %
six.text_type(e))
LOG.error(msg)
raise exception.FCZoneDriverException(msg)
                visible_targets = [
                    x for x in nsinfo if x in formatted_target_list]
if visible_targets:
LOG.info(_LI("Filtered targets for SAN is: %s"),
{fabric_name: visible_targets})
# getting rid of the ':' before returning
for idx, elem in enumerate(visible_targets):
visible_targets[idx] = six.text_type(
visible_targets[idx]).replace(':', '')
fabric_map[fabric_name] = visible_targets
else:
LOG.debug("No targets are in the fcns info for SAN %s",
fabric_name)
LOG.debug("Return SAN context output:%s", fabric_map)
return fabric_map
def get_active_zone_set(self, fabric_ip,
fabric_user, fabric_pwd, fabric_port,
zoning_vsan):
"""Gets active zoneset config for vsan."""
cfgmap = {}
conn = None
try:
LOG.debug("Southbound connector: %s",
self.configuration.cisco_sb_connector)
conn = importutils.import_object(
self.configuration.cisco_sb_connector,
ipaddress=fabric_ip, username=fabric_user,
password=fabric_pwd, port=fabric_port, vsan=zoning_vsan)
cfgmap = conn.get_active_zone_set()
conn.cleanup()
except Exception as e:
msg = (_("Failed to access active zoning configuration:%s") %
six.text_type(e))
LOG.error(msg)
raise exception.FCZoneDriverException(msg)
LOG.debug("Active zone set from fabric: %s", cfgmap)
return cfgmap
def get_zoning_status(self, fabric_ip, fabric_user, fabric_pwd,
fabric_port, zoning_vsan):
"""Gets zoneset status and mode."""
statusmap = {}
conn = None
try:
LOG.debug("Southbound connector: %s",
self.configuration.cisco_sb_connector)
conn = importutils.import_object(
self.configuration.cisco_sb_connector,
ipaddress=fabric_ip, username=fabric_user,
password=fabric_pwd, port=fabric_port, vsan=zoning_vsan)
statusmap = conn.get_zoning_status()
conn.cleanup()
except Exception as e:
msg = (_("Failed to access zoneset status:%s") %
six.text_type(e))
LOG.error(msg)
raise exception.FCZoneDriverException(msg)
LOG.debug("Zoneset status from fabric: %s", statusmap)
return statusmap
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from functools import wraps
import os
import contextlib
from airflow import settings
from airflow.utils.log.logging_mixin import LoggingMixin
log = LoggingMixin().log
@contextlib.contextmanager
def create_session():
"""
Contextmanager that will create and teardown a session.
"""
session = settings.Session()
try:
yield session
session.commit()
except Exception:
session.rollback()
raise
finally:
session.close()
def provide_session(func):
"""
Function decorator that provides a session if it isn't provided.
If you want to reuse a session or run the function as part of a
database transaction, you pass it to the function, if not this wrapper
will create one and close it for you.
"""
@wraps(func)
def wrapper(*args, **kwargs):
arg_session = 'session'
func_params = func.__code__.co_varnames
session_in_args = arg_session in func_params and \
func_params.index(arg_session) < len(args)
session_in_kwargs = arg_session in kwargs
if session_in_kwargs or session_in_args:
return func(*args, **kwargs)
else:
with create_session() as session:
kwargs[arg_session] = session
return func(*args, **kwargs)
return wrapper
@provide_session
def merge_conn(conn, session=None):
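    # Insert the connection only if no row with the same conn_id exists yet;
    # existing connections are left untouched.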
from airflow.models.connection import Connection
if not session.query(Connection).filter(Connection.conn_id == conn.conn_id).first():
session.add(conn)
session.commit()
def initdb():
from airflow import models
from airflow.models.connection import Connection
upgradedb()
merge_conn(
Connection(
conn_id='airflow_db', conn_type='mysql',
host='mysql', login='root', password='',
schema='airflow'))
merge_conn(
Connection(
conn_id='beeline_default', conn_type='beeline', port="10000",
host='localhost', extra="{\"use_beeline\": true, \"auth\": \"\"}",
schema='default'))
merge_conn(
Connection(
conn_id='bigquery_default', conn_type='google_cloud_platform',
schema='default'))
merge_conn(
Connection(
conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow', password='airflow',
schema='airflow'))
merge_conn(
Connection(
conn_id='presto_default', conn_type='presto',
host='localhost',
schema='hive', port=3400))
merge_conn(
Connection(
conn_id='google_cloud_default', conn_type='google_cloud_platform',
schema='default',))
merge_conn(
Connection(
conn_id='hive_cli_default', conn_type='hive_cli',
schema='default',))
merge_conn(
Connection(
conn_id='hiveserver2_default', conn_type='hiveserver2',
host='localhost',
schema='default', port=10000))
merge_conn(
Connection(
conn_id='metastore_default', conn_type='hive_metastore',
host='localhost', extra="{\"authMechanism\": \"PLAIN\"}",
port=9083))
merge_conn(
Connection(
conn_id='mongo_default', conn_type='mongo',
host='mongo', port=27017))
merge_conn(
Connection(
conn_id='mysql_default', conn_type='mysql',
login='root',
schema='airflow',
host='mysql'))
merge_conn(
Connection(
conn_id='postgres_default', conn_type='postgres',
login='postgres',
password='airflow',
schema='airflow',
host='postgres'))
merge_conn(
Connection(
conn_id='sqlite_default', conn_type='sqlite',
host='/tmp/sqlite_default.db'))
merge_conn(
Connection(
conn_id='http_default', conn_type='http',
host='https://www.google.com/'))
merge_conn(
Connection(
conn_id='mssql_default', conn_type='mssql',
host='localhost', port=1433))
merge_conn(
Connection(
conn_id='vertica_default', conn_type='vertica',
host='localhost', port=5433))
merge_conn(
Connection(
conn_id='wasb_default', conn_type='wasb',
extra='{"sas_token": null}'))
merge_conn(
Connection(
conn_id='webhdfs_default', conn_type='hdfs',
host='localhost', port=50070))
merge_conn(
Connection(
conn_id='ssh_default', conn_type='ssh',
host='localhost'))
merge_conn(
Connection(
conn_id='sftp_default', conn_type='sftp',
host='localhost', port=22, login='airflow',
extra='''
{"key_file": "~/.ssh/id_rsa", "no_host_key_check": true}
'''))
merge_conn(
Connection(
conn_id='fs_default', conn_type='fs',
extra='{"path": "/"}'))
merge_conn(
Connection(
conn_id='aws_default', conn_type='aws',
extra='{"region_name": "us-east-1"}'))
merge_conn(
Connection(
conn_id='spark_default', conn_type='spark',
host='yarn', extra='{"queue": "root.default"}'))
merge_conn(
Connection(
conn_id='druid_broker_default', conn_type='druid',
host='druid-broker', port=8082, extra='{"endpoint": "druid/v2/sql"}'))
merge_conn(
Connection(
conn_id='druid_ingest_default', conn_type='druid',
host='druid-overlord', port=8081, extra='{"endpoint": "druid/indexer/v1/task"}'))
merge_conn(
Connection(
conn_id='redis_default', conn_type='redis',
host='redis', port=6379,
extra='{"db": 0}'))
merge_conn(
Connection(
conn_id='sqoop_default', conn_type='sqoop',
host='rmdbs', extra=''))
merge_conn(
Connection(
conn_id='emr_default', conn_type='emr',
extra='''
{ "Name": "default_job_flow_name",
"LogUri": "s3://my-emr-log-bucket/default_job_flow_location",
"ReleaseLabel": "emr-4.6.0",
"Instances": {
"Ec2KeyName": "mykey",
"Ec2SubnetId": "somesubnet",
"InstanceGroups": [
{
"Name": "Master nodes",
"Market": "ON_DEMAND",
"InstanceRole": "MASTER",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
},
{
"Name": "Slave nodes",
"Market": "ON_DEMAND",
"InstanceRole": "CORE",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
}
],
"TerminationProtected": false,
"KeepJobFlowAliveWhenNoSteps": false
},
"Applications":[
{ "Name": "Spark" }
],
"VisibleToAllUsers": true,
"JobFlowRole": "EMR_EC2_DefaultRole",
"ServiceRole": "EMR_DefaultRole",
"Tags": [
{
"Key": "app",
"Value": "analytics"
},
{
"Key": "environment",
"Value": "development"
}
]
}
'''))
merge_conn(
Connection(
conn_id='databricks_default', conn_type='databricks',
host='localhost'))
merge_conn(
Connection(
conn_id='qubole_default', conn_type='qubole',
host='localhost'))
merge_conn(
Connection(
conn_id='segment_default', conn_type='segment',
extra='{"write_key": "my-segment-write-key"}')),
merge_conn(
Connection(
conn_id='azure_data_lake_default', conn_type='azure_data_lake',
extra='{"tenant": "<TENANT>", "account_name": "<ACCOUNTNAME>" }'))
merge_conn(
Connection(
conn_id='azure_cosmos_default', conn_type='azure_cosmos',
extra='{"database_name": "<DATABASE_NAME>", "collection_name": "<COLLECTION_NAME>" }'))
merge_conn(
Connection(
conn_id='azure_container_instances_default', conn_type='azure_container_instances',
extra='{"tenantId": "<TENANT>", "subscriptionId": "<SUBSCRIPTION ID>" }'))
merge_conn(
Connection(
conn_id='cassandra_default', conn_type='cassandra',
host='cassandra', port=9042))
dagbag = models.DagBag()
# Save individual DAGs in the ORM
for dag in dagbag.dags.values():
dag.sync_to_db()
# Deactivate the unknown ones
models.DAG.deactivate_unknown_dags(dagbag.dags.keys())
from flask_appbuilder.models.sqla import Base
Base.metadata.create_all(settings.engine)
def upgradedb():
# alembic adds significant import time, so we import it lazily
from alembic import command
from alembic.config import Config
log.info("Creating tables")
current_dir = os.path.dirname(os.path.abspath(__file__))
package_dir = os.path.normpath(os.path.join(current_dir, '..'))
directory = os.path.join(package_dir, 'migrations')
config = Config(os.path.join(package_dir, 'alembic.ini'))
config.set_main_option('script_location', directory.replace('%', '%%'))
config.set_main_option('sqlalchemy.url', settings.SQL_ALCHEMY_CONN.replace('%', '%%'))
command.upgrade(config, 'heads')
def resetdb():
"""
Clear out the database
"""
from airflow import models
# alembic adds significant import time, so we import it lazily
from alembic.migration import MigrationContext
log.info("Dropping tables that exist")
models.base.Base.metadata.drop_all(settings.engine)
mc = MigrationContext.configure(settings.engine)
if mc._version.exists(settings.engine):
mc._version.drop(settings.engine)
from flask_appbuilder.models.sqla import Base
Base.metadata.drop_all(settings.engine)
initdb()
|
|
"""Test event registration and listening."""
from sqlalchemy.testing import eq_, assert_raises, assert_raises_message, \
is_, is_not_
from sqlalchemy import event, exc
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.mock import Mock, call
from sqlalchemy import testing
class EventsTest(fixtures.TestBase):
"""Test class- and instance-level event registration."""
def setUp(self):
class TargetEvents(event.Events):
def event_one(self, x, y):
pass
def event_two(self, x):
pass
def event_three(self, x):
pass
class Target(object):
dispatch = event.dispatcher(TargetEvents)
self.Target = Target
def tearDown(self):
event.base._remove_dispatcher(self.Target.__dict__['dispatch'].events)
def test_register_class(self):
def listen(x, y):
pass
event.listen(self.Target, "event_one", listen)
eq_(len(self.Target().dispatch.event_one), 1)
eq_(len(self.Target().dispatch.event_two), 0)
def test_register_instance(self):
def listen(x, y):
pass
t1 = self.Target()
event.listen(t1, "event_one", listen)
eq_(len(self.Target().dispatch.event_one), 0)
eq_(len(t1.dispatch.event_one), 1)
eq_(len(self.Target().dispatch.event_two), 0)
eq_(len(t1.dispatch.event_two), 0)
def test_bool_clslevel(self):
def listen_one(x, y):
pass
event.listen(self.Target, "event_one", listen_one)
t = self.Target()
assert t.dispatch.event_one
def test_register_class_instance(self):
def listen_one(x, y):
pass
def listen_two(x, y):
pass
event.listen(self.Target, "event_one", listen_one)
t1 = self.Target()
event.listen(t1, "event_one", listen_two)
eq_(len(self.Target().dispatch.event_one), 1)
eq_(len(t1.dispatch.event_one), 2)
eq_(len(self.Target().dispatch.event_two), 0)
eq_(len(t1.dispatch.event_two), 0)
def listen_three(x, y):
pass
event.listen(self.Target, "event_one", listen_three)
eq_(len(self.Target().dispatch.event_one), 2)
eq_(len(t1.dispatch.event_one), 3)
def test_append_vs_insert_cls(self):
def listen_one(x, y):
pass
def listen_two(x, y):
pass
def listen_three(x, y):
pass
event.listen(self.Target, "event_one", listen_one)
event.listen(self.Target, "event_one", listen_two)
event.listen(self.Target, "event_one", listen_three, insert=True)
eq_(
list(self.Target().dispatch.event_one),
[listen_three, listen_one, listen_two]
)
def test_append_vs_insert_instance(self):
def listen_one(x, y):
pass
def listen_two(x, y):
pass
def listen_three(x, y):
pass
target = self.Target()
event.listen(target, "event_one", listen_one)
event.listen(target, "event_one", listen_two)
event.listen(target, "event_one", listen_three, insert=True)
eq_(
list(target.dispatch.event_one),
[listen_three, listen_one, listen_two]
)
def test_decorator(self):
@event.listens_for(self.Target, "event_one")
def listen_one(x, y):
pass
@event.listens_for(self.Target, "event_two")
@event.listens_for(self.Target, "event_three")
def listen_two(x, y):
pass
eq_(
list(self.Target().dispatch.event_one),
[listen_one]
)
eq_(
list(self.Target().dispatch.event_two),
[listen_two]
)
eq_(
list(self.Target().dispatch.event_three),
[listen_two]
)
def test_no_instance_level_collections(self):
@event.listens_for(self.Target, "event_one")
def listen_one(x, y):
pass
t1 = self.Target()
t2 = self.Target()
t1.dispatch.event_one(5, 6)
t2.dispatch.event_one(5, 6)
is_(
t1.dispatch.__dict__['event_one'],
self.Target.dispatch.event_one.\
_empty_listeners[self.Target]
)
@event.listens_for(t1, "event_one")
def listen_two(x, y):
pass
is_not_(
t1.dispatch.__dict__['event_one'],
self.Target.dispatch.event_one.\
_empty_listeners[self.Target]
)
is_(
t2.dispatch.__dict__['event_one'],
self.Target.dispatch.event_one.\
_empty_listeners[self.Target]
)
def test_immutable_methods(self):
t1 = self.Target()
for meth in [
t1.dispatch.event_one.exec_once,
t1.dispatch.event_one.insert,
t1.dispatch.event_one.append,
t1.dispatch.event_one.remove,
t1.dispatch.event_one.clear,
]:
assert_raises_message(
NotImplementedError,
r"need to call for_modify\(\)",
meth
)
class NamedCallTest(fixtures.TestBase):
def _fixture(self):
class TargetEventsOne(event.Events):
def event_one(self, x, y):
pass
def event_two(self, x, y, **kw):
pass
def event_five(self, x, y, z, q):
pass
class TargetOne(object):
dispatch = event.dispatcher(TargetEventsOne)
return TargetOne
def _wrapped_fixture(self):
class TargetEvents(event.Events):
@classmethod
def _listen(cls, event_key):
fn = event_key._listen_fn
def adapt(*args):
fn(*["adapted %s" % arg for arg in args])
event_key = event_key.with_wrapper(adapt)
event_key.base_listen()
def event_one(self, x, y):
pass
def event_five(self, x, y, z, q):
pass
class Target(object):
dispatch = event.dispatcher(TargetEvents)
return Target
def test_kw_accept(self):
TargetOne = self._fixture()
canary = Mock()
@event.listens_for(TargetOne, "event_one", named=True)
def handler1(**kw):
canary(kw)
TargetOne().dispatch.event_one(4, 5)
eq_(
canary.mock_calls,
[call({"x": 4, "y": 5})]
)
def test_kw_accept_wrapped(self):
TargetOne = self._wrapped_fixture()
canary = Mock()
@event.listens_for(TargetOne, "event_one", named=True)
def handler1(**kw):
canary(kw)
TargetOne().dispatch.event_one(4, 5)
eq_(
canary.mock_calls,
[call({'y': 'adapted 5', 'x': 'adapted 4'})]
)
def test_partial_kw_accept(self):
TargetOne = self._fixture()
canary = Mock()
@event.listens_for(TargetOne, "event_five", named=True)
def handler1(z, y, **kw):
canary(z, y, kw)
TargetOne().dispatch.event_five(4, 5, 6, 7)
eq_(
canary.mock_calls,
[call(6, 5, {"x": 4, "q": 7})]
)
def test_partial_kw_accept_wrapped(self):
TargetOne = self._wrapped_fixture()
canary = Mock()
@event.listens_for(TargetOne, "event_five", named=True)
def handler1(z, y, **kw):
canary(z, y, kw)
TargetOne().dispatch.event_five(4, 5, 6, 7)
eq_(
canary.mock_calls,
[call('adapted 6', 'adapted 5',
{'q': 'adapted 7', 'x': 'adapted 4'})]
)
def test_kw_accept_plus_kw(self):
TargetOne = self._fixture()
canary = Mock()
@event.listens_for(TargetOne, "event_two", named=True)
def handler1(**kw):
canary(kw)
TargetOne().dispatch.event_two(4, 5, z=8, q=5)
eq_(
canary.mock_calls,
[call({"x": 4, "y": 5, "z": 8, "q": 5})]
)
class LegacySignatureTest(fixtures.TestBase):
"""test adaption of legacy args"""
def setUp(self):
class TargetEventsOne(event.Events):
@event._legacy_signature("0.9", ["x", "y"])
def event_three(self, x, y, z, q):
pass
@event._legacy_signature("0.9", ["x", "y", "**kw"])
def event_four(self, x, y, z, q, **kw):
pass
@event._legacy_signature("0.9", ["x", "y", "z", "q"],
lambda x, y: (x, y, x + y, x * y))
def event_six(self, x, y):
pass
class TargetOne(object):
dispatch = event.dispatcher(TargetEventsOne)
self.TargetOne = TargetOne
def tearDown(self):
event.base._remove_dispatcher(self.TargetOne.__dict__['dispatch'].events)
def test_legacy_accept(self):
canary = Mock()
@event.listens_for(self.TargetOne, "event_three")
def handler1(x, y):
canary(x, y)
self.TargetOne().dispatch.event_three(4, 5, 6, 7)
eq_(
canary.mock_calls,
[call(4, 5)]
)
def test_legacy_accept_kw_cls(self):
canary = Mock()
@event.listens_for(self.TargetOne, "event_four")
def handler1(x, y, **kw):
canary(x, y, kw)
self._test_legacy_accept_kw(self.TargetOne(), canary)
def test_legacy_accept_kw_instance(self):
canary = Mock()
inst = self.TargetOne()
@event.listens_for(inst, "event_four")
def handler1(x, y, **kw):
canary(x, y, kw)
self._test_legacy_accept_kw(inst, canary)
def test_legacy_accept_partial(self):
canary = Mock()
def evt(a, x, y, **kw):
canary(a, x, y, **kw)
from functools import partial
evt_partial = partial(evt, 5)
target = self.TargetOne()
event.listen(target, "event_four", evt_partial)
# can't do legacy accept on a partial; we can't inspect it
assert_raises(
TypeError,
target.dispatch.event_four, 4, 5, 6, 7, foo="bar"
)
target.dispatch.event_four(4, 5, foo="bar")
eq_(
canary.mock_calls,
[call(5, 4, 5, foo="bar")]
)
def _test_legacy_accept_kw(self, target, canary):
target.dispatch.event_four(4, 5, 6, 7, foo="bar")
eq_(
canary.mock_calls,
[call(4, 5, {"foo": "bar"})]
)
def test_complex_legacy_accept(self):
canary = Mock()
@event.listens_for(self.TargetOne, "event_six")
def handler1(x, y, z, q):
canary(x, y, z, q)
self.TargetOne().dispatch.event_six(4, 5)
eq_(
canary.mock_calls,
[call(4, 5, 9, 20)]
)
def test_legacy_accept_from_method(self):
canary = Mock()
class MyClass(object):
def handler1(self, x, y):
canary(x, y)
event.listen(self.TargetOne, "event_three", MyClass().handler1)
self.TargetOne().dispatch.event_three(4, 5, 6, 7)
eq_(
canary.mock_calls,
[call(4, 5)]
)
def test_standard_accept_has_legacies(self):
canary = Mock()
event.listen(self.TargetOne, "event_three", canary)
self.TargetOne().dispatch.event_three(4, 5)
eq_(
canary.mock_calls,
[call(4, 5)]
)
def test_kw_accept_has_legacies(self):
canary = Mock()
@event.listens_for(self.TargetOne, "event_three", named=True)
def handler1(**kw):
canary(kw)
self.TargetOne().dispatch.event_three(4, 5, 6, 7)
eq_(
canary.mock_calls,
[call({"x": 4, "y": 5, "z": 6, "q": 7})]
)
def test_kw_accept_plus_kw_has_legacies(self):
canary = Mock()
@event.listens_for(self.TargetOne, "event_four", named=True)
def handler1(**kw):
canary(kw)
self.TargetOne().dispatch.event_four(4, 5, 6, 7, foo="bar")
eq_(
canary.mock_calls,
[call({"x": 4, "y": 5, "z": 6, "q": 7, "foo": "bar"})]
)
class ClsLevelListenTest(fixtures.TestBase):
def setUp(self):
class TargetEventsOne(event.Events):
def event_one(self, x, y):
pass
class TargetOne(object):
dispatch = event.dispatcher(TargetEventsOne)
self.TargetOne = TargetOne
def tearDown(self):
event.base._remove_dispatcher(
self.TargetOne.__dict__['dispatch'].events)
    def test_lis_subclass_lis(self):
@event.listens_for(self.TargetOne, "event_one")
def handler1(x, y):
pass
class SubTarget(self.TargetOne):
pass
@event.listens_for(self.TargetOne, "event_one")
def handler2(x, y):
pass
eq_(
len(SubTarget().dispatch.event_one),
2
)
def test_lis_multisub_lis(self):
@event.listens_for(self.TargetOne, "event_one")
def handler1(x, y):
pass
class SubTarget(self.TargetOne):
pass
class SubSubTarget(SubTarget):
pass
@event.listens_for(self.TargetOne, "event_one")
def handler2(x, y):
pass
eq_(
len(SubTarget().dispatch.event_one),
2
)
eq_(
len(SubSubTarget().dispatch.event_one),
2
)
def test_two_sub_lis(self):
class SubTarget1(self.TargetOne):
pass
class SubTarget2(self.TargetOne):
pass
@event.listens_for(self.TargetOne, "event_one")
def handler1(x, y):
pass
@event.listens_for(SubTarget1, "event_one")
def handler2(x, y):
pass
s1 = SubTarget1()
assert handler1 in s1.dispatch.event_one
assert handler2 in s1.dispatch.event_one
s2 = SubTarget2()
assert handler1 in s2.dispatch.event_one
assert handler2 not in s2.dispatch.event_one
class AcceptTargetsTest(fixtures.TestBase):
"""Test default target acceptance."""
def setUp(self):
class TargetEventsOne(event.Events):
def event_one(self, x, y):
pass
class TargetEventsTwo(event.Events):
def event_one(self, x, y):
pass
class TargetOne(object):
dispatch = event.dispatcher(TargetEventsOne)
class TargetTwo(object):
dispatch = event.dispatcher(TargetEventsTwo)
self.TargetOne = TargetOne
self.TargetTwo = TargetTwo
def tearDown(self):
event.base._remove_dispatcher(self.TargetOne.__dict__['dispatch'].events)
event.base._remove_dispatcher(self.TargetTwo.__dict__['dispatch'].events)
def test_target_accept(self):
"""Test that events of the same name are routed to the correct
collection based on the type of target given.
"""
def listen_one(x, y):
pass
def listen_two(x, y):
pass
def listen_three(x, y):
pass
def listen_four(x, y):
pass
event.listen(self.TargetOne, "event_one", listen_one)
event.listen(self.TargetTwo, "event_one", listen_two)
eq_(
list(self.TargetOne().dispatch.event_one),
[listen_one]
)
eq_(
list(self.TargetTwo().dispatch.event_one),
[listen_two]
)
t1 = self.TargetOne()
t2 = self.TargetTwo()
event.listen(t1, "event_one", listen_three)
event.listen(t2, "event_one", listen_four)
eq_(
list(t1.dispatch.event_one),
[listen_one, listen_three]
)
eq_(
list(t2.dispatch.event_one),
[listen_two, listen_four]
)
class CustomTargetsTest(fixtures.TestBase):
"""Test custom target acceptance."""
def setUp(self):
class TargetEvents(event.Events):
@classmethod
def _accept_with(cls, target):
if target == 'one':
return Target
else:
return None
def event_one(self, x, y):
pass
class Target(object):
dispatch = event.dispatcher(TargetEvents)
self.Target = Target
def tearDown(self):
event.base._remove_dispatcher(self.Target.__dict__['dispatch'].events)
def test_indirect(self):
def listen(x, y):
pass
event.listen("one", "event_one", listen)
eq_(
list(self.Target().dispatch.event_one),
[listen]
)
assert_raises(
exc.InvalidRequestError,
event.listen,
listen, "event_one", self.Target
)
class SubclassGrowthTest(fixtures.TestBase):
"""test that ad-hoc subclasses are garbage collected."""
def setUp(self):
class TargetEvents(event.Events):
def some_event(self, x, y):
pass
class Target(object):
dispatch = event.dispatcher(TargetEvents)
self.Target = Target
def test_subclass(self):
class SubTarget(self.Target):
pass
st = SubTarget()
st.dispatch.some_event(1, 2)
del st
del SubTarget
gc_collect()
eq_(self.Target.__subclasses__(), [])
class ListenOverrideTest(fixtures.TestBase):
"""Test custom listen functions which change the listener function signature."""
def setUp(self):
class TargetEvents(event.Events):
@classmethod
def _listen(cls, event_key, add=False):
fn = event_key.fn
if add:
def adapt(x, y):
fn(x + y)
event_key = event_key.with_wrapper(adapt)
event_key.base_listen()
def event_one(self, x, y):
pass
class Target(object):
dispatch = event.dispatcher(TargetEvents)
self.Target = Target
def tearDown(self):
event.base._remove_dispatcher(self.Target.__dict__['dispatch'].events)
def test_listen_override(self):
listen_one = Mock()
listen_two = Mock()
event.listen(self.Target, "event_one", listen_one, add=True)
event.listen(self.Target, "event_one", listen_two)
t1 = self.Target()
t1.dispatch.event_one(5, 7)
t1.dispatch.event_one(10, 5)
eq_(
listen_one.mock_calls,
[call(12), call(15)]
)
eq_(
listen_two.mock_calls,
[call(5, 7), call(10, 5)]
)
def test_remove_clslevel(self):
listen_one = Mock()
event.listen(self.Target, "event_one", listen_one, add=True)
t1 = self.Target()
t1.dispatch.event_one(5, 7)
eq_(
listen_one.mock_calls,
[call(12)]
)
event.remove(self.Target, "event_one", listen_one)
t1.dispatch.event_one(10, 5)
eq_(
listen_one.mock_calls,
[call(12)]
)
def test_remove_instancelevel(self):
listen_one = Mock()
t1 = self.Target()
event.listen(t1, "event_one", listen_one, add=True)
t1.dispatch.event_one(5, 7)
eq_(
listen_one.mock_calls,
[call(12)]
)
event.remove(t1, "event_one", listen_one)
t1.dispatch.event_one(10, 5)
eq_(
listen_one.mock_calls,
[call(12)]
)
class PropagateTest(fixtures.TestBase):
def setUp(self):
class TargetEvents(event.Events):
def event_one(self, arg):
pass
def event_two(self, arg):
pass
class Target(object):
dispatch = event.dispatcher(TargetEvents)
self.Target = Target
def test_propagate(self):
listen_one = Mock()
listen_two = Mock()
t1 = self.Target()
event.listen(t1, "event_one", listen_one, propagate=True)
event.listen(t1, "event_two", listen_two)
t2 = self.Target()
t2.dispatch._update(t1.dispatch)
t2.dispatch.event_one(t2, 1)
t2.dispatch.event_two(t2, 2)
eq_(
listen_one.mock_calls,
[call(t2, 1)]
)
eq_(
listen_two.mock_calls,
[]
)
class JoinTest(fixtures.TestBase):
def setUp(self):
class TargetEvents(event.Events):
def event_one(self, target, arg):
pass
class BaseTarget(object):
dispatch = event.dispatcher(TargetEvents)
class TargetFactory(BaseTarget):
def create(self):
return TargetElement(self)
class TargetElement(BaseTarget):
def __init__(self, parent):
self.dispatch = self.dispatch._join(parent.dispatch)
def run_event(self, arg):
list(self.dispatch.event_one)
self.dispatch.event_one(self, arg)
self.BaseTarget = BaseTarget
self.TargetFactory = TargetFactory
self.TargetElement = TargetElement
def tearDown(self):
for cls in (self.TargetElement,
self.TargetFactory, self.BaseTarget):
if 'dispatch' in cls.__dict__:
event.base._remove_dispatcher(cls.__dict__['dispatch'].events)
def test_neither(self):
element = self.TargetFactory().create()
element.run_event(1)
element.run_event(2)
element.run_event(3)
def test_kw_ok(self):
l1 = Mock()
def listen(**kw):
l1(kw)
event.listen(self.TargetFactory, "event_one", listen, named=True)
element = self.TargetFactory().create()
element.run_event(1)
element.run_event(2)
eq_(
l1.mock_calls,
[call({"target": element, "arg": 1}),
call({"target": element, "arg": 2}),]
)
def test_parent_class_only(self):
l1 = Mock()
event.listen(self.TargetFactory, "event_one", l1)
element = self.TargetFactory().create()
element.run_event(1)
element.run_event(2)
element.run_event(3)
eq_(
l1.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
def test_parent_class_child_class(self):
l1 = Mock()
l2 = Mock()
event.listen(self.TargetFactory, "event_one", l1)
event.listen(self.TargetElement, "event_one", l2)
element = self.TargetFactory().create()
element.run_event(1)
element.run_event(2)
element.run_event(3)
eq_(
l1.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
eq_(
l2.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
def test_parent_class_child_instance_apply_after(self):
l1 = Mock()
l2 = Mock()
event.listen(self.TargetFactory, "event_one", l1)
element = self.TargetFactory().create()
element.run_event(1)
event.listen(element, "event_one", l2)
element.run_event(2)
element.run_event(3)
eq_(
l1.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
eq_(
l2.mock_calls,
[call(element, 2), call(element, 3)]
)
def test_parent_class_child_instance_apply_before(self):
l1 = Mock()
l2 = Mock()
event.listen(self.TargetFactory, "event_one", l1)
element = self.TargetFactory().create()
event.listen(element, "event_one", l2)
element.run_event(1)
element.run_event(2)
element.run_event(3)
eq_(
l1.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
eq_(
l2.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
def test_parent_instance_child_class_apply_before(self):
l1 = Mock()
l2 = Mock()
event.listen(self.TargetElement, "event_one", l2)
factory = self.TargetFactory()
event.listen(factory, "event_one", l1)
element = factory.create()
element.run_event(1)
element.run_event(2)
element.run_event(3)
eq_(
l1.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
eq_(
l2.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
def test_parent_instance_child_class_apply_after(self):
l1 = Mock()
l2 = Mock()
event.listen(self.TargetElement, "event_one", l2)
factory = self.TargetFactory()
element = factory.create()
element.run_event(1)
event.listen(factory, "event_one", l1)
element.run_event(2)
element.run_event(3)
# if _JoinedListener fixes .listeners
# at construction time, then we don't get
# the new listeners.
#eq_(l1.mock_calls, [])
# alternatively, if _JoinedListener shares the list
# using a @property, then we get them, at the arguable
# expense of the extra method call to access the .listeners
# collection
eq_(
l1.mock_calls, [call(element, 2), call(element, 3)]
)
eq_(
l2.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
def test_parent_instance_child_instance_apply_before(self):
l1 = Mock()
l2 = Mock()
factory = self.TargetFactory()
event.listen(factory, "event_one", l1)
element = factory.create()
event.listen(element, "event_one", l2)
element.run_event(1)
element.run_event(2)
element.run_event(3)
eq_(
l1.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
eq_(
l2.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
def test_parent_events_child_no_events(self):
l1 = Mock()
factory = self.TargetFactory()
event.listen(self.TargetElement, "event_one", l1)
element = factory.create()
element.run_event(1)
element.run_event(2)
element.run_event(3)
eq_(
l1.mock_calls,
[call(element, 1), call(element, 2), call(element, 3)]
)
class RemovalTest(fixtures.TestBase):
def _fixture(self):
class TargetEvents(event.Events):
def event_one(self, x, y):
pass
def event_two(self, x):
pass
def event_three(self, x):
pass
class Target(object):
dispatch = event.dispatcher(TargetEvents)
return Target
def _wrapped_fixture(self):
class TargetEvents(event.Events):
@classmethod
def _listen(cls, event_key):
fn = event_key._listen_fn
def adapt(value):
fn("adapted " + value)
event_key = event_key.with_wrapper(adapt)
event_key.base_listen()
def event_one(self, x):
pass
class Target(object):
dispatch = event.dispatcher(TargetEvents)
return Target
def test_clslevel(self):
Target = self._fixture()
m1 = Mock()
event.listen(Target, "event_two", m1)
t1 = Target()
t1.dispatch.event_two("x")
event.remove(Target, "event_two", m1)
t1.dispatch.event_two("y")
eq_(m1.mock_calls, [call("x")])
def test_clslevel_subclass(self):
Target = self._fixture()
class SubTarget(Target):
pass
m1 = Mock()
event.listen(Target, "event_two", m1)
t1 = SubTarget()
t1.dispatch.event_two("x")
event.remove(Target, "event_two", m1)
t1.dispatch.event_two("y")
eq_(m1.mock_calls, [call("x")])
def test_instance(self):
Target = self._fixture()
class Foo(object):
def __init__(self):
self.mock = Mock()
def evt(self, arg):
self.mock(arg)
f1 = Foo()
f2 = Foo()
event.listen(Target, "event_one", f1.evt)
event.listen(Target, "event_one", f2.evt)
t1 = Target()
t1.dispatch.event_one("x")
event.remove(Target, "event_one", f1.evt)
t1.dispatch.event_one("y")
eq_(f1.mock.mock_calls, [call("x")])
eq_(f2.mock.mock_calls, [call("x"), call("y")])
def test_once(self):
Target = self._fixture()
m1 = Mock()
m2 = Mock()
m3 = Mock()
m4 = Mock()
event.listen(Target, "event_one", m1)
event.listen(Target, "event_one", m2, once=True)
event.listen(Target, "event_one", m3, once=True)
t1 = Target()
t1.dispatch.event_one("x")
t1.dispatch.event_one("y")
event.listen(Target, "event_one", m4, once=True)
t1.dispatch.event_one("z")
t1.dispatch.event_one("q")
eq_(m1.mock_calls, [call("x"), call("y"), call("z"), call("q")])
eq_(m2.mock_calls, [call("x")])
eq_(m3.mock_calls, [call("x")])
eq_(m4.mock_calls, [call("z")])
def test_propagate(self):
Target = self._fixture()
m1 = Mock()
t1 = Target()
t2 = Target()
event.listen(t1, "event_one", m1, propagate=True)
event.listen(t1, "event_two", m1, propagate=False)
t2.dispatch._update(t1.dispatch)
t1.dispatch.event_one("t1e1x")
t1.dispatch.event_two("t1e2x")
t2.dispatch.event_one("t2e1x")
t2.dispatch.event_two("t2e2x")
event.remove(t1, "event_one", m1)
event.remove(t1, "event_two", m1)
t1.dispatch.event_one("t1e1y")
t1.dispatch.event_two("t1e2y")
t2.dispatch.event_one("t2e1y")
t2.dispatch.event_two("t2e2y")
eq_(m1.mock_calls,
[call('t1e1x'), call('t1e2x'),
call('t2e1x')])
@testing.requires.predictable_gc
def test_listener_collection_removed_cleanup(self):
from sqlalchemy.event import registry
Target = self._fixture()
m1 = Mock()
t1 = Target()
event.listen(t1, "event_one", m1)
key = (id(t1), "event_one", id(m1))
assert key in registry._key_to_collection
collection_ref = list(registry._key_to_collection[key])[0]
assert collection_ref in registry._collection_to_key
t1.dispatch.event_one("t1")
del t1
gc_collect()
assert key not in registry._key_to_collection
assert collection_ref not in registry._collection_to_key
def test_remove_not_listened(self):
Target = self._fixture()
m1 = Mock()
t1 = Target()
event.listen(t1, "event_one", m1, propagate=True)
event.listen(t1, "event_three", m1)
event.remove(t1, "event_one", m1)
assert_raises_message(
exc.InvalidRequestError,
r"No listeners found for event <.*Target.*> / 'event_two' / <Mock.*> ",
event.remove, t1, "event_two", m1
)
event.remove(t1, "event_three", m1)
def test_remove_plain_named(self):
Target = self._fixture()
listen_one = Mock()
t1 = Target()
event.listen(t1, "event_one", listen_one, named=True)
t1.dispatch.event_one("t1")
eq_(listen_one.mock_calls, [call(x="t1")])
event.remove(t1, "event_one", listen_one)
t1.dispatch.event_one("t2")
eq_(listen_one.mock_calls, [call(x="t1")])
def test_remove_wrapped_named(self):
Target = self._wrapped_fixture()
listen_one = Mock()
t1 = Target()
event.listen(t1, "event_one", listen_one, named=True)
t1.dispatch.event_one("t1")
eq_(listen_one.mock_calls, [call(x="adapted t1")])
event.remove(t1, "event_one", listen_one)
t1.dispatch.event_one("t2")
eq_(listen_one.mock_calls, [call(x="adapted t1")])
def test_double_event_nonwrapped(self):
Target = self._fixture()
listen_one = Mock()
t1 = Target()
event.listen(t1, "event_one", listen_one)
event.listen(t1, "event_one", listen_one)
t1.dispatch.event_one("t1")
# doubles are eliminated
eq_(listen_one.mock_calls, [call("t1")])
# only one remove needed
event.remove(t1, "event_one", listen_one)
t1.dispatch.event_one("t2")
eq_(listen_one.mock_calls, [call("t1")])
def test_double_event_wrapped(self):
# this is issue #3199
Target = self._wrapped_fixture()
listen_one = Mock()
t1 = Target()
event.listen(t1, "event_one", listen_one)
event.listen(t1, "event_one", listen_one)
t1.dispatch.event_one("t1")
# doubles are eliminated
eq_(listen_one.mock_calls, [call("adapted t1")])
# only one remove needed
event.remove(t1, "event_one", listen_one)
t1.dispatch.event_one("t2")
eq_(listen_one.mock_calls, [call("adapted t1")])
|
|
from __future__ import absolute_import
import functools
import six
from collections import defaultdict, Iterable, OrderedDict
from dateutil.parser import parse as parse_datetime
from pytz import UTC
from django.core.cache import cache
from sentry import options
from sentry.api.event_search import FIELD_ALIASES, PROJECT_ALIAS, USER_DISPLAY_ALIAS
from sentry.models import Project
from sentry.api.utils import default_start_end_dates
from sentry.snuba.dataset import Dataset
from sentry.tagstore import TagKeyStatus
from sentry.tagstore.base import TagStorage, TOP_VALUES_DEFAULT_LIMIT
from sentry.tagstore.exceptions import (
GroupTagKeyNotFound,
GroupTagValueNotFound,
TagKeyNotFound,
TagValueNotFound,
)
from sentry.tagstore.types import TagKey, TagValue, GroupTagKey, GroupTagValue
from sentry.utils import snuba, metrics
from sentry.utils.hashlib import md5_text
from sentry.utils.dates import to_timestamp
from sentry_relay.consts import SPAN_STATUS_CODE_TO_NAME
SEEN_COLUMN = "timestamp"
# columns we want to exclude from methods that return
# all values for a given tag/column
BLACKLISTED_COLUMNS = frozenset(["project_id"])
FUZZY_NUMERIC_KEYS = frozenset(
["stack.colno", "stack.in_app", "stack.lineno", "stack.stack_level", "transaction.duration"]
)
FUZZY_NUMERIC_DISTANCE = 50
tag_value_data_transformers = {"first_seen": parse_datetime, "last_seen": parse_datetime}
def fix_tag_value_data(data):
for key, transformer in tag_value_data_transformers.items():
if key in data:
data[key] = transformer(data[key]).replace(tzinfo=UTC)
return data
def get_project_list(project_id):
return project_id if isinstance(project_id, Iterable) else [project_id]
class SnubaTagStorage(TagStorage):
def __get_tag_key(self, project_id, group_id, environment_id, key):
tag = u"tags[{}]".format(key)
filters = {"project_id": get_project_list(project_id)}
if environment_id:
filters["environment"] = [environment_id]
if group_id is not None:
filters["group_id"] = [group_id]
conditions = [[tag, "!=", ""]]
aggregations = [["uniq", tag, "values_seen"], ["count()", "", "count"]]
result = snuba.query(
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
referrer="tagstore.__get_tag_key",
)
if result is None or result["count"] == 0:
raise TagKeyNotFound if group_id is None else GroupTagKeyNotFound
else:
data = {"key": key, "values_seen": result["values_seen"], "count": result["count"]}
if group_id is None:
return TagKey(**data)
else:
return GroupTagKey(group_id=group_id, **data)
def __get_tag_key_and_top_values(
self, project_id, group_id, environment_id, key, limit=3, raise_on_empty=True, **kwargs
):
tag = u"tags[{}]".format(key)
filters = {"project_id": get_project_list(project_id)}
if environment_id:
filters["environment"] = [environment_id]
if group_id is not None:
filters["group_id"] = [group_id]
conditions = kwargs.get("conditions", [])
aggregations = kwargs.get("aggregations", [])
conditions.append([tag, "!=", ""])
aggregations += [
["uniq", tag, "values_seen"],
["count()", "", "count"],
["min", SEEN_COLUMN, "first_seen"],
["max", SEEN_COLUMN, "last_seen"],
]
result, totals = snuba.query(
start=kwargs.get("start"),
end=kwargs.get("end"),
groupby=[tag],
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
orderby="-count",
limit=limit,
totals=True,
referrer="tagstore.__get_tag_key_and_top_values",
)
if raise_on_empty and (not result or totals.get("count", 0) == 0):
raise TagKeyNotFound if group_id is None else GroupTagKeyNotFound
else:
if group_id is None:
key_ctor = TagKey
value_ctor = TagValue
else:
key_ctor = functools.partial(GroupTagKey, group_id=group_id)
value_ctor = functools.partial(GroupTagValue, group_id=group_id)
top_values = [
value_ctor(
key=key,
value=value,
times_seen=data["count"],
first_seen=parse_datetime(data["first_seen"]),
last_seen=parse_datetime(data["last_seen"]),
)
for value, data in six.iteritems(result)
]
return key_ctor(
key=key,
values_seen=totals.get("values_seen", 0),
count=totals.get("count", 0),
top_values=top_values,
)
def __get_tag_keys(
self,
project_id,
group_id,
environment_ids,
limit=1000,
keys=None,
include_values_seen=True,
**kwargs
):
return self.__get_tag_keys_for_projects(
get_project_list(project_id),
group_id,
environment_ids,
kwargs.get("start"),
kwargs.get("end"),
limit,
keys,
include_values_seen=include_values_seen,
)
def __get_tag_keys_for_projects(
self,
projects,
group_id,
environments,
start,
end,
limit=1000,
keys=None,
include_values_seen=True,
use_cache=False,
**kwargs
):
""" Query snuba for tag keys based on projects
When use_cache is passed, we'll attempt to use the cache. There's an exception if group_id was passed
which refines the query enough caching isn't required.
The cache key is based on the filters being passed so that different queries don't hit the same cache, with
exceptions for start and end dates. Since even a microsecond passing would result in a different caching
key, which means always missing the cache.
Instead, to keep the cache key the same for a short period we append the duration, and the end time rounded
with a certain jitter to the cache key.
This jitter is based on the hash of the key before duration/end time is added for consistency per query.
The jitter's intent is to avoid a dogpile effect of many queries being invalidated at the same time.
This is done by changing the rounding of the end key to a random offset. See snuba.quantize_time for
further explanation of how that is done.
"""
default_start, default_end = default_start_end_dates()
if start is None:
start = default_start
if end is None:
end = default_end
filters = {"project_id": sorted(projects)}
if environments:
filters["environment"] = sorted(environments)
if group_id is not None:
filters["group_id"] = [group_id]
if keys is not None:
filters["tags_key"] = sorted(keys)
aggregations = [["count()", "", "count"]]
if include_values_seen:
aggregations.append(["uniq", "tags_value", "values_seen"])
conditions = []
should_cache = use_cache and group_id is None
result = None
if should_cache:
filtering_strings = [
u"{}={}".format(key, value) for key, value in six.iteritems(filters)
]
cache_key = u"tagstore.__get_tag_keys:{}".format(
md5_text(*filtering_strings).hexdigest()
)
key_hash = hash(cache_key)
should_cache = (key_hash % 1000) / 1000.0 <= options.get(
"snuba.tagstore.cache-tagkeys-rate"
)
# If we want to continue attempting to cache after checking against the cache rate
if should_cache:
# Needs to happen before creating the cache suffix otherwise rounding will cause different durations
duration = (end - start).total_seconds()
                # Because there's rounding to create this cache suffix, we
                # want to update the query end so results match.
end = snuba.quantize_time(end, key_hash)
cache_key += u":{}@{}".format(duration, end.isoformat())
result = cache.get(cache_key, None)
if result is not None:
metrics.incr("testing.tagstore.cache_tag_key.hit")
else:
metrics.incr("testing.tagstore.cache_tag_key.miss")
if result is None:
result = snuba.query(
start=start,
end=end,
groupby=["tags_key"],
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
limit=limit,
orderby="-count",
referrer="tagstore.__get_tag_keys",
**kwargs
)
if should_cache:
cache.set(cache_key, result, 300)
metrics.incr("testing.tagstore.cache_tag_key.len", amount=len(result))
if group_id is None:
ctor = TagKey
else:
ctor = functools.partial(GroupTagKey, group_id=group_id)
results = set()
for key, data in six.iteritems(result):
params = {"key": key}
if include_values_seen:
params["values_seen"] = data["values_seen"]
params["count"] = data["count"]
else:
# If only one aggregate is requested then data is just that raw
# aggregate value, rather than a dictionary of
# key:aggregate_value pairs
params["count"] = data
results.add(ctor(**params))
return results
def __get_tag_value(self, project_id, group_id, environment_id, key, value):
tag = u"tags[{}]".format(key)
filters = {"project_id": get_project_list(project_id)}
if environment_id:
filters["environment"] = [environment_id]
if group_id is not None:
filters["group_id"] = [group_id]
conditions = [[tag, "=", value]]
aggregations = [
["count()", "", "times_seen"],
["min", SEEN_COLUMN, "first_seen"],
["max", SEEN_COLUMN, "last_seen"],
]
data = snuba.query(
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
referrer="tagstore.__get_tag_value",
)
if not data["times_seen"] > 0:
raise TagValueNotFound if group_id is None else GroupTagValueNotFound
else:
data.update({"key": key, "value": value})
if group_id is None:
return TagValue(**fix_tag_value_data(data))
else:
return GroupTagValue(group_id=group_id, **fix_tag_value_data(data))
def get_tag_key(self, project_id, environment_id, key, status=TagKeyStatus.VISIBLE, **kwargs):
assert status is TagKeyStatus.VISIBLE
return self.__get_tag_key_and_top_values(project_id, None, environment_id, key, **kwargs)
def get_tag_keys(
self, project_id, environment_id, status=TagKeyStatus.VISIBLE, include_values_seen=False
):
assert status is TagKeyStatus.VISIBLE
return self.__get_tag_keys(project_id, None, environment_id and [environment_id])
def get_tag_keys_for_projects(
self, projects, environments, start, end, status=TagKeyStatus.VISIBLE, use_cache=False
):
MAX_UNSAMPLED_PROJECTS = 50
# We want to disable FINAL in the snuba query to reduce load.
optimize_kwargs = {"turbo": True}
# If we are fetching less than MAX_UNSAMPLED_PROJECTS, then disable
# the sampling that turbo enables so that we get more accurate results.
# We only want sampling when we have a large number of projects, so
# that we don't cause performance issues for Snuba.
if len(projects) <= MAX_UNSAMPLED_PROJECTS:
optimize_kwargs["sample"] = 1
return self.__get_tag_keys_for_projects(
projects,
None,
environments,
start,
end,
include_values_seen=False,
use_cache=use_cache,
**optimize_kwargs
)
def get_tag_value(self, project_id, environment_id, key, value):
return self.__get_tag_value(project_id, None, environment_id, key, value)
def get_tag_values(self, project_id, environment_id, key):
key = self.__get_tag_key_and_top_values(
project_id, None, environment_id, key, limit=None, raise_on_empty=False
)
return set(key.top_values)
def get_group_tag_key(self, project_id, group_id, environment_id, key):
return self.__get_tag_key_and_top_values(
project_id, group_id, environment_id, key, limit=TOP_VALUES_DEFAULT_LIMIT
)
def get_group_tag_keys(
self, project_id, group_id, environment_ids, limit=None, keys=None, **kwargs
):
return self.__get_tag_keys(
project_id,
group_id,
environment_ids,
limit=limit,
keys=keys,
include_values_seen=False,
**kwargs
)
def get_group_tag_value(self, project_id, group_id, environment_id, key, value):
return self.__get_tag_value(project_id, group_id, environment_id, key, value)
def get_group_tag_values(self, project_id, group_id, environment_id, key):
# NB this uses a 'top' values function, but the limit is None so it should
# return all values for this key.
key = self.__get_tag_key_and_top_values(
project_id, group_id, environment_id, key, limit=None, raise_on_empty=False
)
return set(key.top_values)
def get_group_list_tag_value(self, project_ids, group_id_list, environment_ids, key, value):
tag = u"tags[{}]".format(key)
filters = {"project_id": project_ids, "group_id": group_id_list}
if environment_ids:
filters["environment"] = environment_ids
conditions = [[tag, "=", value]]
aggregations = [
["count()", "", "times_seen"],
["min", SEEN_COLUMN, "first_seen"],
["max", SEEN_COLUMN, "last_seen"],
]
result = snuba.query(
groupby=["group_id"],
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
referrer="tagstore.get_group_list_tag_value",
)
return {
issue: GroupTagValue(group_id=issue, key=key, value=value, **fix_tag_value_data(data))
for issue, data in six.iteritems(result)
}
def get_group_seen_values_for_environments(
self, project_ids, group_id_list, environment_ids, start=None, end=None
):
# Get the total times seen, first seen, and last seen across multiple environments
filters = {"project_id": project_ids, "group_id": group_id_list}
if environment_ids:
filters["environment"] = environment_ids
aggregations = [
["count()", "", "times_seen"],
["min", SEEN_COLUMN, "first_seen"],
["max", SEEN_COLUMN, "last_seen"],
]
result = snuba.query(
start=start,
end=end,
groupby=["group_id"],
conditions=None,
filter_keys=filters,
aggregations=aggregations,
referrer="tagstore.get_group_seen_values_for_environments",
)
return {issue: fix_tag_value_data(data) for issue, data in six.iteritems(result)}
def get_group_tag_value_count(self, project_id, group_id, environment_id, key):
tag = u"tags[{}]".format(key)
filters = {"project_id": get_project_list(project_id), "group_id": [group_id]}
if environment_id:
filters["environment"] = [environment_id]
conditions = [[tag, "!=", ""]]
aggregations = [["count()", "", "count"]]
return snuba.query(
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
referrer="tagstore.get_group_tag_value_count",
)
def get_top_group_tag_values(
self, project_id, group_id, environment_id, key, limit=TOP_VALUES_DEFAULT_LIMIT
):
tag = self.__get_tag_key_and_top_values(project_id, group_id, environment_id, key, limit)
return tag.top_values
def get_group_tag_keys_and_top_values(
self,
project_id,
group_id,
environment_ids,
user=None,
keys=None,
value_limit=TOP_VALUES_DEFAULT_LIMIT,
**kwargs
):
# Similar to __get_tag_key_and_top_values except we get the top values
# for all the keys provided. value_limit in this case means the number
# of top values for each key, so the total rows returned should be
        # num_keys * value_limit.
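        # (E.g. 10 keys with value_limit=5 yields at most 10 * 5 = 50 rows from
        # the snuba query below.)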
# First get totals and unique counts by key.
keys_with_counts = self.get_group_tag_keys(project_id, group_id, environment_ids, keys=keys)
# Then get the top values with first_seen/last_seen/count for each
filters = {"project_id": get_project_list(project_id)}
if environment_ids:
filters["environment"] = environment_ids
if keys is not None:
filters["tags_key"] = keys
if group_id is not None:
filters["group_id"] = [group_id]
conditions = kwargs.get("conditions", [])
aggregations = kwargs.get("aggregations", [])
aggregations += [
["count()", "", "count"],
["min", SEEN_COLUMN, "first_seen"],
["max", SEEN_COLUMN, "last_seen"],
]
values_by_key = snuba.query(
start=kwargs.get("start"),
end=kwargs.get("end"),
groupby=["tags_key", "tags_value"],
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
orderby="-count",
limitby=[value_limit, "tags_key"],
referrer="tagstore.__get_tag_keys_and_top_values",
)
# Then supplement the key objects with the top values for each.
if group_id is None:
value_ctor = TagValue
else:
value_ctor = functools.partial(GroupTagValue, group_id=group_id)
for keyobj in keys_with_counts:
key = keyobj.key
values = values_by_key.get(key, [])
keyobj.top_values = [
value_ctor(
key=keyobj.key,
value=value,
times_seen=data["count"],
first_seen=parse_datetime(data["first_seen"]),
last_seen=parse_datetime(data["last_seen"]),
)
for value, data in six.iteritems(values)
]
return keys_with_counts
def __get_release(self, project_id, group_id, first=True):
filters = {"project_id": get_project_list(project_id)}
conditions = [["tags[sentry:release]", "IS NOT NULL", None]]
if group_id is not None:
filters["group_id"] = [group_id]
aggregations = [["min" if first else "max", SEEN_COLUMN, "seen"]]
orderby = "seen" if first else "-seen"
result = snuba.query(
groupby=["tags[sentry:release]"],
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
limit=1,
orderby=orderby,
referrer="tagstore.__get_release",
)
if not result:
return None
else:
return list(result.keys())[0]
def get_first_release(self, project_id, group_id):
return self.__get_release(project_id, group_id, True)
def get_last_release(self, project_id, group_id):
return self.__get_release(project_id, group_id, False)
def get_release_tags(self, project_ids, environment_id, versions):
filters = {"project_id": project_ids}
if environment_id:
filters["environment"] = [environment_id]
# NB we add release as a condition rather than a filter because
# this method is already dealing with version strings rather than
# release ids which would need to be translated by the snuba util.
tag = "sentry:release"
col = u"tags[{}]".format(tag)
conditions = [[col, "IN", versions]]
aggregations = [
["count()", "", "times_seen"],
["min", SEEN_COLUMN, "first_seen"],
["max", SEEN_COLUMN, "last_seen"],
]
result = snuba.query(
groupby=["project_id", col],
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
referrer="tagstore.get_release_tags",
)
values = []
for project_data in six.itervalues(result):
for value, data in six.iteritems(project_data):
values.append(TagValue(key=tag, value=value, **fix_tag_value_data(data)))
return set(values)
def get_group_ids_for_users(self, project_ids, event_users, limit=100):
filters = {"project_id": project_ids}
conditions = [
["tags[sentry:user]", "IN", [_f for _f in [eu.tag_value for eu in event_users] if _f]]
]
aggregations = [["max", SEEN_COLUMN, "last_seen"]]
result = snuba.query(
groupby=["group_id"],
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
limit=limit,
orderby="-last_seen",
referrer="tagstore.get_group_ids_for_users",
)
return set(result.keys())
def get_group_tag_values_for_users(self, event_users, limit=100):
filters = {"project_id": [eu.project_id for eu in event_users]}
conditions = [
["tags[sentry:user]", "IN", [_f for _f in [eu.tag_value for eu in event_users] if _f]]
]
aggregations = [
["count()", "", "times_seen"],
["min", SEEN_COLUMN, "first_seen"],
["max", SEEN_COLUMN, "last_seen"],
]
result = snuba.query(
groupby=["group_id", "user_id"],
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
orderby="-last_seen",
limit=limit,
referrer="tagstore.get_group_tag_values_for_users",
)
values = []
for issue, users in six.iteritems(result):
for name, data in six.iteritems(users):
values.append(
GroupTagValue(
group_id=issue, key="sentry:user", value=name, **fix_tag_value_data(data)
)
)
return values
def get_groups_user_counts(self, project_ids, group_ids, environment_ids, start=None, end=None):
filters = {"project_id": project_ids, "group_id": group_ids}
if environment_ids:
filters["environment"] = environment_ids
aggregations = [["uniq", "tags[sentry:user]", "count"]]
result = snuba.query(
start=start,
end=end,
groupby=["group_id"],
conditions=None,
filter_keys=filters,
aggregations=aggregations,
referrer="tagstore.get_groups_user_counts",
)
return defaultdict(int, {k: v for k, v in result.items() if v})
def get_tag_value_paginator(
self,
project_id,
environment_id,
key,
start=None,
end=None,
query=None,
order_by="-last_seen",
):
return self.get_tag_value_paginator_for_projects(
get_project_list(project_id),
[environment_id] if environment_id else None,
key,
start=start,
end=end,
query=query,
order_by=order_by,
)
def get_tag_value_paginator_for_projects(
self,
projects,
environments,
key,
start=None,
end=None,
query=None,
order_by="-last_seen",
include_transactions=False,
):
from sentry.api.paginator import SequencePaginator
if not order_by == "-last_seen":
raise ValueError("Unsupported order_by: %s" % order_by)
dataset = Dataset.Events
snuba_key = snuba.get_snuba_column_name(key)
if include_transactions and snuba_key.startswith("tags["):
snuba_key = snuba.get_snuba_column_name(key, dataset=Dataset.Discover)
if not snuba_key.startswith("tags["):
dataset = Dataset.Discover
        # We cannot search the values of these columns like we do other columns because they are
        # a different type, and as such, LIKE and != do not work on them. Furthermore, the use
        # case for these values in autosuggestion is minimal, so we choose to disable them here.
        #
        # event_id: This is a FixedString which prevents us from using LIKE on it when searching,
        #           but it does work with !=. However, for consistency's sake we disallow it
        #           entirely; furthermore, suggesting an event_id is not a very useful feature
        #           as event_ids are not human readable.
        # timestamp: This is a DateTime which prevents us from using both LIKE and != on it when
        #            searching. Suggesting a timestamp could potentially be useful, but since it
        #            does not work at all, we opt to disable it here. A potential solution would
        #            be to generate a time range to bound where they are searching, e.g. if a user
        #            enters 2020-07 we can generate the following conditions:
        #            >= 2020-07-01T00:00:00 AND <= 2020-07-31T23:59:59
        # time: This is a column computed from timestamp, so it suffers from the same issues.
if snuba_key in {"event_id", "timestamp", "time"}:
return SequencePaginator([])
# These columns have fixed values and we don't need to emit queries to find out the
# potential options.
if key in {"error.handled", "error.unhandled"}:
return SequencePaginator(
[
(
1,
TagValue(
key=key, value="true", times_seen=None, first_seen=None, last_seen=None
),
),
(
2,
TagValue(
key=key, value="false", times_seen=None, first_seen=None, last_seen=None
),
),
]
)
conditions = []
# transaction status needs a special case so that the user interacts with the names and not codes
transaction_status = snuba_key == "transaction_status"
if include_transactions and transaction_status:
# Here we want to use the status codes during filtering,
# but want to do this with names that include our query
status_codes = [
span_key
for span_key, value in six.iteritems(SPAN_STATUS_CODE_TO_NAME)
if (query and query in value) or (not query)
]
if status_codes:
conditions.append([snuba_key, "IN", status_codes])
else:
return SequencePaginator([])
elif key in FUZZY_NUMERIC_KEYS:
converted_query = int(query) if query is not None and query.isdigit() else None
if converted_query is not None:
conditions.append([snuba_key, ">=", converted_query - FUZZY_NUMERIC_DISTANCE])
conditions.append([snuba_key, "<=", converted_query + FUZZY_NUMERIC_DISTANCE])
elif include_transactions and key == PROJECT_ALIAS:
project_filters = {
"id__in": projects,
}
if query:
project_filters["slug__icontains"] = query
project_queryset = Project.objects.filter(**project_filters).values("id", "slug")
if not project_queryset.exists():
return SequencePaginator([])
project_slugs = {project["id"]: project["slug"] for project in project_queryset}
projects = [project["id"] for project in project_queryset]
snuba_key = "project_id"
dataset = Dataset.Discover
else:
snuba_name = snuba_key
is_user_alias = include_transactions and key == USER_DISPLAY_ALIAS
if is_user_alias:
# user.alias is a pseudo column in discover. It is computed by coalescing
                # together multiple user attributes. Here we get the coalesce function used,
# and resolve it to the corresponding snuba query
dataset = Dataset.Discover
resolver = snuba.resolve_column(dataset)
snuba_name = FIELD_ALIASES[USER_DISPLAY_ALIAS].get_field()
snuba.resolve_complex_column(snuba_name, resolver)
elif snuba_name in BLACKLISTED_COLUMNS:
snuba_name = "tags[%s]" % (key,)
if query:
conditions.append([snuba_name, "LIKE", u"%{}%".format(query)])
else:
conditions.append([snuba_name, "!=", ""])
filters = {"project_id": projects}
if environments:
filters["environment"] = environments
results = snuba.query(
dataset=dataset,
start=start,
end=end,
groupby=[snuba_key],
filter_keys=filters,
aggregations=[
["count()", "", "times_seen"],
["min", "timestamp", "first_seen"],
["max", "timestamp", "last_seen"],
],
conditions=conditions,
orderby=order_by,
# TODO: This means they can't actually paginate all TagValues.
limit=1000,
arrayjoin=snuba.get_arrayjoin(snuba_key),
referrer="tagstore.get_tag_value_paginator_for_projects",
)
if include_transactions:
# With transaction_status we need to map the ids back to their names
if transaction_status:
results = OrderedDict(
[
(SPAN_STATUS_CODE_TO_NAME[result_key], data)
for result_key, data in six.iteritems(results)
]
)
# With project names we map the ids back to the project slugs
elif key == PROJECT_ALIAS:
results = OrderedDict(
[
(project_slugs[value], data)
for value, data in six.iteritems(results)
if value in project_slugs
]
)
tag_values = [
TagValue(key=key, value=six.text_type(value), **fix_tag_value_data(data))
for value, data in six.iteritems(results)
]
desc = order_by.startswith("-")
score_field = order_by.lstrip("-")
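        # Scores are millisecond timestamps of the ordering field, so the
        # paginator pages through values in the requested -last_seen order.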
return SequencePaginator(
[(int(to_timestamp(getattr(tv, score_field)) * 1000), tv) for tv in tag_values],
reverse=desc,
)
def get_group_tag_value_iter(
self, project_id, group_id, environment_ids, key, callbacks=(), limit=1000, offset=0
):
filters = {
"project_id": get_project_list(project_id),
"tags_key": [key],
"group_id": [group_id],
}
if environment_ids:
filters["environment"] = environment_ids
results = snuba.query(
groupby=["tags_value"],
filter_keys=filters,
aggregations=[
["count()", "", "times_seen"],
["min", "timestamp", "first_seen"],
["max", "timestamp", "last_seen"],
],
orderby="-first_seen", # Closest thing to pre-existing `-id` order
limit=limit,
referrer="tagstore.get_group_tag_value_iter",
offset=offset,
)
group_tag_values = [
GroupTagValue(group_id=group_id, key=key, value=value, **fix_tag_value_data(data))
for value, data in six.iteritems(results)
]
for cb in callbacks:
cb(group_tag_values)
return group_tag_values
def get_group_tag_value_paginator(
self, project_id, group_id, environment_ids, key, order_by="-id"
):
from sentry.api.paginator import SequencePaginator
if order_by in ("-last_seen", "-first_seen"):
pass
elif order_by == "-id":
# Snuba has no unique id per GroupTagValue so we'll substitute `-first_seen`
order_by = "-first_seen"
else:
raise ValueError("Unsupported order_by: %s" % order_by)
group_tag_values = self.get_group_tag_value_iter(project_id, group_id, environment_ids, key)
desc = order_by.startswith("-")
score_field = order_by.lstrip("-")
return SequencePaginator(
[
(int(to_timestamp(getattr(gtv, score_field)) * 1000), gtv)
for gtv in group_tag_values
],
reverse=desc,
)
def get_group_tag_value_qs(self, project_id, group_id, environment_id, key, value=None):
# This method is not implemented because it is only used by the Django
# search backend.
raise NotImplementedError
def get_group_event_filter(self, project_id, group_id, environment_ids, tags, start, end):
filters = {"project_id": get_project_list(project_id), "group_id": [group_id]}
if environment_ids:
filters["environment"] = environment_ids
conditions = []
for tag_name, tag_val in tags.items():
operator = "IN" if isinstance(tag_val, list) else "="
conditions.append([u"tags[{}]".format(tag_name), operator, tag_val])
result = snuba.raw_query(
start=start,
end=end,
selected_columns=["event_id"],
conditions=conditions,
orderby="-timestamp",
filter_keys=filters,
limit=1000,
referrer="tagstore.get_group_event_filter",
)
event_id_set = set(row["event_id"] for row in result["data"])
if not event_id_set:
return None
return {"event_id__in": event_id_set}
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Nonlinear conjugate gradient algorithm"""
from typing import Any
from typing import Callable
from typing import NamedTuple
from typing import Optional
from dataclasses import dataclass
import jax
import jax.numpy as jnp
from jaxopt._src import base
from jaxopt._src.backtracking_linesearch import BacktrackingLineSearch
from jaxopt.tree_util import tree_vdot
from jaxopt.tree_util import tree_scalar_mul
from jaxopt.tree_util import tree_add_scalar_mul
from jaxopt.tree_util import tree_sub
from jaxopt.tree_util import tree_div
from jaxopt.tree_util import tree_l2_norm
class NonlinearCGState(NamedTuple):
"""Named tuple containing state information."""
iter_num: int
stepsize: float
error: float
value: float
  grad: Any
descent_direction: jnp.ndarray
aux: Optional[Any] = None
@dataclass(eq=False)
class NonlinearCG(base.IterativeSolver):
"""
Nonlinear Conjugate Gradient Solver.
Attributes:
fun: a smooth function of the form ``fun(x, *args, **kwargs)``.
    method: which variant to use when computing the beta parameter in nonlinear CG:
"polak-ribiere", "fletcher-reeves", "hestenes-stiefel"
(default: "polak-ribiere")
has_aux: whether function fun outputs one (False) or more values (True).
When True it will be assumed by default that fun(...)[0] is the objective.
    maxiter: maximum number of iterations.
tol: tolerance of the stopping criterion.
maxls: maximum number of iterations to use in the line search.
decrease_factor: factor by which to decrease the stepsize during line search
(default: 0.8).
increase_factor: factor by which to increase the stepsize during line search
(default: 1.2).
implicit_diff: whether to enable implicit diff or autodiff of unrolled
iterations.
implicit_diff_solve: the linear system solver to use.
jit: whether to JIT-compile the optimization loop (default: "auto").
unroll: whether to unroll the optimization loop (default: "auto").
verbose: whether to print error on every iteration or not.
Warning: verbose=True will automatically disable jit.
Reference:
Jorge Nocedal and Stephen Wright.
Numerical Optimization, second edition.
Algorithm 5.4 (page 121).
"""
fun: Callable
has_aux: bool = False
maxiter: int = 100
tol: float = 1e-3
method: str = "polak-ribiere" # same as SciPy
condition: str = "strong-wolfe"
maxls: int = 15
decrease_factor: float = 0.8
increase_factor: float = 1.2
implicit_diff: bool = True
implicit_diff_solve: Optional[Callable] = None
jit: base.AutoOrBoolean = "auto"
unroll: base.AutoOrBoolean = "auto"
verbose: int = 0
def init_state(self,
init_params: Any,
*args,
**kwargs) -> NonlinearCGState:
"""Initialize the solver state.
Args:
init_params: pytree containing the initial parameters.
*args: additional positional arguments to be passed to ``fun``.
**kwargs: additional keyword arguments to be passed to ``fun``.
Returns:
state
"""
value, grad = self._value_and_grad_fun(init_params, *args, **kwargs)
return NonlinearCGState(iter_num=jnp.asarray(0),
stepsize=jnp.asarray(1.0),
error=jnp.asarray(jnp.inf),
value=value,
grad=grad,
descent_direction=tree_scalar_mul(-1.0, grad))
def update(self,
params: Any,
state: NonlinearCGState,
*args,
**kwargs) -> base.OptStep:
"""Performs one iteration of Fletcher-Reeves Algorithm.
Args:
params: pytree containing the parameters.
state: named tuple containing the solver state.
*args: additional positional arguments to be passed to ``fun``.
**kwargs: additional keyword arguments to be passed to ``fun``.
Returns:
(params, state)
"""
eps = 1e-6
value, grad, descent_direction = state.value, state.grad, state.descent_direction
init_stepsize = state.stepsize * self.increase_factor
ls = BacktrackingLineSearch(fun=self._value_and_grad_fun,
value_and_grad=True,
maxiter=self.maxls,
decrease_factor=self.decrease_factor,
condition=self.condition)
new_stepsize, ls_state = ls.run(init_stepsize=init_stepsize,
params=params,
value=value,
grad=grad,
*args, **kwargs)
new_params = tree_add_scalar_mul(params, new_stepsize, descent_direction)
(new_value, new_aux), new_grad = self._value_and_grad_with_aux(new_params, *args, **kwargs)
if self.method == "polak-ribiere":
# See Numerical Optimization, second edition, equation (5.44).
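      # beta = <g_{k+1}, g_{k+1} - g_k> / <g_k, g_k>, clipped below at zero
      # (the relu gives the "PR+" variant).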
gTg = tree_vdot(grad, grad)
gTg = jnp.where(gTg >= eps, gTg, eps)
new_beta = tree_div(tree_vdot(new_grad, tree_sub(new_grad, grad)), gTg)
new_beta = jax.nn.relu(new_beta)
elif self.method == "fletcher-reeves":
# See Numerical Optimization, second edition, equation (5.41a).
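      # beta = <g_{k+1}, g_{k+1}> / <g_k, g_k>.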
gTg = tree_vdot(grad, grad)
gTg = jnp.where(gTg >= eps, gTg, eps)
new_beta = tree_div(tree_vdot(new_grad, new_grad), gTg)
elif self.method == 'hestenes-stiefel':
# See Numerical Optimization, second edition, equation (5.45).
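      # beta = <g_{k+1}, g_{k+1} - g_k> / <d_k, g_{k+1} - g_k>.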
grad_diff = tree_sub(new_grad, grad)
dTg = tree_vdot(descent_direction, grad_diff)
dTg = jnp.where(dTg >= eps, dTg, eps)
new_beta = tree_div(tree_vdot(new_grad, grad_diff), dTg)
else:
raise ValueError("method should be either 'polak-ribiere', 'fletcher-reeves', or 'hestenes-stiefel'")
new_descent_direction = tree_add_scalar_mul(tree_scalar_mul(-1, new_grad), new_beta, descent_direction)
new_state = NonlinearCGState(iter_num=state.iter_num + 1,
stepsize=jnp.asarray(new_stepsize),
                                 error=tree_l2_norm(new_grad),
value=new_value,
grad=new_grad,
descent_direction=new_descent_direction,
aux=new_aux)
return base.OptStep(params=new_params, state=new_state)
def optimality_fun(self, params, *args, **kwargs):
"""Optimality function mapping compatible with ``@custom_root``."""
return self._grad_fun(params, *args, **kwargs)
def _value_and_grad_fun(self, params, *args, **kwargs):
(value, aux), grad = self._value_and_grad_with_aux(params, *args, **kwargs)
return value, grad
def _grad_fun(self, params, *args, **kwargs):
return self._value_and_grad_fun(params, *args, **kwargs)[1]
def __post_init__(self):
if self.has_aux:
self._fun = lambda *a, **kw: self.fun(*a, **kw)[0]
fun_with_aux = self.fun
else:
self._fun = self.fun
fun_with_aux = lambda *a, **kw: (self.fun(*a, **kw), None)
self._value_and_grad_with_aux = jax.value_and_grad(fun_with_aux,
has_aux=True)
self.reference_signature = self.fun
|
|
"""
mbed SDK
Copyright (c) 2011-2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Title: GNU ARM Eclipse (http://gnuarmeclipse.github.io) exporter.
Description: Creates a managed build project that can be imported by
the GNU ARM Eclipse plug-ins.
Author: Liviu Ionescu <ilg@livius.net>
"""
from __future__ import print_function, absolute_import
from builtins import str
import os
import copy
import tempfile
import shutil
from subprocess import call, Popen, PIPE
from os.path import splitext, basename, relpath, dirname, exists, join
from random import randint
from json import load
from tools.export.exporters import Exporter, apply_supported_whitelist
from tools.options import list_profiles
from tools.targets import TARGET_MAP
from tools.utils import NotSupportedException
from tools.build_api import prepare_toolchain
# =============================================================================
class UID:
"""
Helper class, used to generate unique ids required by .cproject symbols.
"""
@property
def id(self):
return "%0.9u" % randint(0, 999999999)
# Global UID generator instance.
# Passed to the template engine, and referred to as {{u.id}}.
# Each invocation generates a new number.
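# For example, one expansion of {{u.id}} might yield '007305628', and the next
# expansion a different zero-padded 9-digit value.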
u = UID()
# =============================================================================
POST_BINARY_WHITELIST = set([
"LPCTargetCode.lpc_patch",
"PSOC6Code.complete"
])
class GNUARMEclipse(Exporter):
NAME = 'GNU ARM Eclipse'
TOOLCHAIN = 'GCC_ARM'
@classmethod
def is_target_supported(cls, target_name):
target = TARGET_MAP[target_name]
return apply_supported_whitelist(
cls.TOOLCHAIN, POST_BINARY_WHITELIST, target)
def validate_resources(self):
if not self.resources.linker_script:
raise NotSupportedException("No linker script found.")
def create_jinja_ctx(self):
self.validate_resources()
self.resources.win_to_unix()
# TODO: use some logger to display additional info if verbose
libraries = []
library_files = []
for lib in self.libraries:
library_files.append(self.filter_dot(lib))
l, _ = splitext(basename(lib))
libraries.append(l[3:])
self.system_libraries = [
'stdc++', 'supc++', 'm', 'c', 'gcc', 'nosys'
]
# Read in all profiles, we'll extract compiler options.
profiles = self.get_all_profiles()
profile_ids = [s.lower() for s in profiles]
profile_ids.sort()
# TODO: get the list from existing .cproject
build_folders = [s.capitalize() for s in profile_ids]
build_folders.append('BUILD')
objects = [self.filter_dot(s) for s in self.resources.objects]
for bf in build_folders:
objects = [o for o in objects if not o.startswith(bf + '/')]
self.compute_exclusions()
self.include_path = [
self.filter_dot(s) for s in self.resources.inc_dirs]
self.as_defines = self.toolchain.get_symbols(True)
self.c_defines = self.toolchain.get_symbols()
self.cpp_defines = self.c_defines
self.ld_script = self.filter_dot(
self.resources.linker_script)
self.options = {}
for id in profile_ids:
            # There are five categories of options: one common to all tools
            # and a specific category for each of the four tools.
opts = {}
opts['common'] = {}
opts['as'] = {}
opts['c'] = {}
opts['cpp'] = {}
opts['ld'] = {}
opts['id'] = id
opts['name'] = opts['id'].capitalize()
profile = profiles[id]
# A small hack, do not bother with src_path again,
# pass an empty string to avoid crashing.
src_paths = ['']
target_name = self.toolchain.target.name
toolchain = prepare_toolchain(
src_paths, "", target_name, self.TOOLCHAIN, build_profile=[profile])
# Hack to fill in build_dir
toolchain.build_dir = self.toolchain.build_dir
toolchain.config = self.toolchain.config
toolchain.set_config_data(self.toolchain.config.get_config_data())
flags = self.toolchain_flags(toolchain)
# Most GNU ARM Eclipse options have a parent,
# either debug or release.
if '-O0' in flags['common_flags'] or '-Og' in flags['common_flags']:
opts['parent_id'] = 'debug'
else:
opts['parent_id'] = 'release'
self.process_options(opts, flags)
opts['as']['defines'] = self.as_defines
opts['c']['defines'] = self.c_defines
opts['cpp']['defines'] = self.cpp_defines
opts['common']['include_paths'] = self.include_path
opts['common']['excluded_folders'] = '|'.join(
self.excluded_folders)
opts['ld']['library_paths'] = [
self.filter_dot(s) for s in self.resources.lib_dirs]
opts['ld']['object_files'] = objects
opts['ld']['user_libraries'] = libraries
opts['ld']['user_library_files'] = library_files
opts['ld']['system_libraries'] = self.system_libraries
opts['ld']['script'] = join(id.capitalize(),
"linker-script-%s.ld" % id)
opts['cpp_cmd'] = '"{}"'.format(toolchain.preproc[0]) + " " + " ".join(toolchain.preproc[1:])
# Unique IDs used in multiple places.
# Those used only once are implemented with {{u.id}}.
uid = {}
uid['config'] = u.id
uid['tool_c_compiler'] = u.id
uid['tool_c_compiler_input'] = u.id
uid['tool_cpp_compiler'] = u.id
uid['tool_cpp_compiler_input'] = u.id
opts['uid'] = uid
self.options[id] = opts
jinja_ctx = {
'name': self.project_name,
'ld_script': self.ld_script,
# Compiler & linker command line options
'options': self.options,
# Must be an object with an `id` property, which
# will be called repeatedly, to generate multiple UIDs.
'u': u,
}
return jinja_ctx
# override
def generate(self):
"""
Generate the .project and .cproject files.
"""
jinja_ctx = self.create_jinja_ctx()
self.gen_file('gnuarmeclipse/.project.tmpl', jinja_ctx,
'.project', trim_blocks=True, lstrip_blocks=True)
self.gen_file('gnuarmeclipse/.cproject.tmpl', jinja_ctx,
'.cproject', trim_blocks=True, lstrip_blocks=True)
self.gen_file('gnuarmeclipse/makefile.targets.tmpl', jinja_ctx,
'makefile.targets', trim_blocks=True, lstrip_blocks=True)
self.gen_file_nonoverwrite('gnuarmeclipse/mbedignore.tmpl', jinja_ctx,
'.mbedignore')
print('Done. Import the \'{0}\' project in Eclipse.'.format(self.project_name))
@staticmethod
def clean(_):
os.remove('.project')
os.remove('.cproject')
if exists('Debug'):
shutil.rmtree('Debug')
if exists('Release'):
shutil.rmtree('Release')
if exists('makefile.targets'):
os.remove('makefile.targets')
# override
@staticmethod
def build(project_name, log_name="build_log.txt", cleanup=True):
"""
Headless build an Eclipse project.
The following steps are performed:
- a temporary workspace is created,
- the project is imported,
- a clean build of all configurations is performed and
- the temporary workspace is removed.
The build results are in the Debug & Release folders.
All executables (eclipse & toolchain) must be in the PATH.
The general method to start a headless Eclipse build is:
$ eclipse \
--launcher.suppressErrors \
-nosplash \
-application org.eclipse.cdt.managedbuilder.core.headlessbuild \
-data /path/to/workspace \
-import /path/to/project \
-cleanBuild "project[/configuration] | all"
"""
# TODO: possibly use the log file.
# Create a temporary folder for the workspace.
tmp_folder = tempfile.mkdtemp()
cmd = [
'eclipse',
'--launcher.suppressErrors',
'-nosplash',
'-application org.eclipse.cdt.managedbuilder.core.headlessbuild',
'-data', tmp_folder,
'-import', os.getcwd(),
'-cleanBuild', project_name
]
p = Popen(' '.join(cmd), shell=True, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
ret_code = p.returncode
stdout_string = "=" * 10 + "STDOUT" + "=" * 10 + "\n"
err_string = "=" * 10 + "STDERR" + "=" * 10 + "\n"
err_string += err
ret_string = "SUCCESS\n"
if ret_code != 0:
ret_string += "FAILURE\n"
print("%s\n%s\n%s\n%s" % (stdout_string, out, err_string, ret_string))
if log_name:
# Write the output to the log file
with open(log_name, 'w+') as f:
f.write(stdout_string)
f.write(out)
f.write(err_string)
f.write(ret_string)
# Cleanup the exported and built files
if cleanup:
if exists(log_name):
os.remove(log_name)
# Always remove the temporary folder.
if exists(tmp_folder):
shutil.rmtree(tmp_folder)
if ret_code == 0:
# Return Success
return 0
# Seems like something went wrong.
return -1
# -------------------------------------------------------------------------
@staticmethod
def get_all_profiles():
tools_path = dirname(dirname(dirname(__file__)))
file_names = [join(tools_path, "profiles", fn) for fn in os.listdir(
join(tools_path, "profiles")) if fn.endswith(".json")]
profile_names = [basename(fn).replace(".json", "")
for fn in file_names]
profiles = {}
for fn in file_names:
content = load(open(fn))
profile_name = basename(fn).replace(".json", "")
profiles[profile_name] = content
return profiles
# -------------------------------------------------------------------------
# Process source files/folders exclusions.
def compute_exclusions(self):
"""
With the project root as the only source folder known to CDT,
based on the list of source files, compute the folders to not
be included in the build.
The steps are:
- get the list of source folders, as dirname(source_file)
- compute the top folders (subfolders of the project folder)
        - iterate all subfolders and add them to a tree, with all
          nodes marked as 'not used'
- iterate the source folders and mark them as 'used' in the
tree, including all intermediate nodes
- recurse the tree and collect all unused folders; descend
the hierarchy only for used nodes
"""
self.excluded_folders = set(self.resources.ignored_dirs) - set(self.resources.inc_dirs)
# -------------------------------------------------------------------------
def dump_tree(self, nodes, depth=0):
for k in list(nodes):
node = nodes[k]
parent_name = node['parent'][
'name'] if 'parent' in list(node) else ''
if len(node['children'].keys()) != 0:
self.dump_tree(node['children'], depth + 1)
def dump_paths(self, nodes, depth=0):
for k in list(nodes):
node = nodes[k]
parts = []
while True:
parts.insert(0, node['name'])
if 'parent' not in node:
break
node = node['parent']
path = '/'.join(parts)
self.dump_paths(nodes[k]['children'], depth + 1)
# -------------------------------------------------------------------------
def process_options(self, opts, flags_in):
"""
CDT managed projects store lots of build options in separate
variables, with separate IDs in the .cproject file.
When the CDT build is started, all these options are brought
together to compose the compiler and linker command lines.
Here the process is reversed, from the compiler and linker
command lines, the options are identified and various flags are
set to control the template generation process.
Once identified, the options are removed from the command lines.
The options that were not identified are options that do not
have CDT equivalents and will be passed in the 'Other options'
categories.
Although this process does not have a very complicated logic,
given the large number of explicit configuration options
used by the GNU ARM Eclipse managed build plug-in, it is tedious...
"""
# Make a copy of the flags, to be one by one removed after processing.
flags = copy.deepcopy(flags_in)
# Initialise the 'last resort' options where all unrecognised
# options will be collected.
opts['as']['other'] = ''
opts['c']['other'] = ''
opts['cpp']['other'] = ''
opts['ld']['other'] = ''
MCPUS = {
'Cortex-M0': {'mcpu': 'cortex-m0', 'fpu_unit': None},
'Cortex-M0+': {'mcpu': 'cortex-m0plus', 'fpu_unit': None},
'Cortex-M1': {'mcpu': 'cortex-m1', 'fpu_unit': None},
'Cortex-M3': {'mcpu': 'cortex-m3', 'fpu_unit': None},
'Cortex-M4': {'mcpu': 'cortex-m4', 'fpu_unit': None},
'Cortex-M4F': {'mcpu': 'cortex-m4', 'fpu_unit': 'fpv4spd16'},
'Cortex-M7': {'mcpu': 'cortex-m7', 'fpu_unit': None},
'Cortex-M7F': {'mcpu': 'cortex-m7', 'fpu_unit': 'fpv4spd16'},
'Cortex-M7FD': {'mcpu': 'cortex-m7', 'fpu_unit': 'fpv5d16'},
'Cortex-A5': {'mcpu': 'cortex-a5', 'fpu_unit': 'vfpv3'},
'Cortex-A9': {'mcpu': 'cortex-a9', 'fpu_unit': 'vfpv3'}
}
# Remove options that are supplied by CDT
self.remove_option(flags['common_flags'], '-c')
self.remove_option(flags['common_flags'], '-MMD')
# As 'plan B', get the CPU from the target definition.
core = self.toolchain.target.core
opts['common']['arm.target.family'] = None
# cortex-m0, cortex-m0-small-multiply, cortex-m0plus,
# cortex-m0plus-small-multiply, cortex-m1, cortex-m1-small-multiply,
# cortex-m3, cortex-m4, cortex-m7.
str = self.find_options(flags['common_flags'], '-mcpu=')
if str != None:
opts['common']['arm.target.family'] = str[len('-mcpu='):]
self.remove_option(flags['common_flags'], str)
self.remove_option(flags['ld_flags'], str)
else:
if core not in MCPUS:
raise NotSupportedException(
'Target core {0} not supported.'.format(core))
opts['common']['arm.target.family'] = MCPUS[core]['mcpu']
opts['common']['arm.target.arch'] = 'none'
str = self.find_options(flags['common_flags'], '-march=')
arch = str[len('-march='):]
archs = {'armv6-m': 'armv6-m', 'armv7-m': 'armv7-m', 'armv7-a': 'armv7-a'}
if arch in archs:
opts['common']['arm.target.arch'] = archs[arch]
self.remove_option(flags['common_flags'], str)
opts['common']['arm.target.instructionset'] = 'thumb'
if '-mthumb' in flags['common_flags']:
self.remove_option(flags['common_flags'], '-mthumb')
self.remove_option(flags['ld_flags'], '-mthumb')
elif '-marm' in flags['common_flags']:
opts['common']['arm.target.instructionset'] = 'arm'
self.remove_option(flags['common_flags'], '-marm')
self.remove_option(flags['ld_flags'], '-marm')
opts['common']['arm.target.thumbinterwork'] = False
if '-mthumb-interwork' in flags['common_flags']:
opts['common']['arm.target.thumbinterwork'] = True
self.remove_option(flags['common_flags'], '-mthumb-interwork')
opts['common']['arm.target.endianness'] = None
if '-mlittle-endian' in flags['common_flags']:
opts['common']['arm.target.endianness'] = 'little'
self.remove_option(flags['common_flags'], '-mlittle-endian')
elif '-mbig-endian' in flags['common_flags']:
opts['common']['arm.target.endianness'] = 'big'
self.remove_option(flags['common_flags'], '-mbig-endian')
opts['common']['arm.target.fpu.unit'] = None
# default, fpv4spd16, fpv5d16, fpv5spd16
str = self.find_options(flags['common_flags'], '-mfpu=')
if str != None:
fpu = str[len('-mfpu='):]
fpus = {
'fpv4-sp-d16': 'fpv4spd16',
'fpv5-d16': 'fpv5d16',
'fpv5-sp-d16': 'fpv5spd16'
}
if fpu in fpus:
opts['common']['arm.target.fpu.unit'] = fpus[fpu]
self.remove_option(flags['common_flags'], str)
self.remove_option(flags['ld_flags'], str)
if opts['common']['arm.target.fpu.unit'] == None:
if core not in MCPUS:
raise NotSupportedException(
'Target core {0} not supported.'.format(core))
if MCPUS[core]['fpu_unit']:
opts['common'][
'arm.target.fpu.unit'] = MCPUS[core]['fpu_unit']
# soft, softfp, hard.
str = self.find_options(flags['common_flags'], '-mfloat-abi=')
if str != None:
opts['common']['arm.target.fpu.abi'] = str[
len('-mfloat-abi='):]
self.remove_option(flags['common_flags'], str)
self.remove_option(flags['ld_flags'], str)
opts['common']['arm.target.unalignedaccess'] = None
if '-munaligned-access' in flags['common_flags']:
opts['common']['arm.target.unalignedaccess'] = 'enabled'
self.remove_option(flags['common_flags'], '-munaligned-access')
elif '-mno-unaligned-access' in flags['common_flags']:
opts['common']['arm.target.unalignedaccess'] = 'disabled'
self.remove_option(flags['common_flags'], '-mno-unaligned-access')
# Default optimisation level for Release.
opts['common']['optimization.level'] = '-Os'
        # If the project defines an optimisation level, it is used
        # only for the Release configuration; the Debug one uses '-Og'.
str = self.find_options(flags['common_flags'], '-O')
if str != None:
levels = {
'-O0': 'none', '-O1': 'optimize', '-O2': 'more',
'-O3': 'most', '-Os': 'size', '-Og': 'debug'
}
if str in levels:
opts['common']['optimization.level'] = levels[str]
self.remove_option(flags['common_flags'], str)
include_files = []
for all_flags in [flags['common_flags'], flags['c_flags'], flags['cxx_flags']]:
while '-include' in all_flags:
ix = all_flags.index('-include')
str = all_flags[ix + 1]
if str not in include_files:
include_files.append(str)
self.remove_option(all_flags, '-include')
self.remove_option(all_flags, str)
opts['common']['include_files'] = include_files
if '-ansi' in flags['c_flags']:
opts['c']['compiler.std'] = '-ansi'
            self.remove_option(flags['c_flags'], '-ansi')
else:
str = self.find_options(flags['c_flags'], '-std')
std = str[len('-std='):]
c_std = {
'c90': 'c90', 'c89': 'c90', 'gnu90': 'gnu90', 'gnu89': 'gnu90',
                'c99': 'c99', 'c9x': 'c99', 'gnu99': 'gnu99', 'gnu9x': 'gnu99',
'c11': 'c11', 'c1x': 'c11', 'gnu11': 'gnu11', 'gnu1x': 'gnu11'
}
if std in c_std:
opts['c']['compiler.std'] = c_std[std]
self.remove_option(flags['c_flags'], str)
if '-ansi' in flags['cxx_flags']:
opts['cpp']['compiler.std'] = '-ansi'
            self.remove_option(flags['cxx_flags'], '-ansi')
else:
str = self.find_options(flags['cxx_flags'], '-std')
std = str[len('-std='):]
cpp_std = {
'c++98': 'cpp98', 'c++03': 'cpp98',
'gnu++98': 'gnucpp98', 'gnu++03': 'gnucpp98',
'c++0x': 'cpp0x', 'gnu++0x': 'gnucpp0x',
'c++11': 'cpp11', 'gnu++11': 'gnucpp11',
'c++1y': 'cpp1y', 'gnu++1y': 'gnucpp1y',
'c++14': 'cpp14', 'gnu++14': 'gnucpp14',
'c++1z': 'cpp1z', 'gnu++1z': 'gnucpp1z',
}
if std in cpp_std:
opts['cpp']['compiler.std'] = cpp_std[std]
self.remove_option(flags['cxx_flags'], str)
# Common optimisation options.
optimization_options = {
'-fmessage-length=0': 'optimization.messagelength',
'-fsigned-char': 'optimization.signedchar',
'-ffunction-sections': 'optimization.functionsections',
'-fdata-sections': 'optimization.datasections',
'-fno-common': 'optimization.nocommon',
'-fno-inline-functions': 'optimization.noinlinefunctions',
'-ffreestanding': 'optimization.freestanding',
'-fno-builtin': 'optimization.nobuiltin',
'-fsingle-precision-constant': 'optimization.spconstant',
'-fPIC': 'optimization.PIC',
'-fno-move-loop-invariants': 'optimization.nomoveloopinvariants',
}
for option in optimization_options:
opts['common'][optimization_options[option]] = False
if option in flags['common_flags']:
opts['common'][optimization_options[option]] = True
self.remove_option(flags['common_flags'], option)
# Common warning options.
warning_options = {
'-fsyntax-only': 'warnings.syntaxonly',
'-pedantic': 'warnings.pedantic',
'-pedantic-errors': 'warnings.pedanticerrors',
'-w': 'warnings.nowarn',
'-Wunused': 'warnings.unused',
'-Wuninitialized': 'warnings.uninitialized',
'-Wall': 'warnings.allwarn',
'-Wextra': 'warnings.extrawarn',
'-Wmissing-declarations': 'warnings.missingdeclaration',
'-Wconversion': 'warnings.conversion',
'-Wpointer-arith': 'warnings.pointerarith',
'-Wpadded': 'warnings.padded',
'-Wshadow': 'warnings.shadow',
'-Wlogical-op': 'warnings.logicalop',
'-Waggregate-return': 'warnings.agreggatereturn',
'-Wfloat-equal': 'warnings.floatequal',
'-Werror': 'warnings.toerrors',
}
for option in warning_options:
opts['common'][warning_options[option]] = False
if option in flags['common_flags']:
opts['common'][warning_options[option]] = True
self.remove_option(flags['common_flags'], option)
# Common debug options.
debug_levels = {
'-g': 'default',
'-g1': 'minimal',
'-g3': 'max',
}
opts['common']['debugging.level'] = 'none'
for option in debug_levels:
if option in flags['common_flags']:
opts['common'][
'debugging.level'] = debug_levels[option]
self.remove_option(flags['common_flags'], option)
debug_formats = {
'-ggdb': 'gdb',
'-gstabs': 'stabs',
'-gstabs+': 'stabsplus',
'-gdwarf-2': 'dwarf2',
'-gdwarf-3': 'dwarf3',
'-gdwarf-4': 'dwarf4',
'-gdwarf-5': 'dwarf5',
}
opts['common']['debugging.format'] = ''
        for option in debug_formats:
if option in flags['common_flags']:
opts['common'][
'debugging.format'] = debug_formats[option]
self.remove_option(flags['common_flags'], option)
opts['common']['debugging.prof'] = False
if '-p' in flags['common_flags']:
opts['common']['debugging.prof'] = True
self.remove_option(flags['common_flags'], '-p')
opts['common']['debugging.gprof'] = False
if '-pg' in flags['common_flags']:
opts['common']['debugging.gprof'] = True
            self.remove_option(flags['common_flags'], '-pg')
# Assembler options.
opts['as']['usepreprocessor'] = False
while '-x' in flags['asm_flags']:
ix = flags['asm_flags'].index('-x')
str = flags['asm_flags'][ix + 1]
if str == 'assembler-with-cpp':
opts['as']['usepreprocessor'] = True
else:
# Collect all other assembler options.
opts['as']['other'] += ' -x ' + str
self.remove_option(flags['asm_flags'], '-x')
self.remove_option(flags['asm_flags'], 'assembler-with-cpp')
opts['as']['nostdinc'] = False
if '-nostdinc' in flags['asm_flags']:
opts['as']['nostdinc'] = True
self.remove_option(flags['asm_flags'], '-nostdinc')
opts['as']['verbose'] = False
if '-v' in flags['asm_flags']:
opts['as']['verbose'] = True
self.remove_option(flags['asm_flags'], '-v')
# C options.
opts['c']['nostdinc'] = False
if '-nostdinc' in flags['c_flags']:
opts['c']['nostdinc'] = True
self.remove_option(flags['c_flags'], '-nostdinc')
opts['c']['verbose'] = False
if '-v' in flags['c_flags']:
opts['c']['verbose'] = True
self.remove_option(flags['c_flags'], '-v')
warning_options = {
'-Wmissing-prototypes': 'warnings.missingprototypes',
'-Wstrict-prototypes': 'warnings.strictprototypes',
'-Wbad-function-cast': 'warnings.badfunctioncast',
}
for option in warning_options:
opts['c'][warning_options[option]] = False
if option in flags['common_flags']:
opts['c'][warning_options[option]] = True
self.remove_option(flags['common_flags'], option)
# C++ options.
opts['cpp']['nostdinc'] = False
if '-nostdinc' in flags['cxx_flags']:
opts['cpp']['nostdinc'] = True
self.remove_option(flags['cxx_flags'], '-nostdinc')
opts['cpp']['nostdincpp'] = False
if '-nostdinc++' in flags['cxx_flags']:
opts['cpp']['nostdincpp'] = True
self.remove_option(flags['cxx_flags'], '-nostdinc++')
optimization_options = {
'-fno-exceptions': 'optimization.noexceptions',
'-fno-rtti': 'optimization.nortti',
'-fno-use-cxa-atexit': 'optimization.nousecxaatexit',
'-fno-threadsafe-statics': 'optimization.nothreadsafestatics',
}
for option in optimization_options:
opts['cpp'][optimization_options[option]] = False
if option in flags['cxx_flags']:
opts['cpp'][optimization_options[option]] = True
self.remove_option(flags['cxx_flags'], option)
if option in flags['common_flags']:
opts['cpp'][optimization_options[option]] = True
self.remove_option(flags['common_flags'], option)
warning_options = {
'-Wabi': 'warnabi',
'-Wctor-dtor-privacy': 'warnings.ctordtorprivacy',
'-Wnoexcept': 'warnings.noexcept',
'-Wnon-virtual-dtor': 'warnings.nonvirtualdtor',
'-Wstrict-null-sentinel': 'warnings.strictnullsentinel',
'-Wsign-promo': 'warnings.signpromo',
'-Weffc++': 'warneffc',
}
for option in warning_options:
opts['cpp'][warning_options[option]] = False
if option in flags['cxx_flags']:
opts['cpp'][warning_options[option]] = True
self.remove_option(flags['cxx_flags'], option)
if option in flags['common_flags']:
opts['cpp'][warning_options[option]] = True
self.remove_option(flags['common_flags'], option)
opts['cpp']['verbose'] = False
if '-v' in flags['cxx_flags']:
opts['cpp']['verbose'] = True
self.remove_option(flags['cxx_flags'], '-v')
# Linker options.
linker_options = {
'-nostartfiles': 'nostart',
'-nodefaultlibs': 'nodeflibs',
'-nostdlib': 'nostdlibs',
}
for option in linker_options:
opts['ld'][linker_options[option]] = False
if option in flags['ld_flags']:
opts['ld'][linker_options[option]] = True
self.remove_option(flags['ld_flags'], option)
opts['ld']['gcsections'] = False
if '-Wl,--gc-sections' in flags['ld_flags']:
opts['ld']['gcsections'] = True
self.remove_option(flags['ld_flags'], '-Wl,--gc-sections')
opts['ld']['flags'] = []
to_remove = []
for opt in flags['ld_flags']:
if opt.startswith('-Wl,--wrap,'):
opts['ld']['flags'].append(
'--wrap=' + opt[len('-Wl,--wrap,'):])
to_remove.append(opt)
for opt in to_remove:
self.remove_option(flags['ld_flags'], opt)
# Other tool remaining options are separated by category.
opts['as']['otherwarnings'] = self.find_options(
flags['asm_flags'], '-W')
opts['c']['otherwarnings'] = self.find_options(
flags['c_flags'], '-W')
opts['c']['otheroptimizations'] = self.find_options(flags[
'c_flags'], '-f')
opts['cpp']['otherwarnings'] = self.find_options(
flags['cxx_flags'], '-W')
opts['cpp']['otheroptimizations'] = self.find_options(
flags['cxx_flags'], '-f')
# Other common remaining options are separated by category.
opts['common']['optimization.other'] = self.find_options(
flags['common_flags'], '-f')
opts['common']['warnings.other'] = self.find_options(
flags['common_flags'], '-W')
# Remaining common flags are added to each tool.
opts['as']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + \
' '.join(flags['asm_flags'])
opts['c']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + ' '.join(flags['c_flags'])
opts['cpp']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + \
' '.join(flags['cxx_flags'])
opts['ld']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + ' '.join(flags['ld_flags'])
if len(self.system_libraries) > 0:
opts['ld']['other'] += ' -Wl,--start-group '
opts['ld'][
'other'] += ' '.join('-l' + s for s in self.system_libraries)
opts['ld']['other'] += ' -Wl,--end-group '
# Strip all 'other' flags, since they might have leading spaces.
opts['as']['other'] = opts['as']['other'].strip()
opts['c']['other'] = opts['c']['other'].strip()
opts['cpp']['other'] = opts['cpp']['other'].strip()
opts['ld']['other'] = opts['ld']['other'].strip()
@staticmethod
def find_options(lst, option):
tmp = [str for str in lst if str.startswith(option)]
if len(tmp) > 0:
return tmp[0]
else:
return None
@staticmethod
def find_options(lst, prefix):
other = ''
opts = [str for str in lst if str.startswith(prefix)]
if len(opts) > 0:
for opt in opts:
other += ' ' + opt
GNUARMEclipse.remove_option(lst, opt)
return other.strip()
@staticmethod
def remove_option(lst, option):
if option in lst:
lst.remove(option)
# =============================================================================
|
|
import pytest
from future.moves.urllib.parse import urlparse
from api_tests.nodes.views.test_node_contributors_list import NodeCRUDTestCase
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf_tests.factories import (
ProjectFactory,
CommentFactory,
RegistrationFactory,
InstitutionFactory,
WithdrawnRegistrationFactory,
)
class TestWithdrawnRegistrations(NodeCRUDTestCase):
@pytest.fixture()
def institution_one(self):
return InstitutionFactory()
@pytest.fixture()
def registration(self, user, project_public, institution_one):
registration = RegistrationFactory(creator=user, project=project_public)
registration.affiliated_institutions.add(institution_one)
return registration
@pytest.fixture()
def registration_with_child(self, user, project_public):
project = ProjectFactory(creator=user, is_public=True)
child = ProjectFactory(creator=user, is_public=True, parent=project)
registration = RegistrationFactory(project=project, is_public=True)
RegistrationFactory(project=child, is_public=True)
return registration
@pytest.fixture()
def withdrawn_registration_with_child(self, user, registration_with_child):
withdrawn_registration = WithdrawnRegistrationFactory(
registration=registration_with_child, user=registration_with_child.creator)
withdrawn_registration.justification = 'We made a major error.'
withdrawn_registration.save()
return withdrawn_registration
@pytest.fixture()
def withdrawn_registration(self, registration):
withdrawn_registration = WithdrawnRegistrationFactory(
registration=registration, user=registration.creator)
withdrawn_registration.justification = 'We made a major error.'
withdrawn_registration.save()
return withdrawn_registration
@pytest.fixture()
def project_pointer_public(self):
return ProjectFactory(is_public=True)
@pytest.fixture()
def pointer_public(self, user, project_public, project_pointer_public):
return project_public.add_pointer(
project_pointer_public, auth=Auth(user), save=True)
@pytest.fixture()
def url_withdrawn(self, registration):
return '/{}registrations/{}/?version=2.2'.format(
API_BASE, registration._id)
def test_can_access_withdrawn_contributors(
self, app, user, registration, withdrawn_registration):
url = '/{}registrations/{}/contributors/'.format(
API_BASE, registration._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 200
def test_can_access_withdrawn_contributor_detail(
self, app, user, registration):
url = '/{}registrations/{}/contributors/{}/'.format(
API_BASE, registration._id, user._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 200
def test_cannot_errors(
self, app, user, project_public, registration,
withdrawn_registration, pointer_public):
# test_cannot_access_withdrawn_children
url = '/{}registrations/{}/children/'.format(
API_BASE, registration._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 403
# test_cannot_return_a_withdrawn_registration_at_node_detail_endpoint
url = '/{}nodes/{}/'.format(API_BASE, registration._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
# test_cannot_delete_a_withdrawn_registration
url = '/{}registrations/{}/'.format(API_BASE, registration._id)
res = app.delete_json_api(url, auth=user.auth, expect_errors=True)
registration.reload()
assert res.status_code == 405
# test_cannot_access_withdrawn_files_list
url = '/{}registrations/{}/files/'.format(API_BASE, registration._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 403
# test_cannot_access_withdrawn_node_links_detail
url = '/{}registrations/{}/node_links/{}/'.format(
API_BASE, registration._id, pointer_public._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 403
# test_cannot_access_withdrawn_node_links_list
url = '/{}registrations/{}/node_links/'.format(
API_BASE, registration._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 403
# test_cannot_access_withdrawn_registrations_list
registration.save()
url = '/{}registrations/{}/registrations/'.format(
API_BASE, registration._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 403
def test_cannot_access_withdrawn_comments(
self, app, user, project_public, pointer_public,
registration, withdrawn_registration):
project_public = ProjectFactory(is_public=True, creator=user)
CommentFactory(node=project_public, user=user)
url = '/{}registrations/{}/comments/'.format(
API_BASE, registration._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 403
def test_cannot_access_withdrawn_node_logs(
self, app, user, project_public, pointer_public,
registration, withdrawn_registration):
ProjectFactory(is_public=True, creator=user)
url = '/{}registrations/{}/logs/'.format(API_BASE, registration._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 403
def test_withdrawn_registrations_display_limited_attributes_fields(
self, app, user, registration, withdrawn_registration, url_withdrawn):
res = app.get(url_withdrawn, auth=user.auth)
assert res.status_code == 200
attributes = res.json['data']['attributes']
registration.reload()
expected_attributes = {
'title': registration.title,
'description': registration.description,
'date_created': registration.created.isoformat().replace(
'+00:00',
'Z'),
'date_registered': registration.registered_date.isoformat().replace(
'+00:00',
'Z'),
'date_modified': registration.last_logged.isoformat().replace(
'+00:00',
'Z'),
'date_withdrawn': registration.retraction.date_retracted.isoformat().replace(
'+00:00',
'Z'),
'withdrawal_justification': registration.retraction.justification,
'public': None,
'registration': True,
'fork': None,
'collection': None,
'tags': None,
'withdrawn': True,
'pending_withdrawal': None,
'pending_registration_approval': None,
'pending_embargo_approval': None,
'embargo_end_date': None,
'registered_meta': None,
'current_user_permissions': None,
'registration_supplement': registration.registered_schema.first().name}
for attribute in expected_attributes:
assert expected_attributes[attribute] == attributes[attribute]
contributors = urlparse(
res.json['data']['relationships']['contributors']['links']['related']['href']).path
assert contributors == '/{}registrations/{}/contributors/'.format(
API_BASE, registration._id)
def test_withdrawn_registrations_display_limited_relationship_fields(
self, app, user, registration, withdrawn_registration):
url_withdrawn = '/{}registrations/{}/?version=2.14'.format(API_BASE, registration._id)
res = app.get(url_withdrawn, auth=user.auth)
assert 'children' not in res.json['data']['relationships']
assert 'comments' not in res.json['data']['relationships']
assert 'node_links' not in res.json['data']['relationships']
assert 'registrations' not in res.json['data']['relationships']
assert 'parent' in res.json['data']['relationships']
assert 'forked_from' not in res.json['data']['relationships']
assert 'files' not in res.json['data']['relationships']
assert 'logs' not in res.json['data']['relationships']
assert 'registered_by' not in res.json['data']['relationships']
assert 'registered_from' in res.json['data']['relationships']
assert 'root' in res.json['data']['relationships']
assert 'affiliated_institutions' in res.json['data']['relationships']
assert 'license' not in res.json['data']['relationships']
assert 'identifiers' in res.json['data']['relationships']
def test_field_specific_related_counts_ignored_if_hidden_field_on_withdrawn_registration(
self, app, user, registration, withdrawn_registration):
url = '/{}registrations/{}/?related_counts=children'.format(
API_BASE, registration._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert 'children' not in res.json['data']['relationships']
assert 'contributors' in res.json['data']['relationships']
def test_field_specific_related_counts_retrieved_if_visible_field_on_withdrawn_registration(
self, app, user, registration, withdrawn_registration):
url = '/{}registrations/{}/?related_counts=contributors'.format(
API_BASE, registration._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['relationships']['contributors']['links']['related']['meta']['count'] == 1
def test_child_inherits_withdrawal_justification_and_date_withdrawn(
self, app, user, withdrawn_registration_with_child, registration_with_child):
reg_child = registration_with_child.node_relations.first().child
url = '/{}registrations/{}/?version=2.2'.format(API_BASE, reg_child._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['withdrawal_justification'] == withdrawn_registration_with_child.justification
formatted_date_retracted = withdrawn_registration_with_child.date_retracted.isoformat().replace('+00:00', 'Z')
assert res.json['data']['attributes']['date_withdrawn'] == formatted_date_retracted
|
|
"""
Testing for the boost module (sklearn.ensemble.boost).
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.dummy import DummyClassifier
from sklearn.dummy import DummyRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_classification_toy():
"""Check classification on a toy dataset."""
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
def test_regression_toy():
"""Check classification on a toy dataset."""
clf = AdaBoostRegressor()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
def test_iris():
"""Check consistency on dataset iris."""
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
def test_boston():
"""Check consistency on dataset boston house prices."""
clf = AdaBoostRegressor()
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
"""Check staged predictions."""
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target)
staged_scores = [s for s in clf.staged_score(iris.data, iris.target)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10)
clf.fit(boston.data, boston.target)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target)
staged_scores = [s for s in clf.staged_score(boston.data, boston.target)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
"""Check that base trees can be grid-searched."""
# AdaBoost classification
boost = AdaBoostClassifier()
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor()
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
"""Check pickability."""
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor()
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
"""Test that it gives proper exception on deficient input."""
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y)
assert_raises(TypeError,
AdaBoostClassifier(base_estimator=DummyRegressor()).fit,
X, y)
assert_raises(TypeError,
AdaBoostRegressor(base_estimator=DummyClassifier()).fit,
X, y)
def test_base_estimator():
"""Test different base estimators."""
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor())
clf.fit(X, y)
clf = AdaBoostRegressor(SVR())
clf.fit(X, y)
if __name__ == "__main__":
import nose
nose.runmodule()
|
|
import numpy as np
import openmoc
import openmc
import openmc.checkvalue as cv
# TODO: Get rid of global state by using memoization on functions below
# A dictionary of all OpenMC Materials created
# Keys - Material IDs
# Values - Materials
OPENMC_MATERIALS = {}
# A dictionary of all OpenMOC Materials created
# Keys - Material IDs
# Values - Materials
OPENMOC_MATERIALS = {}
# A dictionary of all OpenMC Surfaces created
# Keys - Surface IDs
# Values - Surfaces
OPENMC_SURFACES = {}
# A dictionary of all OpenMOC Surfaces created
# Keys - Surface IDs
# Values - Surfaces
OPENMOC_SURFACES = {}
# A dictionary of all OpenMC Cells created
# Keys - Cell IDs
# Values - Cells
OPENMC_CELLS = {}
# A dictionary of all OpenMOC Cells created
# Keys - Cell IDs
# Values - Cells
OPENMOC_CELLS = {}
# A dictionary of all OpenMC Universes created
# Keys - Universes IDs
# Values - Universes
OPENMC_UNIVERSES = {}
# A dictionary of all OpenMOC Universes created
# Keys - Universes IDs
# Values - Universes
OPENMOC_UNIVERSES = {}
# A dictionary of all OpenMC Lattices created
# Keys - Lattice IDs
# Values - Lattices
OPENMC_LATTICES = {}
# A dictionary of all OpenMOC Lattices created
# Keys - Lattice IDs
# Values - Lattices
OPENMOC_LATTICES = {}
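# The dictionaries above act as hand-rolled memoization caches keyed on each
# object's ID. As a purely illustrative sketch of how the TODO might be
# addressed (this helper is hypothetical and is not used by this module), a
# one-argument converter could instead be wrapped in a caching decorator:
def _memoize_by(key_func):
    """Illustrative only: cache a one-argument converter by key_func(arg)."""
    def decorator(convert):
        cache = {}
        def wrapper(obj):
            key = key_func(obj)
            if key not in cache:
                cache[key] = convert(obj)
            return cache[key]
        wrapper.cache = cache  # exposed so callers could clear it explicitly
        return wrapper
    return decorator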
def get_openmoc_material(openmc_material):
"""Return an OpenMOC material corresponding to an OpenMC material.
Parameters
----------
openmc_material : openmc.Material
OpenMC material
Returns
-------
openmoc_material : openmoc.Material
Equivalent OpenMOC material
"""
cv.check_type('openmc_material', openmc_material, openmc.Material)
material_id = openmc_material.id
# If this Material was already created, use it
if material_id in OPENMOC_MATERIALS:
return OPENMOC_MATERIALS[material_id]
# Create an OpenMOC Material to represent this OpenMC Material
name = str(openmc_material.name)
openmoc_material = openmoc.Material(id=material_id, name=name)
# Add the OpenMC Material to the global collection of all OpenMC Materials
OPENMC_MATERIALS[material_id] = openmc_material
# Add the OpenMOC Material to the global collection of all OpenMOC Materials
OPENMOC_MATERIALS[material_id] = openmoc_material
return openmoc_material
def get_openmc_material(openmoc_material):
"""Return an OpenMC material corresponding to an OpenMOC material.
Parameters
----------
openmoc_material : openmoc.Material
OpenMOC material
Returns
-------
openmc_material : openmc.Material
Equivalent OpenMC material
"""
cv.check_type('openmoc_material', openmoc_material, openmoc.Material)
material_id = openmoc_material.getId()
# If this Material was already created, use it
if material_id in OPENMC_MATERIALS:
return OPENMC_MATERIALS[material_id]
# Create an OpenMC Material to represent this OpenMOC Material
name = openmoc_material.getName()
openmc_material = openmc.Material(material_id=material_id, name=name)
# Add the OpenMOC Material to the global collection of all OpenMOC Materials
OPENMOC_MATERIALS[material_id] = openmoc_material
# Add the OpenMC Material to the global collection of all OpenMC Materials
OPENMC_MATERIALS[material_id] = openmc_material
return openmc_material
def get_openmoc_surface(openmc_surface):
"""Return an OpenMOC surface corresponding to an OpenMC surface.
Parameters
----------
openmc_surface : openmc.Surface
OpenMC surface
Returns
-------
openmoc_surface : openmoc.Surface
Equivalent OpenMOC surface
"""
cv.check_type('openmc_surface', openmc_surface, openmc.Surface)
surface_id = openmc_surface.id
    # If this Surface was already created, use it
if surface_id in OPENMOC_SURFACES:
return OPENMOC_SURFACES[surface_id]
# Create an OpenMOC Surface to represent this OpenMC Surface
name = openmc_surface.name
# Determine the type of boundary conditions applied to the Surface
if openmc_surface.boundary_type == 'vacuum':
boundary = openmoc.VACUUM
elif openmc_surface.boundary_type == 'reflective':
boundary = openmoc.REFLECTIVE
elif openmc_surface.boundary_type == 'periodic':
boundary = openmoc.PERIODIC
else:
boundary = openmoc.BOUNDARY_NONE
if openmc_surface.type == 'plane':
A = openmc_surface.a
B = openmc_surface.b
C = openmc_surface.c
D = openmc_surface.d
# OpenMOC uses the opposite sign on D
openmoc_surface = openmoc.Plane(A, B, C, -D, surface_id, name)
elif openmc_surface.type == 'x-plane':
x0 = openmc_surface.x0
openmoc_surface = openmoc.XPlane(x0, surface_id, name)
elif openmc_surface.type == 'y-plane':
y0 = openmc_surface.y0
openmoc_surface = openmoc.YPlane(y0, surface_id, name)
elif openmc_surface.type == 'z-plane':
z0 = openmc_surface.z0
openmoc_surface = openmoc.ZPlane(z0, surface_id, name)
elif openmc_surface.type == 'z-cylinder':
x0 = openmc_surface.x0
y0 = openmc_surface.y0
R = openmc_surface.r
openmoc_surface = openmoc.ZCylinder(x0, y0, R, surface_id, name)
else:
msg = ('Unable to create an OpenMOC Surface from an OpenMC Surface of '
f'type "{type(openmc_surface)}" since it is not a compatible '
'Surface type in OpenMOC')
raise ValueError(msg)
# Set the boundary condition for this Surface
openmoc_surface.setBoundaryType(boundary)
# Add the OpenMC Surface to the global collection of all OpenMC Surfaces
OPENMC_SURFACES[surface_id] = openmc_surface
# Add the OpenMOC Surface to the global collection of all OpenMOC Surfaces
OPENMOC_SURFACES[surface_id] = openmoc_surface
return openmoc_surface
def get_openmc_surface(openmoc_surface):
"""Return an OpenMC surface corresponding to an OpenMOC surface.
Parameters
----------
openmoc_surface : openmoc.Surface
OpenMOC surface
Returns
-------
openmc_surface : openmc.Surface
Equivalent OpenMC surface
"""
cv.check_type('openmoc_surface', openmoc_surface, openmoc.Surface)
surface_id = openmoc_surface.getId()
# If this Surface was already created, use it
if surface_id in OPENMC_SURFACES:
return OPENMC_SURFACES[surface_id]
# Create an OpenMC Surface to represent this OpenMOC Surface
name = openmoc_surface.getName()
    # Determine the type of boundary condition applied to the Surface
boundary = openmoc_surface.getBoundaryType()
if boundary == openmoc.VACUUM:
boundary = 'vacuum'
elif boundary == openmoc.REFLECTIVE:
boundary = 'reflective'
elif boundary == openmoc.PERIODIC:
boundary = 'periodic'
else:
boundary = 'transmission'
if openmoc_surface.getSurfaceType() == openmoc.PLANE:
openmoc_surface = openmoc.castSurfaceToPlane(openmoc_surface)
A = openmoc_surface.getA()
B = openmoc_surface.getB()
C = openmoc_surface.getC()
D = openmoc_surface.getD()
# OpenMOC uses the opposite sign on D
openmc_surface = openmc.Plane(surface_id, boundary, A, B, C, -D, name)
elif openmoc_surface.getSurfaceType() == openmoc.XPLANE:
openmoc_surface = openmoc.castSurfaceToXPlane(openmoc_surface)
x0 = openmoc_surface.getX()
openmc_surface = openmc.XPlane(surface_id, boundary, x0, name)
elif openmoc_surface.getSurfaceType() == openmoc.YPLANE:
openmoc_surface = openmoc.castSurfaceToYPlane(openmoc_surface)
y0 = openmoc_surface.getY()
openmc_surface = openmc.YPlane(surface_id, boundary, y0, name)
elif openmoc_surface.getSurfaceType() == openmoc.ZPLANE:
openmoc_surface = openmoc.castSurfaceToZPlane(openmoc_surface)
z0 = openmoc_surface.getZ()
openmc_surface = openmc.ZPlane(surface_id, boundary, z0, name)
elif openmoc_surface.getSurfaceType() == openmoc.ZCYLINDER:
openmoc_surface = openmoc.castSurfaceToZCylinder(openmoc_surface)
x0 = openmoc_surface.getX0()
y0 = openmoc_surface.getY0()
R = openmoc_surface.getRadius()
openmc_surface = openmc.ZCylinder(surface_id, boundary, x0, y0, R, name)
# Add the OpenMC Surface to the global collection of all OpenMC Surfaces
OPENMC_SURFACES[surface_id] = openmc_surface
# Add the OpenMOC Surface to the global collection of all OpenMOC Surfaces
OPENMOC_SURFACES[surface_id] = openmoc_surface
return openmc_surface
def get_openmoc_cell(openmc_cell):
"""Return an OpenMOC cell corresponding to an OpenMC cell.
Parameters
----------
openmc_cell : openmc.Cell
OpenMC cell
Returns
-------
openmoc_cell : openmoc.Cell
Equivalent OpenMOC cell
"""
cv.check_type('openmc_cell', openmc_cell, openmc.Cell)
cell_id = openmc_cell.id
# If this Cell was already created, use it
if cell_id in OPENMOC_CELLS:
return OPENMOC_CELLS[cell_id]
# Create an OpenMOC Cell to represent this OpenMC Cell
name = openmc_cell.name
openmoc_cell = openmoc.Cell(cell_id, name)
fill = openmc_cell.fill
if openmc_cell.fill_type == 'material':
openmoc_cell.setFill(get_openmoc_material(fill))
elif openmc_cell.fill_type == 'universe':
openmoc_cell.setFill(get_openmoc_universe(fill))
else:
openmoc_cell.setFill(get_openmoc_lattice(fill))
if openmc_cell.rotation is not None:
rotation = np.asarray(openmc_cell.rotation, dtype=np.float64)
openmoc_cell.setRotation(rotation)
if openmc_cell.translation is not None:
translation = np.asarray(openmc_cell.translation, dtype=np.float64)
openmoc_cell.setTranslation(translation)
# Convert OpenMC's cell region to an equivalent OpenMOC region
if openmc_cell.region is not None:
openmoc_cell.setRegion(get_openmoc_region(openmc_cell.region))
# Add the OpenMC Cell to the global collection of all OpenMC Cells
OPENMC_CELLS[cell_id] = openmc_cell
# Add the OpenMOC Cell to the global collection of all OpenMOC Cells
OPENMOC_CELLS[cell_id] = openmoc_cell
return openmoc_cell
def get_openmoc_region(openmc_region):
"""Return an OpenMOC region corresponding to an OpenMC region.
Parameters
----------
openmc_region : openmc.Region
OpenMC region
Returns
-------
openmoc_region : openmoc.Region
Equivalent OpenMOC region
"""
cv.check_type('openmc_region', openmc_region, openmc.Region)
# Recursively instantiate a region of the appropriate type
if isinstance(openmc_region, openmc.Halfspace):
surface = openmc_region.surface
halfspace = -1 if openmc_region.side == '-' else 1
openmoc_region = \
openmoc.Halfspace(halfspace, get_openmoc_surface(surface))
elif isinstance(openmc_region, openmc.Intersection):
openmoc_region = openmoc.Intersection()
for openmc_node in openmc_region:
openmoc_region.addNode(get_openmoc_region(openmc_node))
elif isinstance(openmc_region, openmc.Union):
openmoc_region = openmoc.Union()
for openmc_node in openmc_region:
openmoc_region.addNode(get_openmoc_region(openmc_node))
elif isinstance(openmc_region, openmc.Complement):
openmoc_region = openmoc.Complement()
openmoc_region.addNode(get_openmoc_region(openmc_region.node))
return openmoc_region
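# For example (illustrative only): an OpenMC region written as -cyl & +zmin,
# i.e. an openmc.Intersection of two openmc.Halfspace nodes, is converted by
# the recursion above into an openmoc.Intersection whose two
# openmoc.Halfspace nodes wrap the correspondingly converted surfaces.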
def get_openmc_region(openmoc_region):
"""Return an OpenMC region corresponding to an OpenMOC region.
Parameters
----------
openmoc_region : openmoc.Region
OpenMOC region
Returns
-------
openmc_region : openmc.Region
Equivalent OpenMC region
"""
cv.check_type('openmoc_region', openmoc_region, openmoc.Region)
# Recursively instantiate a region of the appropriate type
if openmoc_region.getRegionType() == openmoc.HALFSPACE:
openmoc_region = openmoc.castRegionToHalfspace(openmoc_region)
surface = get_openmc_surface(openmoc_region.getSurface())
side = '-' if openmoc_region.getHalfspace() == -1 else '+'
openmc_region = openmc.Halfspace(surface, side)
elif openmoc_region.getRegionType() == openmoc.INTERSECTION:
openmc_region = openmc.Intersection([])
for openmoc_node in openmoc_region.getNodes():
openmc_node = get_openmc_region(openmoc_node)
openmc_region.append(openmc_node)
elif openmoc_region.getRegionType() == openmoc.UNION:
openmc_region = openmc.Union([])
for openmoc_node in openmoc_region.getNodes():
openmc_node = get_openmc_region(openmoc_node)
openmc_region.append(openmc_node)
elif openmoc_region.getRegionType() == openmoc.COMPLEMENT:
openmoc_nodes = openmoc_region.getNodes()
openmc_node = get_openmc_region(openmoc_nodes[0])
openmc_region = openmc.Complement(openmc_node)
return openmc_region
def get_openmc_cell(openmoc_cell):
"""Return an OpenMC cell corresponding to an OpenMOC cell.
Parameters
----------
openmoc_cell : openmoc.Cell
OpenMOC cell
Returns
-------
openmc_cell : openmc.Cell
Equivalent OpenMC cell
"""
cv.check_type('openmoc_cell', openmoc_cell, openmoc.Cell)
cell_id = openmoc_cell.getId()
# If this Cell was already created, use it
if cell_id in OPENMC_CELLS:
return OPENMC_CELLS[cell_id]
    # Create an OpenMC Cell to represent this OpenMOC Cell
name = openmoc_cell.getName()
openmc_cell = openmc.Cell(cell_id, name)
if openmoc_cell.getType() == openmoc.MATERIAL:
fill = openmoc_cell.getFillMaterial()
openmc_cell.fill = get_openmc_material(fill)
elif openmoc_cell.getType() == openmoc.FILL:
fill = openmoc_cell.getFillUniverse()
if fill.getType() == openmoc.LATTICE:
fill = openmoc.castUniverseToLattice(fill)
openmc_cell.fill = get_openmc_lattice(fill)
else:
openmc_cell.fill = get_openmc_universe(fill)
if openmoc_cell.isRotated():
# get rotation for each of 3 axes
rotation = openmoc_cell.retrieveRotation(3)
openmc_cell.rotation = rotation
if openmoc_cell.isTranslated():
# get translation for each of 3 axes
translation = openmoc_cell.retrieveTranslation(3)
openmc_cell.translation = translation
    # Convert OpenMOC's cell region to an equivalent OpenMC region
openmoc_region = openmoc_cell.getRegion()
if openmoc_region is not None:
openmc_cell.region = get_openmc_region(openmoc_region)
# Add the OpenMC Cell to the global collection of all OpenMC Cells
OPENMC_CELLS[cell_id] = openmc_cell
# Add the OpenMOC Cell to the global collection of all OpenMOC Cells
OPENMOC_CELLS[cell_id] = openmoc_cell
return openmc_cell
def get_openmoc_universe(openmc_universe):
"""Return an OpenMOC universe corresponding to an OpenMC universe.
Parameters
----------
openmc_universe : openmc.Universe
OpenMC universe
Returns
-------
openmoc_universe : openmoc.Universe
Equivalent OpenMOC universe
"""
cv.check_type('openmc_universe', openmc_universe, openmc.Universe)
universe_id = openmc_universe.id
# If this Universe was already created, use it
if universe_id in OPENMOC_UNIVERSES:
return OPENMOC_UNIVERSES[universe_id]
# Create an OpenMOC Universe to represent this OpenMC Universe
name = openmc_universe.name
openmoc_universe = openmoc.Universe(universe_id, name)
# Convert all OpenMC Cells in this Universe to OpenMOC Cells
openmc_cells = openmc_universe.cells
for openmc_cell in openmc_cells.values():
openmoc_cell = get_openmoc_cell(openmc_cell)
openmoc_universe.addCell(openmoc_cell)
# Add the OpenMC Universe to the global collection of all OpenMC Universes
OPENMC_UNIVERSES[universe_id] = openmc_universe
# Add the OpenMOC Universe to the global collection of all OpenMOC Universes
OPENMOC_UNIVERSES[universe_id] = openmoc_universe
return openmoc_universe
def get_openmc_universe(openmoc_universe):
"""Return an OpenMC universe corresponding to an OpenMOC universe.
Parameters
----------
openmoc_universe : openmoc.Universe
OpenMOC universe
Returns
-------
openmc_universe : openmc.Universe
Equivalent OpenMC universe
"""
cv.check_type('openmoc_universe', openmoc_universe, openmoc.Universe)
universe_id = openmoc_universe.getId()
# If this Universe was already created, use it
if universe_id in OPENMC_UNIVERSES:
return OPENMC_UNIVERSES[universe_id]
# Create an OpenMC Universe to represent this OpenMOC Universe
name = openmoc_universe.getName()
openmc_universe = openmc.Universe(universe_id, name)
# Convert all OpenMOC Cells in this Universe to OpenMC Cells
for openmoc_cell in openmoc_universe.getCells().values():
openmc_cell = get_openmc_cell(openmoc_cell)
openmc_universe.add_cell(openmc_cell)
# Add the OpenMC Universe to the global collection of all OpenMC Universes
OPENMC_UNIVERSES[universe_id] = openmc_universe
# Add the OpenMOC Universe to the global collection of all OpenMOC Universes
OPENMOC_UNIVERSES[universe_id] = openmoc_universe
return openmc_universe
def get_openmoc_lattice(openmc_lattice):
"""Return an OpenMOC lattice corresponding to an OpenMOC lattice.
Parameters
----------
openmc_lattice : openmc.RectLattice
OpenMC lattice
Returns
-------
openmoc_lattice : openmoc.Lattice
Equivalent OpenMOC lattice
"""
cv.check_type('openmc_lattice', openmc_lattice, openmc.RectLattice)
lattice_id = openmc_lattice.id
# If this Lattice was already created, use it
if lattice_id in OPENMOC_LATTICES:
return OPENMOC_LATTICES[lattice_id]
# Create an OpenMOC Lattice to represent this OpenMC Lattice
name = openmc_lattice.name
dimension = openmc_lattice.shape
pitch = openmc_lattice.pitch
lower_left = openmc_lattice.lower_left
universes = openmc_lattice.universes
# Convert 2D dimension to 3D for OpenMOC
if len(dimension) == 2:
new_dimension = np.ones(3, dtype=int)
new_dimension[:2] = dimension
dimension = new_dimension
# Convert 2D pitch to 3D for OpenMOC
if len(pitch) == 2:
new_pitch = np.ones(3, dtype=np.float64) * np.finfo(np.float64).max
new_pitch[:2] = pitch
pitch = new_pitch
# Convert 2D lower left to 3D for OpenMOC
if len(lower_left) == 2:
new_lower_left = np.ones(3, dtype=np.float64)
new_lower_left *= np.finfo(np.float64).min / 2.
new_lower_left[:2] = lower_left
lower_left = new_lower_left
# Convert 2D universes array to 3D for OpenMOC
if len(universes.shape) == 2:
new_universes = universes.copy()
new_universes.shape = (1,) + universes.shape
universes = new_universes
# Initialize an empty array for the OpenMOC nested Universes in this Lattice
universe_array = np.ndarray(tuple(dimension[::-1]), dtype=openmoc.Universe)
# Create OpenMOC Universes for each unique nested Universe in this Lattice
unique_universes = openmc_lattice.get_unique_universes()
for universe_id, universe in unique_universes.items():
unique_universes[universe_id] = get_openmoc_universe(universe)
# Build the nested Universe array
for z in range(dimension[2]):
for y in range(dimension[1]):
for x in range(dimension[0]):
universe_id = universes[z][y][x].id
universe_array[z][y][x] = unique_universes[universe_id]
openmoc_lattice = openmoc.Lattice(lattice_id, name)
openmoc_lattice.setWidth(pitch[0], pitch[1], pitch[2])
openmoc_lattice.setUniverses(universe_array.tolist())
offset = np.array(lower_left, dtype=np.float64) - \
((np.array(pitch, dtype=np.float64) *
np.array(dimension, dtype=np.float64))) / -2.0
openmoc_lattice.setOffset(offset[0], offset[1], offset[2])
# Add the OpenMC Lattice to the global collection of all OpenMC Lattices
OPENMC_LATTICES[lattice_id] = openmc_lattice
# Add the OpenMOC Lattice to the global collection of all OpenMOC Lattices
OPENMOC_LATTICES[lattice_id] = openmoc_lattice
return openmoc_lattice
def get_openmc_lattice(openmoc_lattice):
"""Return an OpenMC lattice corresponding to an OpenMOC lattice.
Parameters
----------
openmoc_lattice : openmoc.Lattice
OpenMOC lattice
Returns
-------
openmc_lattice : openmc.RectLattice
Equivalent OpenMC lattice
"""
cv.check_type('openmoc_lattice', openmoc_lattice, openmoc.Lattice)
lattice_id = openmoc_lattice.getId()
# If this Lattice was already created, use it
if lattice_id in OPENMC_LATTICES:
return OPENMC_LATTICES[lattice_id]
name = openmoc_lattice.getName()
dimension = [openmoc_lattice.getNumX(),
openmoc_lattice.getNumY(),
openmoc_lattice.getNumZ()]
width = [openmoc_lattice.getWidthX(),
openmoc_lattice.getWidthY(),
openmoc_lattice.getWidthZ()]
offset = openmoc_lattice.getOffset()
offset = [offset.getX(), offset.getY(), offset.getZ()]
lower_left = np.array(offset, dtype=np.float64) + \
((np.array(width, dtype=np.float64) *
np.array(dimension, dtype=np.float64))) / -2.0
    # Initialize an empty array for the OpenMC nested Universes in this Lattice
universe_array = np.ndarray(tuple(np.array(dimension)),
dtype=openmoc.Universe)
    # Create OpenMC Universes for each unique nested Universe in this Lattice
unique_universes = openmoc_lattice.getUniqueUniverses()
for universe_id, universe in unique_universes.items():
unique_universes[universe_id] = get_openmc_universe(universe)
# Build the nested Universe array
for x in range(dimension[0]):
for y in range(dimension[1]):
for z in range(dimension[2]):
universe = openmoc_lattice.getUniverse(x, y, z)
universe_id = universe.getId()
universe_array[x][y][z] = \
unique_universes[universe_id]
universe_array = np.swapaxes(universe_array, 0, 2)
# Convert axially infinite 3D OpenMOC lattice to a 2D OpenMC lattice
if width[2] == np.finfo(np.float64).max:
dimension = dimension[:2]
width = width[:2]
offset = offset[:2]
lower_left = lower_left[:2]
universe_array = np.squeeze(universe_array, 2)
openmc_lattice = openmc.RectLattice(lattice_id=lattice_id, name=name)
openmc_lattice.pitch = width
openmc_lattice.lower_left = lower_left
openmc_lattice.universes = universe_array
# Add the OpenMC Lattice to the global collection of all OpenMC Lattices
OPENMC_LATTICES[lattice_id] = openmc_lattice
# Add the OpenMOC Lattice to the global collection of all OpenMOC Lattices
OPENMOC_LATTICES[lattice_id] = openmoc_lattice
return openmc_lattice
def get_openmoc_geometry(openmc_geometry):
"""Return an OpenMC geometry corresponding to an OpenMOC geometry.
Parameters
----------
openmc_geometry : openmc.Geometry
OpenMC geometry
Returns
-------
openmoc_geometry : openmoc.Geometry
Equivalent OpenMOC geometry
"""
cv.check_type('openmc_geometry', openmc_geometry, openmc.Geometry)
# Clear dictionaries and auto-generated IDs
OPENMC_SURFACES.clear()
OPENMOC_SURFACES.clear()
OPENMC_CELLS.clear()
OPENMOC_CELLS.clear()
OPENMC_UNIVERSES.clear()
OPENMOC_UNIVERSES.clear()
OPENMC_LATTICES.clear()
OPENMOC_LATTICES.clear()
openmc_root_universe = openmc_geometry.root_universe
openmoc_root_universe = get_openmoc_universe(openmc_root_universe)
openmoc_geometry = openmoc.Geometry()
openmoc_geometry.setRootUniverse(openmoc_root_universe)
# Update OpenMOC's auto-generated object IDs (e.g., Surface, Material)
# with the maximum of those created from the OpenMC objects
all_materials = openmoc_geometry.getAllMaterials()
all_surfaces = openmoc_geometry.getAllSurfaces()
all_cells = openmoc_geometry.getAllCells()
all_universes = openmoc_geometry.getAllUniverses()
max_material_id = max(all_materials.keys())
max_surface_id = max(all_surfaces.keys())
max_cell_id = max(all_cells.keys())
max_universe_id = max(all_universes.keys())
openmoc.maximize_material_id(max_material_id+1)
openmoc.maximize_surface_id(max_surface_id+1)
openmoc.maximize_cell_id(max_cell_id+1)
openmoc.maximize_universe_id(max_universe_id+1)
return openmoc_geometry
def get_openmc_geometry(openmoc_geometry):
"""Return an OpenMC geometry corresponding to an OpenMOC geometry.
Parameters
----------
openmoc_geometry : openmoc.Geometry
OpenMOC geometry
Returns
-------
openmc_geometry : openmc.Geometry
Equivalent OpenMC geometry
"""
cv.check_type('openmoc_geometry', openmoc_geometry, openmoc.Geometry)
    # Clear dictionaries and auto-generated IDs
OPENMC_SURFACES.clear()
OPENMOC_SURFACES.clear()
OPENMC_CELLS.clear()
OPENMOC_CELLS.clear()
OPENMC_UNIVERSES.clear()
OPENMOC_UNIVERSES.clear()
OPENMC_LATTICES.clear()
OPENMOC_LATTICES.clear()
openmoc_root_universe = openmoc_geometry.getRootUniverse()
openmc_root_universe = get_openmc_universe(openmoc_root_universe)
openmc_geometry = openmc.Geometry()
openmc_geometry.root_universe = openmc_root_universe
return openmc_geometry
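# Typical round-trip usage (an illustrative sketch; it assumes an OpenMC model
# with a `root_universe` has already been built and that openmoc is importable):
#
#     openmc_geometry = openmc.Geometry(root_universe)
#     openmoc_geometry = get_openmoc_geometry(openmc_geometry)
#     round_trip = get_openmc_geometry(openmoc_geometry)
#
# Both converters clear the surface/cell/universe/lattice caches on entry, so
# each call starts from a clean mapping between the two object hierarchies.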
|
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/colors.py
__version__=''' $Id$ '''
__doc__='''Defines standard colour-handling classes and colour names.
We define standard classes to hold colours in two models: RGB and CMYK.
These can be constructed from several popular formats. We also include
- pre-built colour objects for the HTML standard colours
- pre-built colours used in ReportLab's branding
- various conversion and construction functions
'''
import string, math
from types import StringType, ListType, TupleType
from reportlab.lib.utils import fp_str
_SeqTypes = (ListType,TupleType)
class Color:
"""This class is used to represent color. Components red, green, blue
are in the range 0 (dark) to 1 (full intensity)."""
def __init__(self, red=0, green=0, blue=0):
"Initialize with red, green, blue in range [0-1]."
self.red, self.green, self.blue = red,green,blue
def __repr__(self):
return "Color(%s)" % string.replace(fp_str(self.red, self.green, self.blue),' ',',')
def __hash__(self):
return hash( (self.red, self.green, self.blue) )
def __cmp__(self,other):
try:
dsum = 4*self.red-4*other.red + 2*self.green-2*other.green + self.blue-other.blue
except:
return -1
if dsum > 0: return 1
if dsum < 0: return -1
return 0
def rgb(self):
"Returns a three-tuple of components"
return (self.red, self.green, self.blue)
def bitmap_rgb(self):
return tuple(map(lambda x: int(x*255)&255, self.rgb()))
def hexval(self):
return '0x%02x%02x%02x' % self.bitmap_rgb()
class CMYKColor(Color):
"""This represents colors using the CMYK (cyan, magenta, yellow, black)
model commonly used in professional printing. This is implemented
as a derived class so that renderers which only know about RGB "see it"
as an RGB color through its 'red','green' and 'blue' attributes, according
to an approximate function.
    The RGB approximation is worked out when the object is constructed, so
the color attributes should not be changed afterwards.
Extra attributes may be attached to the class to support specific ink models,
and renderers may look for these."""
def __init__(self, cyan=0, magenta=0, yellow=0, black=0,
spotName=None, density=1, knockout=None):
"""
        Initialize with four color components in the range [0-1]. The optional
spotName, density & knockout may be of use to specific renderers.
spotName is intended for use as an identifier to the renderer not client programs.
density is used to modify the overall amount of ink.
knockout is a renderer dependent option that determines whether the applied colour
        knocks out (removes) existing colour; None means use the global default.
"""
self.cyan = cyan
self.magenta = magenta
self.yellow = yellow
self.black = black
self.spotName = spotName
self.density = max(min(density,1),0) # force into right range
self.knockout = knockout
        # now work out the RGB approximation (overriding the inherited RGB attributes)
self.red, self.green, self.blue = cmyk2rgb( (cyan, magenta, yellow, black) )
if density<1:
#density adjustment of rgb approximants, effectively mix with white
r, g, b = self.red, self.green, self.blue
r = density*(r-1)+1
g = density*(g-1)+1
b = density*(b-1)+1
self.red, self.green, self.blue = (r,g,b)
def __repr__(self):
return "CMYKColor(%s%s%s%s)" % (
string.replace(fp_str(self.cyan, self.magenta, self.yellow, self.black),' ',','),
(self.spotName and (',spotName='+repr(self.spotName)) or ''),
(self.density!=1 and (',density='+fp_str(self.density)) or ''),
(self.knockout is not None and (',knockout=%d' % self.knockout) or ''),
)
def __hash__(self):
return hash( (self.cyan, self.magenta, self.yellow, self.black, self.density, self.spotName) )
def __cmp__(self,other):
"""Partial ordering of colors according to a notion of distance.
Comparing across the two color models is of limited use."""
# why the try-except? What can go wrong?
if isinstance(other, CMYKColor):
dsum = (((( (self.cyan-other.cyan)*2 +
(self.magenta-other.magenta))*2+
(self.yellow-other.yellow))*2+
(self.black-other.black))*2+
(self.density-other.density))*2 + cmp(self.spotName or '',other.spotName or '')
else: # do the RGB comparison
try:
dsum = ((self.red-other.red)*2+(self.green-other.green))*2+(self.blue-other.blue)
except: # or just return 'not equal' if not a color
return -1
        if dsum > 0: return 1
        if dsum < 0: return -1
        return 0
def cmyk(self):
"Returns a tuple of four color components - syntactic sugar"
return (self.cyan, self.magenta, self.yellow, self.black)
def _density_str(self):
return fp_str(self.density)
class PCMYKColor(CMYKColor):
'''100 based CMYKColor with density and a spotName; just like Rimas uses'''
def __init__(self,cyan,magenta,yellow,black,density=100,spotName=None,knockout=None):
CMYKColor.__init__(self,cyan/100.,magenta/100.,yellow/100.,black/100.,spotName,density/100.,knockout=knockout)
def __repr__(self):
return "PCMYKColor(%s%s%s%s)" % (
string.replace(fp_str(self.cyan*100, self.magenta*100, self.yellow*100, self.black*100),' ',','),
(self.spotName and (',spotName='+repr(self.spotName)) or ''),
(self.density!=1 and (',density='+fp_str(self.density*100)) or ''),
(self.knockout is not None and (',knockout=%d' % self.knockout) or ''),
)
def cmyk2rgb((c,m,y,k),density=1):
"Convert from a CMYK color tuple to an RGB color tuple"
# From the Adobe Postscript Ref. Manual 2nd ed.
r = 1.0 - min(1.0, c + k)
g = 1.0 - min(1.0, m + k)
b = 1.0 - min(1.0, y + k)
return (r,g,b)
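# A few spot checks of the formula above (shown as comments, not executed):
#   cmyk2rgb((0, 0, 0, 0)) -> (1.0, 1.0, 1.0)   # no ink gives white
#   cmyk2rgb((0, 0, 0, 1)) -> (0.0, 0.0, 0.0)   # full black ink gives black
#   cmyk2rgb((1, 0, 0, 0)) -> (0.0, 1.0, 1.0)   # pure cyan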
def rgb2cmyk(r,g,b):
'''one way to get cmyk from rgb'''
c = 1 - r
m = 1 - g
y = 1 - b
k = min(c,m,y)
c = min(1,max(0,c-k))
m = min(1,max(0,m-k))
y = min(1,max(0,y-k))
k = min(1,max(0,k))
return (c,m,y,k)
def color2bw(colorRGB):
"Transform an RGB color to a black and white equivalent."
col = colorRGB
r, g, b = col.red, col.green, col.blue
n = (r + g + b) / 3.0
bwColorRGB = Color(n, n, n)
return bwColorRGB
def HexColor(val, htmlOnly=False):
"""This function converts a hex string, or an actual integer number,
into the corresponding color. E.g., in "#AABBCC" or 0xAABBCC,
AA is the red, BB is the green, and CC is the blue (00-FF).
For completeness I assume that #aabbcc or 0xaabbcc are hex numbers
otherwise a pure integer is converted as decimal rgb. If htmlOnly is true,
only the #aabbcc form is allowed.
>>> HexColor('#ffffff')
Color(1,1,1)
>>> HexColor('#FFFFFF')
Color(1,1,1)
>>> HexColor('0xffffff')
Color(1,1,1)
>>> HexColor('16777215')
Color(1,1,1)
An '0x' or '#' prefix is required for hex (as opposed to decimal):
>>> HexColor('ffffff')
Traceback (most recent call last):
ValueError: invalid literal for int(): ffffff
>>> HexColor('#FFFFFF', htmlOnly=True)
Color(1,1,1)
>>> HexColor('0xffffff', htmlOnly=True)
Traceback (most recent call last):
ValueError: not a hex string
>>> HexColor('16777215', htmlOnly=True)
Traceback (most recent call last):
ValueError: not a hex string
""" #" for emacs
if type(val) == StringType:
b = 10
if val[:1] == '#':
val = val[1:]
b = 16
else:
if htmlOnly:
raise ValueError('not a hex string')
if string.lower(val[:2]) == '0x':
b = 16
val = val[2:]
val = string.atoi(val,b)
return Color(((val>>16)&0xFF)/255.0,((val>>8)&0xFF)/255.0,(val&0xFF)/255.0)
def linearlyInterpolatedColor(c0, c1, x0, x1, x):
"""
Linearly interpolates colors. Can handle RGB, CMYK and PCMYK
    colors - raises ValueError if the colour classes aren't the same.
Doesn't currently handle 'Spot Color Interpolation'.
"""
if c0.__class__ != c1.__class__:
raise ValueError, "Color classes must be the same for interpolation!"
if x1<x0:
x0,x1,c0,c1 = x1,x0,c1,c0 # normalized so x1>x0
if x<x0-1e-8 or x>x1+1e-8: # fudge factor for numerical problems
raise ValueError, "Can't interpolate: x=%f is not between %f and %f!" % (x,x0,x1)
if x<=x0:
return c0
elif x>=x1:
return c1
cname = c0.__class__.__name__
dx = float(x1-x0)
x = x-x0
if cname == 'Color': # RGB
r = c0.red+x*(c1.red - c0.red)/dx
g = c0.green+x*(c1.green- c0.green)/dx
b = c0.blue+x*(c1.blue - c0.blue)/dx
return Color(r,g,b)
elif cname == 'CMYKColor':
c = c0.cyan+x*(c1.cyan - c0.cyan)/dx
m = c0.magenta+x*(c1.magenta - c0.magenta)/dx
y = c0.yellow+x*(c1.yellow - c0.yellow)/dx
k = c0.black+x*(c1.black - c0.black)/dx
d = c0.density+x*(c1.density - c0.density)/dx
return CMYKColor(c,m,y,k, density=d)
elif cname == 'PCMYKColor':
if cmykDistance(c0,c1)<1e-8:
#colors same do density and preserve spotName if any
assert c0.spotName == c1.spotName, "Identical cmyk, but different spotName"
c = c0.cyan
m = c0.magenta
y = c0.yellow
k = c0.black
d = c0.density+x*(c1.density - c0.density)/dx
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, spotName=c0.spotName)
elif cmykDistance(c0,_CMYK_white)<1e-8:
#special c0 is white
c = c1.cyan
m = c1.magenta
y = c1.yellow
k = c1.black
d = x*c1.density/dx
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, spotName=c1.spotName)
elif cmykDistance(c1,_CMYK_white)<1e-8:
#special c1 is white
c = c0.cyan
m = c0.magenta
y = c0.yellow
k = c0.black
d = c0.density*(1-x/dx)
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100, spotName=c0.spotName)
else:
c = c0.cyan+x*(c1.cyan - c0.cyan)/dx
m = c0.magenta+x*(c1.magenta - c0.magenta)/dx
y = c0.yellow+x*(c1.yellow - c0.yellow)/dx
k = c0.black+x*(c1.black - c0.black)/dx
d = c0.density+x*(c1.density - c0.density)/dx
return PCMYKColor(c*100,m*100,y*100,k*100, density=d*100)
else:
raise ValueError, "Can't interpolate: Unknown color class %s!" % cname
# special case -- indicates no drawing should be done
# this is a hangover from PIDDLE - suggest we ditch it since it is not used anywhere
#transparent = Color(-1, -1, -1)
_CMYK_white=CMYKColor(0,0,0,0)
_PCMYK_white=PCMYKColor(0,0,0,0)
_CMYK_black=CMYKColor(0,0,0,1)
_PCMYK_black=PCMYKColor(0,0,0,100)
# Special colors
ReportLabBlueOLD = HexColor(0x4e5688)
ReportLabBlue = HexColor(0x00337f)
ReportLabBluePCMYK = PCMYKColor(100,65,0,30,spotName='Pantone 288U')
ReportLabLightBlue = HexColor(0xb7b9d3)
ReportLabFidBlue=HexColor(0x3366cc)
ReportLabFidRed=HexColor(0xcc0033)
ReportLabGreen = HexColor(0x336600)
ReportLabLightGreen = HexColor(0x339933)
# color constants -- mostly from HTML standard
aliceblue = HexColor(0xF0F8FF)
antiquewhite = HexColor(0xFAEBD7)
aqua = HexColor(0x00FFFF)
aquamarine = HexColor(0x7FFFD4)
azure = HexColor(0xF0FFFF)
beige = HexColor(0xF5F5DC)
bisque = HexColor(0xFFE4C4)
black = HexColor(0x000000)
blanchedalmond = HexColor(0xFFEBCD)
blue = HexColor(0x0000FF)
blueviolet = HexColor(0x8A2BE2)
brown = HexColor(0xA52A2A)
burlywood = HexColor(0xDEB887)
cadetblue = HexColor(0x5F9EA0)
chartreuse = HexColor(0x7FFF00)
chocolate = HexColor(0xD2691E)
coral = HexColor(0xFF7F50)
cornflowerblue = cornflower = HexColor(0x6495ED)
cornsilk = HexColor(0xFFF8DC)
crimson = HexColor(0xDC143C)
cyan = HexColor(0x00FFFF)
darkblue = HexColor(0x00008B)
darkcyan = HexColor(0x008B8B)
darkgoldenrod = HexColor(0xB8860B)
darkgray = HexColor(0xA9A9A9)
darkgrey = darkgray
darkgreen = HexColor(0x006400)
darkkhaki = HexColor(0xBDB76B)
darkmagenta = HexColor(0x8B008B)
darkolivegreen = HexColor(0x556B2F)
darkorange = HexColor(0xFF8C00)
darkorchid = HexColor(0x9932CC)
darkred = HexColor(0x8B0000)
darksalmon = HexColor(0xE9967A)
darkseagreen = HexColor(0x8FBC8B)
darkslateblue = HexColor(0x483D8B)
darkslategray = HexColor(0x2F4F4F)
darkslategrey = darkslategray
darkturquoise = HexColor(0x00CED1)
darkviolet = HexColor(0x9400D3)
deeppink = HexColor(0xFF1493)
deepskyblue = HexColor(0x00BFFF)
dimgray = HexColor(0x696969)
dimgrey = dimgray
dodgerblue = HexColor(0x1E90FF)
firebrick = HexColor(0xB22222)
floralwhite = HexColor(0xFFFAF0)
forestgreen = HexColor(0x228B22)
fuchsia = HexColor(0xFF00FF)
gainsboro = HexColor(0xDCDCDC)
ghostwhite = HexColor(0xF8F8FF)
gold = HexColor(0xFFD700)
goldenrod = HexColor(0xDAA520)
gray = HexColor(0x808080)
grey = gray
green = HexColor(0x008000)
greenyellow = HexColor(0xADFF2F)
honeydew = HexColor(0xF0FFF0)
hotpink = HexColor(0xFF69B4)
indianred = HexColor(0xCD5C5C)
indigo = HexColor(0x4B0082)
ivory = HexColor(0xFFFFF0)
khaki = HexColor(0xF0E68C)
lavender = HexColor(0xE6E6FA)
lavenderblush = HexColor(0xFFF0F5)
lawngreen = HexColor(0x7CFC00)
lemonchiffon = HexColor(0xFFFACD)
lightblue = HexColor(0xADD8E6)
lightcoral = HexColor(0xF08080)
lightcyan = HexColor(0xE0FFFF)
lightgoldenrodyellow = HexColor(0xFAFAD2)
lightgreen = HexColor(0x90EE90)
lightgrey = HexColor(0xD3D3D3)
lightpink = HexColor(0xFFB6C1)
lightsalmon = HexColor(0xFFA07A)
lightseagreen = HexColor(0x20B2AA)
lightskyblue = HexColor(0x87CEFA)
lightslategray = HexColor(0x778899)
lightslategrey = lightslategray
lightsteelblue = HexColor(0xB0C4DE)
lightyellow = HexColor(0xFFFFE0)
lime = HexColor(0x00FF00)
limegreen = HexColor(0x32CD32)
linen = HexColor(0xFAF0E6)
magenta = HexColor(0xFF00FF)
maroon = HexColor(0x800000)
mediumaquamarine = HexColor(0x66CDAA)
mediumblue = HexColor(0x0000CD)
mediumorchid = HexColor(0xBA55D3)
mediumpurple = HexColor(0x9370DB)
mediumseagreen = HexColor(0x3CB371)
mediumslateblue = HexColor(0x7B68EE)
mediumspringgreen = HexColor(0x00FA9A)
mediumturquoise = HexColor(0x48D1CC)
mediumvioletred = HexColor(0xC71585)
midnightblue = HexColor(0x191970)
mintcream = HexColor(0xF5FFFA)
mistyrose = HexColor(0xFFE4E1)
moccasin = HexColor(0xFFE4B5)
navajowhite = HexColor(0xFFDEAD)
navy = HexColor(0x000080)
oldlace = HexColor(0xFDF5E6)
olive = HexColor(0x808000)
olivedrab = HexColor(0x6B8E23)
orange = HexColor(0xFFA500)
orangered = HexColor(0xFF4500)
orchid = HexColor(0xDA70D6)
palegoldenrod = HexColor(0xEEE8AA)
palegreen = HexColor(0x98FB98)
paleturquoise = HexColor(0xAFEEEE)
palevioletred = HexColor(0xDB7093)
papayawhip = HexColor(0xFFEFD5)
peachpuff = HexColor(0xFFDAB9)
peru = HexColor(0xCD853F)
pink = HexColor(0xFFC0CB)
plum = HexColor(0xDDA0DD)
powderblue = HexColor(0xB0E0E6)
purple = HexColor(0x800080)
red = HexColor(0xFF0000)
rosybrown = HexColor(0xBC8F8F)
royalblue = HexColor(0x4169E1)
saddlebrown = HexColor(0x8B4513)
salmon = HexColor(0xFA8072)
sandybrown = HexColor(0xF4A460)
seagreen = HexColor(0x2E8B57)
seashell = HexColor(0xFFF5EE)
sienna = HexColor(0xA0522D)
silver = HexColor(0xC0C0C0)
skyblue = HexColor(0x87CEEB)
slateblue = HexColor(0x6A5ACD)
slategray = HexColor(0x708090)
slategrey = slategray
snow = HexColor(0xFFFAFA)
springgreen = HexColor(0x00FF7F)
steelblue = HexColor(0x4682B4)
tan = HexColor(0xD2B48C)
teal = HexColor(0x008080)
thistle = HexColor(0xD8BFD8)
tomato = HexColor(0xFF6347)
turquoise = HexColor(0x40E0D0)
violet = HexColor(0xEE82EE)
wheat = HexColor(0xF5DEB3)
white = HexColor(0xFFFFFF)
whitesmoke = HexColor(0xF5F5F5)
yellow = HexColor(0xFFFF00)
yellowgreen = HexColor(0x9ACD32)
fidblue=HexColor(0x3366cc)
fidred=HexColor(0xcc0033)
fidlightblue=HexColor("#d6e0f5")
ColorType=type(black)
################################################################
#
# Helper functions for dealing with colors. These tell you
# which are predefined, so you can print color charts;
# and can give the nearest match to an arbitrary color object
#
#################################################################
def colorDistance(col1, col2):
"""Returns a number between 0 and root(3) stating how similar
two colours are - distance in r,g,b, space. Only used to find
names for things."""
return math.sqrt(
(col1.red - col2.red)**2 +
(col1.green - col2.green)**2 +
(col1.blue - col2.blue)**2
)
def cmykDistance(col1, col2):
"""Returns a number between 0 and root(4) stating how similar
    two colours are - distance in c,m,y,k space. Only used to find
names for things."""
return math.sqrt(
(col1.cyan - col2.cyan)**2 +
(col1.magenta - col2.magenta)**2 +
(col1.yellow - col2.yellow)**2 +
(col1.black - col2.black)**2
)
_namedColors = None
def getAllNamedColors():
#returns a dictionary of all the named ones in the module
# uses a singleton for efficiency
global _namedColors
if _namedColors is not None: return _namedColors
import colors
_namedColors = {}
for (name, value) in colors.__dict__.items():
if isinstance(value, Color):
_namedColors[name] = value
return _namedColors
def describe(aColor,mode=0):
'''finds nearest colour match to aColor.
    mode=0 print a string description
mode=1 return a string description
mode=2 return (distance, colorName)
'''
namedColors = getAllNamedColors()
closest = (10, None, None) #big number, name, color
for (name, color) in namedColors.items():
distance = colorDistance(aColor, color)
if distance < closest[0]:
closest = (distance, name, color)
if mode<=1:
s = 'best match is %s, distance %0.4f' % (closest[1], closest[0])
if mode==0: print s
else: return s
elif mode==2:
return (closest[1], closest[0])
else:
raise ValueError, "Illegal value for mode "+str(mode)
def toColor(arg,default=None):
'''try to map an arbitrary arg to a color instance'''
if isinstance(arg,Color): return arg
tArg = type(arg)
if tArg in _SeqTypes:
assert 3<=len(arg)<=4, 'Can only convert 3 and 4 sequences to color'
assert 0<=min(arg) and max(arg)<=1
return len(arg)==3 and Color(arg[0],arg[1],arg[2]) or CMYKColor(arg[0],arg[1],arg[2],arg[3])
elif tArg == StringType:
C = getAllNamedColors()
s = string.lower(arg)
if C.has_key(s): return C[s]
try:
return toColor(eval(arg))
except:
pass
try:
return HexColor(arg)
except:
if default is None:
raise ValueError('Invalid color value %r' % arg)
return default
def toColorOrNone(arg,default=None):
'''as above but allows None as a legal value'''
if arg is None:
return None
else:
return toColor(arg, default)
def setColors(**kw):
UNDEF = []
progress = 1
assigned = {}
while kw and progress:
progress = 0
for k, v in kw.items():
if type(v) in (type(()),type([])):
c = map(lambda x,UNDEF=UNDEF: toColor(x,UNDEF),v)
if type(v) is type(()): c = tuple(c)
ok = UNDEF not in c
else:
c = toColor(v,UNDEF)
ok = c is not UNDEF
if ok:
assigned[k] = c
del kw[k]
progress = 1
if kw: raise ValueError("Can't convert\n%s" % str(kw))
getAllNamedColors()
for k, c in assigned.items():
globals()[k] = c
if isinstance(c,Color): _namedColors[k] = c
def Whiter(c,f):
    '''given a color combine with white as c*f + w*(1-f), 0<=f<=1'''
c = toColor(c)
if isinstance(c,PCMYKColor):
w = _PCMYK_white
elif isinstance(c,CMYKColor): w = _CMYK_white
else: w = white
return linearlyInterpolatedColor(w, c, 0, 1, f)
def Blacker(c,f):
'''given a color combine with black as c*f+b*(1-f) 0<=f<=1'''
c = toColor(c)
if isinstance(c,PCMYKColor):
b = _PCMYK_black
elif isinstance(c,CMYKColor): b = _CMYK_black
else: b = black
return linearlyInterpolatedColor(b, c, 0, 1, f)
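# Illustrative examples of the two helpers above (a sketch, not executed here):
#   Whiter(red, 0.5)  mixes red half-and-half with white -> rgb (1.0, 0.5, 0.5)
#   Blacker(red, 0.5) mixes red half-and-half with black -> rgb (0.5, 0.0, 0.0)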
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
import os
import socket
import atexit
import re
import pkg_resources
from pkg_resources import ResolutionError, ExtractionError
from setuptools.compat import urllib2
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
""".strip().split()
HTTPSHandler = HTTPSConnection = object
for what, where in (
('HTTPSHandler', ['urllib2','urllib.request']),
('HTTPSConnection', ['httplib', 'http.client']),
):
for module in where:
try:
exec("from %s import %s" % (module, what))
except ImportError:
pass
is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
try:
from ssl import CertificateError, match_hostname
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
except ImportError:
CertificateError = None
match_hostname = None
if not CertificateError:
class CertificateError(ValueError):
pass
if not match_hostname:
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
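    # For example, the pattern '*.example.com' matches 'www.example.com' but
    # neither 'a.b.example.com' (the wildcard spans a single label) nor
    # 'example.com' itself.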
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
"""Simple verifying handler: no auth, subclasses, timeouts, etc."""
def __init__(self, ca_bundle):
self.ca_bundle = ca_bundle
HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(
lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
)
class VerifyingHTTPSConn(HTTPSConnection):
"""Simple verifying connection: no auth, subclasses, timeouts, etc."""
def __init__(self, host, ca_bundle, **kw):
HTTPSConnection.__init__(self, host, **kw)
self.ca_bundle = ca_bundle
def connect(self):
sock = socket.create_connection(
(self.host, self.port), getattr(self, 'source_address', None)
)
# Handle the socket if a (proxy) tunnel is present
if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
# http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
# change self.host to mean the proxy server host when tunneling is
# being used. Adapt, since we are interested in the destination
# host for the match_hostname() comparison.
actual_host = self._tunnel_host
else:
actual_host = self.host
self.sock = ssl.wrap_socket(
sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
)
try:
match_hostname(self.sock.getpeercert(), actual_host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
def opener_for(ca_bundle=None):
"""Get a urlopen() replacement that uses ca_bundle for verification"""
return urllib2.build_opener(
VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
).open
_wincerts = None
def get_win_certfile():
global _wincerts
if _wincerts is not None:
return _wincerts.name
try:
from wincertstore import CertFile
except ImportError:
return None
class MyCertFile(CertFile):
def __init__(self, stores=(), certs=()):
CertFile.__init__(self)
for store in stores:
self.addstore(store)
self.addcerts(certs)
atexit.register(self.close)
_wincerts = MyCertFile(stores=['CA', 'ROOT'])
return _wincerts.name
def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
if os.name=='nt':
return get_win_certfile()
else:
for cert_path in cert_paths:
if os.path.isfile(cert_path):
return cert_path
try:
return pkg_resources.resource_filename('certifi', 'cacert.pem')
except (ImportError, ResolutionError, ExtractionError):
return None
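# Example usage (illustrative only; requires network access):
#
#     if is_available:
#         urlopen = opener_for()          # verifies against find_ca_bundle()
#         page = urlopen('https://pypi.org/').read()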
|
|
# -*- coding: utf-8 -*-
"""
|oauth2| Providers
-------------------
Providers which implement the |oauth2|_ protocol.
.. autosummary::
OAuth2
Amazon
Behance
Bitly
Cosm
DeviantART
Eventbrite
Facebook
Foursquare
GitHub
Google
LinkedIn
PayPal
Reddit
Viadeo
VK
WindowsLive
Yammer
Yandex
"""
from authomatic.six.moves.urllib.parse import urlencode
import datetime
import logging
from authomatic import providers
from authomatic.exceptions import CancellationError, FailureError, OAuth2Error
import authomatic.core as core
__all__ = ['OAuth2', 'Amazon', 'Behance', 'Bitly', 'Cosm', 'DeviantART',
'Eventbrite', 'Facebook', 'Foursquare', 'GitHub', 'Google',
'LinkedIn', 'PayPal', 'Reddit', 'Viadeo', 'VK', 'WindowsLive',
'Yammer', 'Yandex']
class OAuth2(providers.AuthorizationProvider):
"""
Base class for |oauth2|_ providers.
"""
PROVIDER_TYPE_ID = 2
TOKEN_TYPES = ['', 'Bearer']
#: A scope preset to get most of the **user** info.
#: Use it in the :doc:`config` like ``{'scope': oauth2.Facebook.user_info_scope}``.
user_info_scope = []
#: :class:`bool` If ``False``, the provider doesn't support CSRF protection.
supports_csrf_protection = True
token_request_method = 'POST' # method for requesting an access token
def __init__(self, *args, **kwargs):
"""
Accepts additional keyword arguments:
:param list scope:
List of strings specifying requested permissions as described in the
`OAuth 2.0 spec <http://tools.ietf.org/html/rfc6749#section-3.3>`_.
:param bool offline:
If ``True`` the **provider** will be set up to request an *offline access token*.
default is ``False``.
As well as those inherited from :class:`.AuthorizationProvider` constructor.
"""
super(OAuth2, self).__init__(*args, **kwargs)
self.scope = self._kwarg(kwargs, 'scope', [])
self.offline = self._kwarg(kwargs, 'offline', False)
#===========================================================================
# Internal methods
#===========================================================================
def _x_scope_parser(self, scope):
"""
        Override this to handle differences in the accepted scope format across providers.
        :param list scope:
List of scopes.
"""
# Most providers accept csv scope.
return ','.join(scope) if scope else ''
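        # For example, ['user_about_me', 'email'] becomes 'user_about_me,email';
        # providers that expect a different delimiter override this method.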
@classmethod
def create_request_elements(cls, request_type, credentials, url, method='GET', params=None,
headers=None, body='', secret=None, redirect_uri='', scope='', csrf=''):
"""
Creates |oauth2| request elements.
"""
headers = headers or {}
params = params or {}
consumer_key = credentials.consumer_key or ''
consumer_secret = credentials.consumer_secret or ''
token = credentials.token or ''
refresh_token = credentials.refresh_token or credentials.token or ''
# Separate url base and query parameters.
url, base_params = cls._split_url(url)
# Add params extracted from URL.
params.update(dict(base_params))
if request_type == cls.USER_AUTHORIZATION_REQUEST_TYPE:
# User authorization request.
            # TODO: Raise an error with a specific message for each missing argument.
if consumer_key and redirect_uri and (csrf or not cls.supports_csrf_protection):
params['client_id'] = consumer_key
params['redirect_uri'] = redirect_uri
params['scope'] = scope
params['state'] = csrf
params['response_type'] = 'code'
# Add authorization header
headers.update(cls._authorization_header(credentials))
else:
raise OAuth2Error('Credentials with valid consumer_key and arguments redirect_uri, scope and ' + \
'state are required to create OAuth 2.0 user authorization request elements!')
elif request_type == cls.ACCESS_TOKEN_REQUEST_TYPE:
# Access token request.
if consumer_key and consumer_secret:
params['code'] = token
params['client_id'] = consumer_key
params['client_secret'] = consumer_secret
params['redirect_uri'] = redirect_uri
params['grant_type'] = 'authorization_code'
# TODO: Check whether all providers accept it
headers.update(cls._authorization_header(credentials))
else:
raise OAuth2Error('Credentials with valid token, consumer_key, consumer_secret and argument ' + \
'redirect_uri are required to create OAuth 2.0 access token request elements!')
elif request_type == cls.REFRESH_TOKEN_REQUEST_TYPE:
# Refresh access token request.
if refresh_token and consumer_key and consumer_secret:
params['refresh_token'] = refresh_token
params['client_id'] = consumer_key
params['client_secret'] = consumer_secret
params['grant_type'] = 'refresh_token'
else:
raise OAuth2Error('Credentials with valid refresh_token, consumer_key, consumer_secret ' + \
'are required to create OAuth 2.0 refresh token request elements!')
elif request_type == cls.PROTECTED_RESOURCE_REQUEST_TYPE:
# Protected resource request.
# Add Authorization header. See: http://tools.ietf.org/html/rfc6749#section-7.1
if credentials.token_type == cls.BEARER:
# http://tools.ietf.org/html/rfc6750#section-2.1
headers.update({'Authorization': 'Bearer {0}'.format(credentials.token)})
elif token:
params['access_token'] = token
else:
raise OAuth2Error('Credentials with valid token are required to create ' + \
'OAuth 2.0 protected resources request elements!')
request_elements = core.RequestElements(url, method, params, headers, body)
return cls._x_request_elements_filter(request_type, request_elements, credentials)
@staticmethod
def _x_refresh_credentials_if(credentials):
"""
        Override this to specify the conditions under which it makes sense to refresh credentials.
.. warning::
|classmethod|
:param credentials:
:class:`.Credentials`
:returns:
``True`` or ``False``
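        A subclass for a provider whose tokens never expire could, for instance,
        disable refreshing altogether (a sketch, mirroring what :class:`.Viadeo`
        does below):

        .. code-block:: python

            @staticmethod
            def _x_refresh_credentials_if(credentials):
                # Tokens never expire, so refreshing makes no sense.
                return False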
"""
if credentials.refresh_token:
return True
#===========================================================================
# Exposed methods
#===========================================================================
@classmethod
def to_tuple(cls, credentials):
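        """
        Serializes the |oauth2| specific part of :class:`.Credentials` into a tuple of
        ``(token, refresh_token, expiration_time, token_type_index)``.
        """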
return (credentials.token,
credentials.refresh_token,
credentials.expiration_time,
cls.TOKEN_TYPES.index(credentials.token_type))
@classmethod
def reconstruct(cls, deserialized_tuple, credentials, cfg):
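        """
        Rebuilds :class:`.Credentials` from a tuple created by :meth:`.to_tuple`.
        """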
token, refresh_token, expiration_time, token_type = deserialized_tuple
credentials.token = token
credentials.refresh_token = refresh_token
credentials.expiration_time = expiration_time
        credentials.token_type = cls.TOKEN_TYPES[int(token_type)]
return credentials
def refresh_credentials(self, credentials):
"""
        Refreshes :class:`.Credentials` if it makes sense.
:param credentials:
:class:`.Credentials` to be refreshed.
:returns:
:class:`.Response`.
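        A minimal usage sketch (``provider`` is a hypothetical, already configured
        provider instance and ``credentials`` were deserialized beforehand):

        .. code-block:: python

            response = provider.refresh_credentials(credentials)
            if response is not None and response.status == 200:
                # credentials.token and credentials.expire_in have been updated.
                new_token = credentials.token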
"""
if not self._x_refresh_credentials_if(credentials):
return
# We need consumer key and secret to make this kind of request.
cfg = credentials.config.get(credentials.provider_name)
credentials.consumer_key = cfg.get('consumer_key')
credentials.consumer_secret = cfg.get('consumer_secret')
request_elements = self.create_request_elements(request_type=self.REFRESH_TOKEN_REQUEST_TYPE,
credentials=credentials,
url=self.access_token_url,
method='POST')
self._log(logging.INFO, u'Refreshing credentials.')
response = self._fetch(*request_elements)
# We no longer need consumer info.
credentials.consumer_key = None
credentials.consumer_secret = None
# Extract the refreshed data.
access_token = response.data.get('access_token')
refresh_token = response.data.get('refresh_token')
# Update credentials only if there is access token.
if access_token:
credentials.token = access_token
credentials.expire_in = response.data.get('expires_in')
# Update refresh token only if there is a new one.
if refresh_token:
credentials.refresh_token = refresh_token
# Handle different naming conventions across providers.
credentials = self._x_credentials_parser(credentials, response.data)
return response
@providers.login_decorator
def login(self):
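        """
        The |oauth2| login procedure: in phase 1 the **user** is redirected to the
        provider (unless ``user_authorization_url`` is empty), in phase 2 the returned
        ``state`` is validated, the authorization code is exchanged for an access token
        and the user is updated or created.
        """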
# get request parameters from which we can determine the login phase
authorization_code = self.params.get('code')
error = self.params.get('error')
error_message = self.params.get('error_message')
state = self.params.get('state')
if authorization_code or not self.user_authorization_url:
if authorization_code:
#===================================================================
# Phase 2 after redirect with success
#===================================================================
self._log(logging.INFO, u'Continuing OAuth 2.0 authorization procedure after redirect.')
# validate CSRF token
if self.supports_csrf_protection:
self._log(logging.INFO, u'Validating request by comparing request state with stored state.')
stored_state = self._session_get('state')
if not stored_state:
raise FailureError(u'Unable to retrieve stored state!')
elif not stored_state == state:
raise FailureError(u'The returned state "{0}" doesn\'t match with the stored state!'.format(state),
url=self.user_authorization_url)
self._log(logging.INFO, u'Request is valid.')
else:
self._log(logging.WARN, u'Skipping CSRF validation!')
elif not self.user_authorization_url:
#===================================================================
# Phase 1 without user authorization redirect.
#===================================================================
self._log(logging.INFO, u'Starting OAuth 2.0 authorization procedure without ' + \
u'user authorization redirect.')
# exchange authorization code for access token by the provider
self._log(logging.INFO, u'Fetching access token from {0}.'.format(self.access_token_url))
self.credentials.token = authorization_code
request_elements = self.create_request_elements(request_type=self.ACCESS_TOKEN_REQUEST_TYPE,
credentials=self.credentials,
url=self.access_token_url,
method=self.token_request_method,
redirect_uri=self.url,
params=self.access_token_params,
headers=self.access_token_headers)
response = self._fetch(*request_elements)
self.access_token_response = response
access_token = response.data.get('access_token', '')
refresh_token = response.data.get('refresh_token', '')
if response.status != 200 or not access_token:
raise FailureError('Failed to obtain OAuth 2.0 access token from {0}! HTTP status: {1}, message: {2}.'\
.format(self.access_token_url, response.status, response.content),
original_message=response.content,
status=response.status,
url=self.access_token_url)
self._log(logging.INFO, u'Got access token.')
if refresh_token:
self._log(logging.INFO, u'Got refresh access token.')
# OAuth 2.0 credentials need access_token, refresh_token, token_type and expire_in.
self.credentials.token = access_token
self.credentials.refresh_token = refresh_token
self.credentials.expire_in = response.data.get('expires_in')
self.credentials.token_type = response.data.get('token_type', '')
            # We don't need these two anymore.
self.credentials.consumer_key = ''
self.credentials.consumer_secret = ''
# update credentials
self.credentials = self._x_credentials_parser(self.credentials, response.data)
# create user
self._update_or_create_user(response.data, self.credentials)
#===================================================================
# We're done!
#===================================================================
elif error or error_message:
#===================================================================
# Phase 2 after redirect with error
#===================================================================
error_reason = self.params.get('error_reason') or error
error_description = self.params.get('error_description') \
or error_message or error
if error_reason and 'denied' in error_reason:
raise CancellationError(error_description,
url=self.user_authorization_url)
else:
raise FailureError(error_description, url=self.user_authorization_url)
elif not self.params:
#===================================================================
# Phase 1 before redirect
#===================================================================
self._log(logging.INFO, u'Starting OAuth 2.0 authorization procedure.')
csrf = ''
if self.supports_csrf_protection:
                # generate csrf token
csrf = self.csrf_generator(self.settings.secret)
# and store it to session
self._session_set('state', csrf)
else:
self._log(logging.WARN, u'Provider doesn\'t support CSRF validation!')
request_elements = self.create_request_elements(request_type=self.USER_AUTHORIZATION_REQUEST_TYPE,
credentials=self.credentials,
url=self.user_authorization_url,
redirect_uri=self.url,
scope=self._x_scope_parser(self.scope),
csrf=csrf,
params=self.user_authorization_params)
self._log(logging.INFO, u'Redirecting user to {0}.'.format(request_elements.full_url))
self.redirect(request_elements.full_url)
class Amazon(OAuth2):
"""
Amazon |oauth2| provider.
Thanks to `Ghufran Syed <https://github.com/ghufransyed>`__.
* Dashboard: https://developer.amazon.com/lwa/sp/overview.html
* Docs: https://developer.amazon.com/public/apis/engage/login-with-amazon/docs/conceptual_overview.html
* API reference: https://developer.amazon.com/public/apis
.. note::
        Amazon only accepts a **redirect_uri** with the **https** scheme;
        therefore the *login handler* must also be accessible through **https**.
Supported :class:`.User` properties:
* email
* id
* name
* postal_code
Unsupported :class:`.User` properties:
* birth_date
* city
* country
* first_name
* gender
* last_name
* link
* locale
* nickname
* phone
* picture
* timezone
* username
"""
user_authorization_url = 'https://www.amazon.com/ap/oa'
access_token_url = 'https://api.amazon.com/auth/o2/token'
user_info_url = 'https://api.amazon.com/user/profile'
user_info_scope = ['profile', 'postal_code']
supported_user_attributes = core.SupportedUserAttributes(
email=True,
id=True,
name=True,
postal_code=True
)
def _x_scope_parser(self, scope):
# Amazon has space-separated scopes
return ' '.join(scope)
@staticmethod
def _x_user_parser(user, data):
user.id = data.get('user_id')
return user
@classmethod
def _x_credentials_parser(cls, credentials, data):
if data.get('token_type') == 'bearer':
credentials.token_type = cls.BEARER
return credentials
class Behance(OAuth2):
"""
Behance |oauth2| provider.
.. note::
Behance doesn't support third party authorization anymore,
which renders this class pretty much useless.
* Dashboard: http://www.behance.net/dev/apps
* Docs: http://www.behance.net/dev/authentication
* API reference: http://www.behance.net/dev/api/endpoints/
"""
user_authorization_url = 'https://www.behance.net/v2/oauth/authenticate'
access_token_url = 'https://www.behance.net/v2/oauth/token'
user_info_url = ''
user_info_scope = ['activity_read']
def _x_scope_parser(self, scope):
"""
Behance has pipe-separated scopes
"""
return '|'.join(scope)
@staticmethod
def _x_user_parser(user, data):
_user = data.get('user', {})
user.id = _user.get('id')
user.first_name = _user.get('first_name')
user.last_name = _user.get('last_name')
user.username = _user.get('username')
user.city = _user.get('city')
user.country = _user.get('country')
user.link = _user.get('url')
user.name = _user.get('display_name')
user.picture = _user.get('images', {}).get('138')
return user
class Bitly(OAuth2):
"""
Bitly |oauth2| provider.
.. warning::
|no-csrf|
* Dashboard: https://bitly.com/a/oauth_apps
* Docs: http://dev.bitly.com/authentication.html
* API reference: http://dev.bitly.com/api.html
Supported :class:`.User` properties:
* id
* link
* name
* picture
* username
Unsupported :class:`.User` properties:
* birth_date
* city
* country
* email
* first_name
* gender
* last_name
* locale
* nickname
* phone
* postal_code
* timezone
"""
supported_user_attributes = core.SupportedUserAttributes(
id=True,
link=True,
name=True,
picture=True,
username=True
)
supports_csrf_protection = False
_x_use_authorization_header = False
user_authorization_url = 'https://bitly.com/oauth/authorize'
access_token_url = 'https://api-ssl.bitly.com/oauth/access_token'
user_info_url = 'https://api-ssl.bitly.com/v3/user/info'
def __init__(self, *args, **kwargs):
super(Bitly, self).__init__(*args, **kwargs)
if self.offline:
if not 'grant_type' in self.access_token_params:
self.access_token_params['grant_type'] = 'refresh_token'
@staticmethod
def _x_user_parser(user, data):
info = data.get('data', {})
user.id = info.get('login')
user.name = info.get('full_name')
user.username = info.get('display_name')
user.picture = info.get('profile_image')
user.link = info.get('profile_url')
return user
class Cosm(OAuth2):
"""
Cosm |oauth2| provider.
.. note::
Cosm doesn't provide any *user info URL*.
* Dashboard: https://cosm.com/users/{your_username}/apps
* Docs: https://cosm.com/docs/
* API reference: https://cosm.com/docs/v2/
"""
user_authorization_url = 'https://cosm.com/oauth/authenticate'
access_token_url = 'https://cosm.com/oauth/token'
user_info_url = ''
@staticmethod
def _x_user_parser(user, data):
user.id = user.username = data.get('user')
return user
class DeviantART(OAuth2):
"""
DeviantART |oauth2| provider.
* Dashboard: https://www.deviantart.com/settings/myapps
* Docs: https://www.deviantart.com/developers/authentication
* API reference: http://www.deviantart.com/developers/oauth2
Supported :class:`.User` properties:
* name
* picture
* username
Unsupported :class:`.User` properties:
* birth_date
* city
* country
* email
* first_name
* gender
* id
* last_name
* link
* locale
* nickname
* phone
* postal_code
* timezone
"""
user_authorization_url = 'https://www.deviantart.com/oauth2/draft15/authorize'
access_token_url = 'https://www.deviantart.com/oauth2/draft15/token'
user_info_url = 'https://www.deviantart.com/api/oauth2/user/whoami'
user_info_scope = ['basic']
supported_user_attributes = core.SupportedUserAttributes(
name=True,
picture=True,
username=True
)
def __init__(self, *args, **kwargs):
super(DeviantART, self).__init__(*args, **kwargs)
if self.offline:
if not 'grant_type' in self.access_token_params:
self.access_token_params['grant_type'] = 'refresh_token'
@staticmethod
def _x_user_parser(user, data):
user.picture = data.get('usericonurl')
return user
class Eventbrite(OAuth2):
"""
Eventbrite |oauth2| provider.
Thanks to `Paul Brown <http://www.paulsprogrammingnotes.com/>`__.
* Dashboard: http://www.eventbrite.com/myaccount/apps/
* Docs: https://developer.eventbrite.com/docs/auth/
* API: http://developer.eventbrite.com/docs/
Supported :class:`.User` properties:
* email
* first_name
* id
* last_name
* name
Unsupported :class:`.User` properties:
* birth_date
* city
* country
* gender
* link
* locale
* nickname
* phone
* picture
* postal_code
* timezone
* username
"""
user_authorization_url = 'https://www.eventbrite.com/oauth/authorize'
access_token_url = 'https://www.eventbrite.com/oauth/token'
user_info_url = 'https://www.eventbriteapi.com/v3/users/me'
supported_user_attributes = core.SupportedUserAttributes(
email=True,
first_name=True,
id=True,
last_name=True,
name=True,
)
@classmethod
def _x_credentials_parser(cls, credentials, data):
if data.get('token_type') == 'bearer':
credentials.token_type = cls.BEARER
return credentials
@staticmethod
def _x_user_parser(user, data):
for email in data.get('emails', []):
if email.get('primary'):
user.email = email.get('email')
break
return user
class Facebook(OAuth2):
"""
Facebook |oauth2| provider.
* Dashboard: https://developers.facebook.com/apps
* Docs: http://developers.facebook.com/docs/howtos/login/server-side-login/
* API reference: http://developers.facebook.com/docs/reference/api/
* API explorer: http://developers.facebook.com/tools/explorer
Supported :class:`.User` properties:
* birth_date
* city
* country
* email
* first_name
* gender
* id
* last_name
* link
* locale
* location
* name
* picture
* timezone
Unsupported :class:`.User` properties:
* nickname
* phone
* postal_code
* username
"""
user_authorization_url = 'https://www.facebook.com/dialog/oauth'
access_token_url = 'https://graph.facebook.com/oauth/access_token'
user_info_url = 'https://graph.facebook.com/v2.3/me'
user_info_scope = ['email', 'user_about_me', 'user_birthday',
'user_location']
same_origin = False
supported_user_attributes = core.SupportedUserAttributes(
birth_date=True,
city=True,
country=True,
email=True,
first_name=True,
gender=True,
id=True,
last_name=True,
link=True,
locale=True,
location=True,
name=True,
picture=True,
timezone=True
)
@classmethod
def _x_request_elements_filter(cls, request_type, request_elements,
credentials):
if request_type == cls.REFRESH_TOKEN_REQUEST_TYPE:
            # As always, Facebook has its own name for "refresh_token"!
url, method, params, headers, body = request_elements
params['fb_exchange_token'] = params.pop('refresh_token')
params['grant_type'] = 'fb_exchange_token'
request_elements = core.RequestElements(url, method, params,
headers, body)
return request_elements
def __init__(self, *args, **kwargs):
super(Facebook, self).__init__(*args, **kwargs)
# Handle special Facebook requirements to be able
# to refresh the access token.
if self.offline:
# Facebook needs an offline_access scope.
if not 'offline_access' in self.scope:
self.scope.append('offline_access')
if self.popup:
self.user_authorization_url += '?display=popup'
@staticmethod
def _x_user_parser(user, data):
_birth_date = data.get('birthday')
if _birth_date:
try:
user.birth_date = datetime.datetime.strptime(_birth_date,
'%m/%d/%Y')
except ValueError:
pass
user.picture = ('http://graph.facebook.com/{0}/picture?type=large'
.format(user.id))
user.location = data.get('location', {}).get('name')
if user.location:
split_location = user.location.split(', ')
user.city = split_location[0].strip()
if len(split_location) > 1:
user.country = split_location[1].strip()
return user
@staticmethod
def _x_credentials_parser(credentials, data):
"""
        We need to override this method to fix Facebook's naming deviation.
"""
# Facebook returns "expires" instead of "expires_in".
credentials.expire_in = data.get('expires')
if data.get('token_type') == 'bearer':
# TODO: cls is not available here, hardcode for now.
credentials.token_type = 'Bearer'
return credentials
@staticmethod
def _x_refresh_credentials_if(credentials):
# Always refresh.
return True
def access(self, url, params=None, **kwargs):
if params is None:
params = {}
        # The syntax ?fields=field1,field2 is required for getting attributes,
        # see https://developers.facebook.com/blog/post/2015/07/08/graph-api-v2.4/
params["fields"] = "id,email,name,first_name,last_name,gender,hometown,link,timezone,verified,website,locale,languages"
return super(Facebook, self).access(url, params, **kwargs)
class Foursquare(OAuth2):
"""
Foursquare |oauth2| provider.
* Dashboard: https://foursquare.com/developers/apps
* Docs: https://developer.foursquare.com/overview/auth.html
* API reference: https://developer.foursquare.com/docs/
.. note::
Foursquare requires a *version* parameter in each request.
The default value is ``v=20140501``. You can override the version in
the ``params`` parameter of the :meth:`.Authomatic.access` method.
See https://developer.foursquare.com/overview/versioning
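        For example, a specific version might be pinned like this (the URL is this
        provider's ``user_info_url``; the version value is only illustrative):

        .. code-block:: python

            result = authomatic.access(credentials,
                                       'https://api.foursquare.com/v2/users/self',
                                       params={'v': '20140501'})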
Supported :class:`.User` properties:
* city
* country
* email
* first_name
* gender
* id
* last_name
* location
* name
* phone
* picture
Unsupported :class:`.User` properties:
* birth_date
* link
* locale
* nickname
* postal_code
* timezone
* username
"""
user_authorization_url = 'https://foursquare.com/oauth2/authenticate'
access_token_url = 'https://foursquare.com/oauth2/access_token'
user_info_url = 'https://api.foursquare.com/v2/users/self'
same_origin = False
supported_user_attributes = core.SupportedUserAttributes(
birth_date=True,
city=True,
country=True,
email=True,
first_name=True,
gender=True,
id=True,
last_name=True,
location=True,
name=True,
phone=True,
picture=True
)
@classmethod
def _x_request_elements_filter(cls, request_type, request_elements,
credentials):
if request_type == cls.PROTECTED_RESOURCE_REQUEST_TYPE:
# Foursquare uses OAuth 1.0 "oauth_token" for what should be
# "access_token" in OAuth 2.0!
url, method, params, headers, body = request_elements
params['oauth_token'] = params.pop('access_token')
# Foursquare needs the version "v" parameter in every request.
# https://developer.foursquare.com/overview/versioning
if not params.get('v'):
params['v'] = '20140501'
request_elements = core.RequestElements(url, method, params,
headers, body)
return request_elements
@staticmethod
def _x_user_parser(user, data):
_resp = data.get('response', {})
_user = _resp.get('user', {})
user.id = _user.get('id')
user.first_name = _user.get('firstName')
user.last_name = _user.get('lastName')
user.gender = _user.get('gender')
_birth_date = _user.get('birthday')
if _birth_date:
user.birth_date = datetime.datetime.fromtimestamp(_birth_date)
_photo = _user.get('photo', {})
if isinstance(_photo, dict):
_photo_prefix = _photo.get('prefix', '').strip('/')
_photo_suffix = _photo.get('suffix', '').strip('/')
user.picture = '/'.join([_photo_prefix, _photo_suffix])
if isinstance(_photo, str):
user.picture = _photo
user.location = _user.get('homeCity')
if user.location:
split_location = user.location.split(',')
user.city = split_location[0].strip()
            if len(split_location) > 1:
user.country = split_location[1].strip()
_contact = _user.get('contact', {})
user.email = _contact.get('email')
user.phone = _contact.get('phone')
return user
class GitHub(OAuth2):
"""
GitHub |oauth2| provider.
* Dashboard: https://github.com/settings/developers
* Docs: http://developer.github.com/v3/#authentication
* API reference: http://developer.github.com/v3/
.. note::
        GitHub API `documentation <http://developer.github.com/v3/#user-agent-required>`_ says:
        all API requests MUST include a valid ``User-Agent`` header.
You can apply a default ``User-Agent`` header for all API calls in the config like this:
.. code-block:: python
:emphasize-lines: 6
CONFIG = {
'github': {
'class_': oauth2.GitHub,
'consumer_key': '#####',
'consumer_secret': '#####',
'access_headers': {'User-Agent': 'Awesome-Octocat-App'},
}
}
Supported :class:`.User` properties:
* email
* id
* link
* location
* name
* picture
* username
Unsupported :class:`.User` properties:
* birth_date
* city
* country
* first_name
* gender
* last_name
* locale
* nickname
* phone
* postal_code
* timezone
"""
user_authorization_url = 'https://github.com/login/oauth/authorize'
access_token_url = 'https://github.com/login/oauth/access_token'
user_info_url = 'https://api.github.com/user'
same_origin = False
supported_user_attributes = core.SupportedUserAttributes(
email=True,
id=True,
link=True,
location=True,
name=True,
picture=True,
username=True
)
@staticmethod
def _x_user_parser(user, data):
user.username = data.get('login')
user.picture = data.get('avatar_url')
user.link = data.get('html_url')
return user
@classmethod
def _x_credentials_parser(cls, credentials, data):
if data.get('token_type') == 'bearer':
credentials.token_type = cls.BEARER
return credentials
def access(self, url, **kwargs):
# https://developer.github.com/v3/#user-agent-required
headers = kwargs["headers"] = kwargs.get("headers", {})
if not headers.get("User-Agent"):
headers["User-Agent"] = self.settings.config[self.name]["consumer_key"]
def parent_access(url):
return super(GitHub, self).access(url, **kwargs)
response = parent_access(url)
# additional action to get email is required:
# https://developer.github.com/v3/users/emails/
if response.status == 200:
email_response = parent_access(url + "/emails")
if email_response.status == 200:
response.data["emails"] = email_response.data
# find first or primary email
primary_email = None
for item in email_response.data:
is_primary = item["primary"]
if not primary_email or is_primary:
primary_email = item["email"]
if is_primary:
break
response.data["email"] = primary_email
return response
class Google(OAuth2):
"""
Google |oauth2| provider.
* Dashboard: https://console.developers.google.com/project
* Docs: https://developers.google.com/accounts/docs/OAuth2
* API reference: https://developers.google.com/gdata/docs/directory
* API explorer: https://developers.google.com/oauthplayground/
Supported :class:`.User` properties:
* email
* first_name
* gender
* id
* last_name
* link
* locale
* name
* picture
Unsupported :class:`.User` properties:
* birth_date
* city
* country
* nickname
* phone
* postal_code
* timezone
* username
.. note::
To get the user info, you need to activate the **Google+ API**
        in the **APIs & auth >> APIs** section of the `Google Developers Console
<https://console.developers.google.com/project>`__.
"""
user_authorization_url = 'https://accounts.google.com/o/oauth2/auth'
access_token_url = 'https://accounts.google.com/o/oauth2/token'
user_info_url = 'https://www.googleapis.com/oauth2/v3/userinfo?alt=json'
user_info_scope = ['profile',
'email']
supported_user_attributes = core.SupportedUserAttributes(
id=True,
email=True,
name=True,
first_name=True,
last_name=True,
gender=True,
locale=True,
link=True,
picture=True
)
def __init__(self, *args, **kwargs):
super(Google, self).__init__(*args, **kwargs)
# Handle special Google requirements to be able to refresh the access token.
if self.offline:
if not 'access_type' in self.user_authorization_params:
# Google needs access_type=offline param in the user authorization request.
self.user_authorization_params['access_type'] = 'offline'
if not 'approval_prompt' in self.user_authorization_params:
# And also approval_prompt=force.
self.user_authorization_params['approval_prompt'] = 'force'
@classmethod
def _x_request_elements_filter(cls, request_type, request_elements,
credentials):
"""
        Google doesn't accept the client ID and secret in both the request parameters
        and the basic authorization header of the access token request at the same time.
"""
        if request_type == cls.ACCESS_TOKEN_REQUEST_TYPE:
params = request_elements[2]
del params['client_id']
del params['client_secret']
return request_elements
@staticmethod
def _x_user_parser(user, data):
emails = data.get('emails', [])
if emails:
user.email = emails[0].get('value')
for email in emails:
if email.get('type') == 'account':
user.email = email.get('value')
break
user.id = data.get('sub')
user.name = data.get('name')
user.first_name = data.get('given_name', '')
user.last_name = data.get('family_name', '')
user.locale = data.get('locale', '')
user.picture = data.get('picture', '')
user.email_verified = data.get("email_verified")
user.hosted_domain = data.get("hd")
return user
def _x_scope_parser(self, scope):
"""
Google has space-separated scopes
"""
return ' '.join(scope)
class LinkedIn(OAuth2):
"""
    LinkedIn |oauth2| provider.
.. note::
Doesn't support access token refreshment.
* Dashboard: https://www.linkedin.com/secure/developer
* Docs: http://developer.linkedin.com/documents/authentication
* API reference: http://developer.linkedin.com/rest
Supported :class:`.User` properties:
* country
* email
* first_name
* id
* last_name
* link
* location
* name
* picture
Unsupported :class:`.User` properties:
* birth_date
* city
* gender
* locale
* nickname
* phone
* postal_code
* timezone
* username
"""
user_authorization_url = 'https://www.linkedin.com/uas/oauth2/authorization'
access_token_url = 'https://www.linkedin.com/uas/oauth2/accessToken'
# https://docs.microsoft.com/en-us/linkedin/consumer/integrations/self-serve/migration-faq
user_info_url = 'https://api.linkedin.com/v2/me'
user_info_scope = ['r_emailaddress', 'r_liteprofile']
token_request_method = 'GET' # To avoid a bug with OAuth2.0 on Linkedin
# http://developer.linkedin.com/forum/unauthorized-invalid-or-expired-token-immediately-after-receiving-oauth2-token
supported_user_attributes = core.SupportedUserAttributes(
country=True,
email=True,
first_name=True,
id=True,
last_name=True,
link=True,
location=True,
name=True,
picture=True
)
@classmethod
def _x_request_elements_filter(cls, request_type, request_elements,
credentials):
if request_type == cls.PROTECTED_RESOURCE_REQUEST_TYPE:
            # LinkedIn too has its own terminology!
            # The v2 API just uses the Authorization header.
#url, method, params, headers, body = request_elements
#params['oauth2_access_token'] = params.pop('access_token')
#request_elements = core.RequestElements(url, method, params,
#headers, body)
pass
return request_elements
@classmethod
def _x_credentials_parser(cls, credentials, data):
credentials.token_type = cls.BEARER
return credentials
def _x_user_parser(self, user, data):
user.first_name = data.get('localizedFirstName')
user.last_name = data.get('localizedLastName')
user.id = data.get('id')
request_elements = self.create_request_elements(request_type=self.PROTECTED_RESOURCE_REQUEST_TYPE,
credentials=self.credentials,
url='https://api.linkedin.com/v2/emailAddress?q=members&projection=(elements*(handle~))')
response = self._fetch(*request_elements)
if response.status / 100 == 2:
for element in response.content_parser(response.content).get("elements", []):
email = element.get("handle~", {}).get("emailAddress")
if email:
user.email = email
break
return user
class PayPal(OAuth2):
"""
PayPal |oauth2| provider.
* Dashboard: https://developer.paypal.com/webapps/developer/applications
* Docs: https://developer.paypal.com/webapps/developer/docs/integration/direct/make-your-first-call/
* API reference: https://developer.paypal.com/webapps/developer/docs/api/
.. note::
Paypal doesn't redirect the **user** to authorize your app!
It grants you an **access token** based on your **app's** key and
secret instead.
"""
_x_use_authorization_header = True
supported_user_attributes = core.SupportedUserAttributes()
@classmethod
def _x_request_elements_filter(cls, request_type, request_elements, credentials):
if request_type == cls.ACCESS_TOKEN_REQUEST_TYPE:
url, method, params, headers, body = request_elements
params['grant_type'] = 'client_credentials'
request_elements = core.RequestElements(url, method, params, headers, body)
return request_elements
user_authorization_url = ''
access_token_url = 'https://api.sandbox.paypal.com/v1/oauth2/token'
user_info_url = ''
class Reddit(OAuth2):
"""
Reddit |oauth2| provider.
.. note::
Currently credentials refreshment returns ``{"error": "invalid_request"}``.
* Dashboard: https://ssl.reddit.com/prefs/apps
* Docs: https://github.com/reddit/reddit/wiki/OAuth2
* API reference: http://www.reddit.com/dev/api
.. note::
According to Reddit API
`docs <https://github.com/reddit/reddit/wiki/API#rules>`_,
you have to include a `User-Agent` header in each API call.
You can apply a default ``User-Agent`` header for all API calls in the
config like this:
.. code-block:: python
:emphasize-lines: 6
CONFIG = {
'reddit': {
'class_': oauth2.Reddit,
'consumer_key': '#####',
'consumer_secret': '#####',
'access_headers': {'User-Agent': "Andy Pipkin's App"},
}
}
Supported :class:`.User` properties:
* id
* username
Unsupported :class:`.User` properties:
* birth_date
* country
* city
* email
* first_name
* gender
* last_name
* link
* locale
* location
* name
* nickname
* phone
* picture
* postal_code
* timezone
"""
user_authorization_url = 'https://ssl.reddit.com/api/v1/authorize'
access_token_url = 'https://ssl.reddit.com/api/v1/access_token'
user_info_url = 'https://oauth.reddit.com/api/v1/me.json'
user_info_scope = ['identity']
supported_user_attributes = core.SupportedUserAttributes(
id=True,
name=True,
username=True
)
def __init__(self, *args, **kwargs):
super(Reddit, self).__init__(*args, **kwargs)
if self.offline:
if not 'duration' in self.user_authorization_params:
# http://www.reddit.com/r/changelog/comments/11jab9/reddit_change_permanent_oauth_grants_using/
self.user_authorization_params['duration'] = 'permanent'
@classmethod
def _x_credentials_parser(cls, credentials, data):
if data.get('token_type') == 'bearer':
credentials.token_type = cls.BEARER
return credentials
@staticmethod
def _x_user_parser(user, data):
user.username = data.get('name')
return user
class Viadeo(OAuth2):
"""
Viadeo |oauth2| provider.
.. note::
As stated in the `Viadeo documentation
<http://dev.viadeo.com/documentation/authentication/request-an-api-key/>`__:
        Viadeo restricts access to its API.
        It is now exclusively reserved for its strategic partners.
* Dashboard: http://dev.viadeo.com/dashboard/
* Docs: http://dev.viadeo.com/documentation/authentication/oauth-authentication/
* API reference: http://dev.viadeo.com/documentation/
.. note::
Viadeo doesn't support **credentials refreshment**.
As stated in their `docs <http://dev.viadeo.com/documentation/authentication/oauth-authentication/>`_:
"The access token has an infinite time to live."
"""
user_authorization_url = 'https://secure.viadeo.com/oauth-provider/authorize2'
access_token_url = 'https://secure.viadeo.com/oauth-provider/access_token2'
user_info_url = 'https://api.viadeo.com/me'
@classmethod
def _x_credentials_parser(cls, credentials, data):
if data.get('token_type') == 'bearer_token':
credentials.token_type = cls.BEARER
return credentials
@staticmethod
def _x_refresh_credentials_if(credentials):
# Never refresh.
return False
@staticmethod
def _x_user_parser(user, data):
user.username = data.get('nickname')
        user.picture = data.get('picture_large')
        user.locale = data.get('language')
        user.email = data.get('')
user.country = data.get('location', {}).get('country')
user.city = data.get('location', {}).get('city')
user.postal_code = data.get('location', {}).get('zipcode')
user.timezone = data.get('location', {}).get('timezone')
return user
class VK(OAuth2):
"""
VK.com |oauth2| provider.
* Dashboard: http://vk.com/apps?act=manage
* Docs: http://vk.com/developers.php?oid=-17680044&p=Authorizing_Sites
* API reference: http://vk.com/developers.php?oid=-17680044&p=API_Method_Description
.. note::
VK uses a `bitmask scope <http://vk.com/developers.php?oid=-17680044&p=Application_Rights>`_!
Use it like this:
.. code-block:: python
:emphasize-lines: 7
CONFIG = {
'vk': {
'class_': oauth2.VK,
'consumer_key': '#####',
'consumer_secret': '#####',
'id': authomatic.provider_id(),
'scope': ['1024'] # Always a single item.
}
}
Supported :class:`.User` properties:
* birth_date
* city
* country
* first_name
* gender
* id
* last_name
* location
* name
* picture
* timezone
Unsupported :class:`.User` properties:
* email
* link
* locale
* nickname
* phone
* postal_code
* username
"""
user_authorization_url = 'http://api.vkontakte.ru/oauth/authorize'
access_token_url = 'https://api.vkontakte.ru/oauth/access_token'
user_info_url = 'https://api.vk.com/method/getProfiles?v=5.131&' + \
'fields=uid,first_name,last_name,nickname,sex,bdate,city,country,timezone,photo_big'
supported_user_attributes = core.SupportedUserAttributes(
birth_date=True,
city=True,
country=True,
first_name=True,
gender=True,
id=True,
last_name=True,
location=True,
name=True,
picture=True,
timezone=True,
)
def __init__(self, *args, **kwargs):
super(VK, self).__init__(*args, **kwargs)
if self.offline:
if not 'offline' in self.scope:
self.scope.append('offline')
@staticmethod
def _x_user_parser(user, data):
_resp = data.get('response', [{}])[0]
_birth_date = _resp.get('bdate')
if _birth_date:
# :TODO: handle the use case "1.10", without year or day or ...
try:
user.birth_date = datetime.datetime.strptime(_birth_date, '%d.%m.%Y')
            except ValueError:
pass
user.id = _resp.get('id')
user.first_name = _resp.get('first_name')
user.gender = _resp.get('sex')
user.last_name = _resp.get('last_name')
user.nickname = _resp.get('nickname')
user.city = _resp.get('city')
user.country = _resp.get('country')
user.timezone = _resp.get('timezone')
user.picture = _resp.get('photo_big')
return user
class WindowsLive(OAuth2):
"""
Windows Live |oauth2| provider.
* Dashboard: https://account.live.com/developers/applications
* Docs: http://msdn.microsoft.com/en-us/library/hh243647.aspx
* API explorer: http://isdk.dev.live.com/?mkt=en-us
Supported :class:`.User` properties:
* email
* first_name
* id
* last_name
* link
* locale
* name
* picture
Unsupported :class:`.User` properties:
* birth_date
* city
* country
* gender
* nickname
* location
* phone
* postal_code
* timezone
* username
"""
user_authorization_url = 'https://login.live.com/oauth20_authorize.srf'
access_token_url = 'https://login.live.com/oauth20_token.srf'
user_info_url = 'https://apis.live.net/v5.0/me'
user_info_scope = ['wl.basic', 'wl.emails', 'wl.photos']
supported_user_attributes = core.SupportedUserAttributes(
email=True,
first_name=True,
id=True,
last_name=True,
link=True,
locale=True,
name=True,
picture=True
)
def __init__(self, *args, **kwargs):
super(WindowsLive, self).__init__(*args, **kwargs)
if self.offline:
if not 'wl.offline_access' in self.scope:
self.scope.append('wl.offline_access')
@classmethod
def _x_credentials_parser(cls, credentials, data):
if data.get('token_type') == 'bearer':
credentials.token_type = cls.BEARER
return credentials
@staticmethod
def _x_user_parser(user, data):
user.email = data.get('emails', {}).get('preferred')
user.picture = 'https://apis.live.net/v5.0/{0}/picture'.format(data.get('id'))
return user
class Yammer(OAuth2):
"""
Yammer |oauth2| provider.
* Dashboard: https://www.yammer.com/client_applications
* Docs: https://developer.yammer.com/authentication/
* API reference: https://developer.yammer.com/restapi/
Supported :class:`.User` properties:
* birth_date
* city
* country
* email
* first_name
* id
* last_name
* link
* locale
* location
* name
* phone
* picture
* timezone
* username
Unsupported :class:`.User` properties:
* gender
* nickname
* postal_code
"""
user_authorization_url = 'https://www.yammer.com/dialog/oauth'
access_token_url = 'https://www.yammer.com/oauth2/access_token.json'
user_info_url = 'https://www.yammer.com/api/v1/users/current.json'
supported_user_attributes = core.SupportedUserAttributes(
birth_date=True,
city=True,
country=True,
email=True,
first_name=True,
id=True,
last_name=True,
link=True,
locale=True,
location=True,
name=True,
phone=True,
picture=True,
timezone=True,
username=True
)
@classmethod
def _x_credentials_parser(cls, credentials, data):
# import pdb; pdb.set_trace()
credentials.token_type = cls.BEARER
_access_token = data.get('access_token', {})
credentials.token = _access_token.get('token')
_expire_in = _access_token.get('expires_at', 0)
if _expire_in:
credentials.expire_in = _expire_in
return credentials
@staticmethod
def _x_user_parser(user, data):
# Yammer provides most of the user info in the access token request,
        # but provides more in the user info request.
_user = data.get('user', {})
if not _user:
            # If there is no 'user' key, this is the access token response.
_user = data
user.username = _user.get('name')
user.name = _user.get('full_name')
user.link = _user.get('web_url')
user.picture = _user.get('mugshot_url')
user.city, user.country = _user.get('location', ',').split(',')
user.city = user.city.strip()
user.country = user.country.strip()
user.locale = _user.get('web_preferences', {}).get('locale')
# Contact
_contact = _user.get('contact', {})
user.phone = _contact.get('phone_numbers', [{}])[0].get('number')
_emails = _contact.get('email_addresses', [])
for email in _emails:
if email.get('type', '') == 'primary':
user.email = email.get('address')
break
try:
user.birth_date = datetime.datetime.strptime(_user.get('birth_date'), "%B %d")
        except (TypeError, ValueError):
user.birth_date = _user.get('birth_date')
return user
class Yandex(OAuth2):
"""
Yandex |oauth2| provider.
* Dashboard: https://oauth.yandex.com/client/my
* Docs: http://api.yandex.com/oauth/doc/dg/reference/obtain-access-token.xml
* API reference:
Supported :class:`.User` properties:
* id
* name
* username
Unsupported :class:`.User` properties:
* birth_date
* city
* country
* email
* first_name
* gender
* last_name
* link
* locale
* location
* nickname
* phone
* picture
* postal_code
* timezone
"""
user_authorization_url = 'https://oauth.yandex.com/authorize'
access_token_url = 'https://oauth.yandex.com/token'
user_info_url = 'https://login.yandex.ru/info'
supported_user_attributes = core.SupportedUserAttributes(
id=True,
name=True,
username=True
)
@classmethod
def _x_credentials_parser(cls, credentials, data):
if data.get('token_type') == 'bearer':
credentials.token_type = cls.BEARER
return credentials
@staticmethod
def _x_user_parser(user, data):
# http://api.yandex.ru/login/doc/dg/reference/response.xml
user.name = data.get('real_name')
user.nickname = data.get('display_name')
user.gender = data.get('Sex')
user.email = data.get('Default_email')
user.username = data.get('login')
try:
user.birth_date = datetime.datetime.strptime(data.get('birthday'), "%Y-%m-%d")
        except (TypeError, ValueError):
user.birth_date = data.get('birthday')
return user
# The provider type ID is generated from this list's indexes!
# Always append new providers at the end so that ids of existing providers don't change!
PROVIDER_ID_MAP = [OAuth2, Behance, Bitly, Cosm, DeviantART, Facebook, Foursquare, GitHub, Google, LinkedIn,
PayPal, Reddit, Viadeo, VK, WindowsLive, Yammer, Yandex, Eventbrite, Amazon]
|
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from models import Menu, Image, Page, News
from django import forms
from django.contrib.admin.widgets import AdminFileWidget
from django.utils.safestring import mark_safe
from django.conf import settings
import os, sys, glob
from datetime import datetime
from time import sleep
ckeditor = ['/static/webapp/ckeditor/ckeditor.js','/static/webapp/ckeditor.init.js']
ckeditor_css = ['/static/webapp/webapp.css']
def remove(modeladmin, request, queryset):
queryset.delete()
remove.short_description = "Verwijderen"
def make_published(modeladmin, request, queryset):
queryset.update(publish=True)
make_published.short_description = "Zet publiceren aan"
def make_depublished(modeladmin, request, queryset):
queryset.update(publish=False)
make_depublished.short_description = "Zet publiceren uit"
def clear_thumbnails(modeladmin, request, queryset):
for i in queryset:
root = settings.MEDIA_ROOT
file_name = i.image
basedir = os.path.dirname("%s%s" %(root, file_name))
base, ext = os.path.splitext(os.path.basename(str(file_name)))
        for file in glob.glob(r"%s/%s_[ct]_*x*%s" %(basedir, base, ext)):
os.remove(file)
clear_thumbnails.short_description = "Delete thumbnails"
def refresh_thumbnails(modeladmin, request, queryset):
for obj in queryset:
root = settings.MEDIA_ROOT
file_name = obj.image
basedir = os.path.dirname("%s%s" %(root, file_name))
base, ext = os.path.splitext(os.path.basename(str(file_name)))
        for file in glob.glob(r"%s/%s_[ct]_*x*%s" %(basedir, base, ext)):
os.remove(file)
obj.save()
sleep(2)
refresh_thumbnails.short_description = "Refresh thumbnails"
class MenuAdmin(admin.ModelAdmin):
list_display = ['title', 'sortorder', 'last_modified', 'last_modified_by', 'keywords', 'description']
#list_filter = ['', ]
ordering = ['sortorder',]
prepopulated_fields = { 'slug': ('title', ) }
actions = None
list_editable = ['sortorder',]
fieldsets = (
(None, {
'classes': ('wide', 'extrapretty'),
'fields': ('title', 'text')
}),
        ('Geavanceerde opties', {  # "Geavanceerde opties" = advanced options
'classes': ('collapse', 'wide', 'extrapretty'),
'fields': ('image', 'slug', 'sortorder')
}),
('Meta tags', {
'classes': ('collapse', 'wide', 'extrapretty'),
'fields': ('description', 'keywords', )
}),
)
class Media:
js = ckeditor
admin.site.register(Menu, MenuAdmin)
def thumbnail(file_path):
path, ext = file_path.rsplit('.', 1)
return u'<img src="%s_t_132x132.%s?now=%s" />' % (path, ext, datetime.now().isoformat())
class AdminImageWidget(AdminFileWidget):
"""
A FileField Widget that displays an image instead of a file path
if the current file is an image.
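    It is typically used by overriding an image field's widget in a ``ModelForm``,
    as ``ImageUploadForm`` does below::

        image = forms.ImageField(widget=AdminImageWidget)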
"""
def render(self, name, value, attrs=None):
output = []
if value:
file_path = '%s%s' % (settings.MEDIA_URL, value)
try:
output.append('<a target="_blank" href="%s?now=%s">%s</a><br />' %
(file_path, datetime.now().isoformat(), thumbnail(file_path)))
except IOError:
output.append('%s <a target="_blank" href="%s">%s</a> <br />%s ' %
('Currently:', file_path, value, 'Change:'))
output.append(super(AdminImageWidget, self).render(name, value, attrs))
return mark_safe(u''.join(output))
class ImageUploadForm(forms.ModelForm):
image = forms.ImageField(widget=AdminImageWidget)
class Meta:
model = Image
exclude = []
class ImageAdmin(admin.ModelAdmin):
form = ImageUploadForm
search_fields = ['image', ]
list_display = ['filename', 'list_thumbnail', 'image_editing', 'inline_urls', 'pub_date', 'last_modified', 'owner','last_modified_by'] #sort_order #'inline_urls',
# list_display = ['filename', 'image',]
list_editable = ['image_editing', ]
list_per_page = 50
date_hierarchy = 'pub_date'
actions = [remove, clear_thumbnails, refresh_thumbnails]
#list_filter = ['type',]
fieldsets = (
(None, {
'fields': ('image', ),
'classes': ['wide', 'extrapretty']
}),
('Geavanceerde opties', {
'classes': ('collapse', 'wide', 'extrapretty',),
'fields': ('caption', 'image_editing') #'sortorder',
}),
)
admin.site.register(Image, ImageAdmin)
class PageAdmin(admin.ModelAdmin):
"""
# ['html', 'id', 'intro', 'keywords', 'last_modified', 'last_modified_by_id', 'menu_id', 'owner_id', 'pub_date', 'publish', 'slideshow_id', 'slug', 'sortorder', 'table_of_contents', 'text', 'title']
"""
search_fields = ['title', 'text']
list_display = ['title', 'publish', 'menu', 'sortorder', 'pub_date', 'last_modified', 'owner','last_modified_by', 'keywords', 'description']
list_filter = ['menu', 'publish']
list_editable = ['menu', 'publish', 'sortorder']
prepopulated_fields = {'slug': ('title', )}
actions = [remove, make_published, make_depublished]
fieldsets = (
(None, {
'classes': ('wide', 'extrapretty'),
'fields': ('menu', 'title', 'image')
}),
(None, {
'classes': ('wide', 'extrapretty'),
'fields': ('text', )
}),
('Geavanceerde opties', {
'classes': ('collapse', 'wide', 'extrapretty'),
'fields': ('publish', 'slug', 'sortorder')
}),
('Meta tags', {
'classes': ('collapse', 'wide', 'extrapretty'),
'fields': ('description', 'keywords')
}),
)
class Media:
js = ckeditor
admin.site.register(Page, PageAdmin)
class NewsAdmin(admin.ModelAdmin):
"""
'id', 'publish', 'title', 'slug', 'text', 'location_id', 'general',
'owner_id', 'pub_date', 'last_modified_by_id', 'last_modified'
"""
list_display = ['title', 'publish', 'pub_date', 'last_modified', 'owner', 'last_modified_by']
list_filter = ['owner', ]
list_editable = ['owner', ]
actions = [remove, make_published, make_depublished]
search_fields = ['title', 'text', ]
prepopulated_fields = { 'slug': ('title', ) }
#filter_horizontal = ['image', ]
date_hierarchy = 'pub_date'
fieldsets = (
(None, {
'classes': ['wide', 'extrapretty'],
'fields': ('title', 'image', 'text')
}),
('Geavanceerde opties', {
'classes': ('collapse', 'wide', 'extrapretty'),
'fields': ('publish', 'slug')
}),
('Meta tags', {
'classes': ('collapse', 'wide', 'extrapretty'),
'fields': ('description', 'keywords')
}),
)
class Media:
js = ckeditor
admin.site.register(News, NewsAdmin)
# class SponsorAdmin(admin.ModelAdmin):
# """
# The model has more fields than utilised for future feat.
# """
# search_fields = ['name', 'url', 'text']
# list_display = ['name', 'publish', 'sortorder', 'pub_date', 'last_modified', 'owner','last_modified_by']
# list_filter = ['publish', ]
# list_editable = ['publish', 'sortorder', ] #'menu'
# prepopulated_fields = { 'slug': ('name', ) }
# actions = [remove, make_published, make_depublished]
# #filter_horizontal = ['images', ]
#
# fieldsets = (
# (None, {
# 'classes': ('wide', 'extrapretty'),
# 'fields': ('name', 'image', 'url')
# }),
#
# ('Geavanceerde opties', {
# 'classes': ('collapse', 'wide', 'extrapretty'),
# 'fields': ('publish', 'slug', 'sortorder')
# }),
#
# ('Meta tags', {
# 'classes': ('collapse', 'wide', 'extrapretty'),
# 'fields': ('description', 'keywords', )
# }),
# )
#
# admin.site.register(Sponsor, SponsorAdmin)
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 08:28:04 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats, special, optimize
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
#redefine some shortcuts
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
def maxabs(arr1, arr2):
return np.max(np.abs(arr1 - arr2))
def maxabsrel(arr1, arr2):
return np.max(np.abs(arr2 / arr1 - 1))
#global
store_params = []
class MyT(GenericLikelihoodModel):
'''Maximum Likelihood Estimation of Linear Model with t-distributed errors
    This is an example of a generic MLE model that follows the same structure
    as discretemod.Poisson.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
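    A minimal usage sketch (``y`` and ``X`` are illustrative endog/exog arrays;
    the parameter vector is ``[beta, df, scale]``)::

        mod = MyT(y, X)
        mod.fixed_params = None
        mod.start_params = 0.1 * np.ones(X.shape[1] + 2)
        res = mod.fit(start_params=mod.start_params, method='nm')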
'''
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
# copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
        Negative loglikelihood of the linear model with t-distributed errors

        Parameters
        ----------
        params : array-like
            The parameters of the model, ``[beta, df, scale]``.

        Returns
        -------
        The negative log likelihood of each observation, evaluated at `params`

        Notes
        -----
        .. math :: -\\ln L_{i}=-\\ln\\Gamma\\left(\\tfrac{\\nu+1}{2}\\right)+\\ln\\Gamma\\left(\\tfrac{\\nu}{2}\\right)+\\tfrac{1}{2}\\ln\\left(\\nu\\pi\\right)+\\tfrac{\\nu+1}{2}\\ln\\left(1+\\tfrac{x_{i}^{2}}{\\nu}\\right)+\\ln\\sigma,\\qquad x_{i}=\\frac{y_{i}-x_{i}^{\\prime}\\beta}{\\sigma}
"""
#print len(params),
store_params.append(params)
if not self.fixed_params is None:
#print 'using fixed'
params = self.expandparams(params)
beta = params[:-2]
df = params[-2]
scale = params[-1]
loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
#next part is stats.t._logpdf
lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
lPx -= np_log(scale) # correction for scale
return -lPx
#Example:
np.random.seed(98765678)
nobs = 1000
nvars = 6
df = 5
rvs = np.random.randn(nobs, nvars-1)
data_exog = sm.add_constant(rvs, prepend=False)
xbeta = 0.9 + 0.1*rvs.sum(1)
data_endog = xbeta + 0.1*np.random.standard_t(df, size=nobs)
print data_endog.var()
res_ols = sm.OLS(data_endog, data_exog).fit()
print res_ols.scale
print np.sqrt(res_ols.scale)
print res_ols.params
kurt = stats.kurtosis(res_ols.resid)
df_fromkurt = 6./kurt + 4
print stats.t.stats(df_fromkurt, moments='mvsk')
print stats.t.stats(df, moments='mvsk')
modp = MyT(data_endog, data_exog)
start_value = 0.1*np.ones(data_exog.shape[1]+2)
#start_value = np.zeros(data_exog.shape[1]+2)
#start_value[:nvars] = sm.OLS(data_endog, data_exog).fit().params
start_value[:nvars] = res_ols.params
start_value[-2] = df_fromkurt #10
start_value[-1] = np.sqrt(res_ols.scale) #0.5
modp.start_params = start_value
#adding fixed parameters
fixdf = np.nan * np.zeros(modp.start_params.shape)
fixdf[-2] = 100
fixone = 0
if fixone:
modp.fixed_params = fixdf
modp.fixed_paramsmask = np.isnan(fixdf)
modp.start_params = modp.start_params[modp.fixed_paramsmask]
else:
modp.fixed_params = None
modp.fixed_paramsmask = None
resp = modp.fit(start_params = modp.start_params, disp=1, method='nm')#'newton')
#resp = modp.fit(start_params = modp.start_params, disp=1, method='newton')
print '\nestimation results t-dist'
print resp.params
print resp.bse
resp2 = modp.fit(start_params = resp.params, method='Newton')
print 'using Newton'
print resp2.params
print resp2.bse
from statsmodels.tools.numdiff import approx_fprime, approx_hess
hb=-approx_hess(modp.start_params, modp.loglike, epsilon=-1e-4)
tmp = modp.loglike(modp.start_params)
print tmp.shape
#np.linalg.eigh(np.linalg.inv(hb))[0]
pp=np.array(store_params)
print pp.min(0)
print pp.max(0)
##################### Example: Pareto
# estimating scale doesn't work yet, a bug somewhere ?
# fit_ks works well, but no bse or other result statistics yet
#import for kstest based estimation
#should be replaced
import statsmodels.sandbox.distributions.sppatch
class MyPareto(GenericLikelihoodModel):
    '''Maximum Likelihood Estimation of the Pareto distribution

    first version: iid case, with constant parameters
'''
#copied from stats.distribution
def pdf(self, x, b):
return b * x**(-b-1)
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
def nloglikeobs(self, params):
#print params.shape
if not self.fixed_params is None:
#print 'using fixed'
params = self.expandparams(params)
b = params[0]
loc = params[1]
scale = params[2]
#loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
logpdf = np_log(b) - (b+1.)*np_log(x) #use np_log(1 + x) for Pareto II
logpdf -= np.log(scale)
#lb = loc + scale
#logpdf[endog<lb] = -inf
#import pdb; pdb.set_trace()
logpdf[x<1] = -10000 #-np.inf
return -logpdf
def fit_ks(self):
'''fit Pareto with nested optimization
originally published on stackoverflow
this doesn't trim lower values during ks optimization
'''
rvs = self.endog
rvsmin = rvs.min()
fixdf = np.nan * np.ones(3)
self.fixed_params = fixdf
self.fixed_paramsmask = np.isnan(fixdf)
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
#est = self.fit_fr(rvs, 1., frozen=[np.nan, loc, np.nan])
self.fixed_params[1] = loc
est = self.fit(start_params=self.start_params[self.fixed_paramsmask]).params
#est = self.fit(start_params=self.start_params, method='nm').params
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
locest = optimize.fmin(pareto_ks, rvsmin - 1.5, (rvs,))
est = stats.pareto.fit_fr(rvs, 0., frozen=[np.nan, locest, np.nan])
args = (est[0], locest[0], est[1])
return args
def fit_ks1_trim(self):
'''fit Pareto with nested optimization
originally published on stackoverflow
'''
self.nobs = self.endog.shape[0]
rvs = np.sort(self.endog)
rvsmin = rvs.min()
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
est = stats.pareto.fit_fr(rvs, frozen=[np.nan, loc, np.nan])
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
#locest = optimize.fmin(pareto_ks, rvsmin*0.7, (rvs,))
maxind = min(np.floor(self.nobs*0.95).astype(int), self.nobs-10)
res = []
for trimidx in range(self.nobs//2, maxind):
xmin = loc = rvs[trimidx]
res.append([trimidx, pareto_ks(loc-1e-10, rvs[trimidx:])])
res = np.array(res)
bestidx = res[np.argmin(res[:,1]),0].astype(int)
print bestidx
locest = rvs[bestidx]
est = stats.pareto.fit_fr(rvs[bestidx:], 1., frozen=[np.nan, locest, np.nan])
args = (est[0], locest, est[1])
return args
def fit_ks1(self):
'''fit Pareto with nested optimization
originally published on stackoverflow
'''
rvs = self.endog
rvsmin = rvs.min()
def pareto_ks(loc, rvs):
#start_scale = rvs.min() - loc # not used yet
est = stats.pareto.fit_fr(rvs, 1., frozen=[np.nan, loc, np.nan])
args = (est[0], loc, est[1])
return stats.kstest(rvs,'pareto',args)[0]
#locest = optimize.fmin(pareto_ks, rvsmin*0.7, (rvs,))
locest = optimize.fmin(pareto_ks, rvsmin - 1.5, (rvs,))
est = stats.pareto.fit_fr(rvs, 1., frozen=[np.nan, locest, np.nan])
args = (est[0], locest[0], est[1])
return args
#y = stats.pareto.rvs(1, loc=10, scale=2, size=nobs)
y = stats.pareto.rvs(1, loc=0, scale=2, size=nobs)
par_start_params = np.array([1., 9., 2.])
mod_par = MyPareto(y)
mod_par.start_params = np.array([1., 10., 2.])
mod_par.start_params = np.array([1., -9., 2.])
mod_par.fixed_params = None
fixdf = np.nan * np.ones(mod_par.start_params.shape)
fixdf[1] = 9.9
#fixdf[2] = 2.
fixone = 0
if fixone:
mod_par.fixed_params = fixdf
mod_par.fixed_paramsmask = np.isnan(fixdf)
mod_par.start_params = mod_par.start_params[mod_par.fixed_paramsmask]
mod_par.df_model = 2
mod_par.df_resid = mod_par.endog.shape[0] - mod_par.df_model
mod_par.data.xnames = ['shape', 'scale']
else:
mod_par.fixed_params = None
mod_par.fixed_paramsmask = None
mod_par.df_model = 3
mod_par.df_resid = mod_par.endog.shape[0] - mod_par.df_model
mod_par.data.xnames = ['shape', 'loc', 'scale']
res_par = mod_par.fit(start_params=mod_par.start_params, method='nm', maxfun=10000, maxiter=5000)
#res_par2 = mod_par.fit(start_params=res_par.params, method='newton', maxfun=10000, maxiter=5000)
res_parks = mod_par.fit_ks1()
print res_par.params
#print res_par2.params
print res_parks
print res_par.params[1:].sum(), sum(res_parks[1:]), mod_par.endog.min()
#start new model, so we don't get two result instances with the same model instance
mod_par = MyPareto(y)
mod_par.fixed_params = fixdf
mod_par.fixed_paramsmask = np.isnan(fixdf)
mod_par.df_model = mod_par.fixed_paramsmask.sum()
mod_par.df_resid = mod_par.endog.shape[0] - mod_par.df_model
#mod_par.data.xnames = np.array(['shape', 'loc', 'scale'])[mod_par.fixed_paramsmask].tolist() # works also
mod_par.data.xnames = [name for (name, incl) in zip(['shape', 'loc', 'scale'], mod_par.fixed_paramsmask) if incl]
res_par3 = mod_par.start_params = par_start_params[mod_par.fixed_paramsmask]
res5 = mod_par.fit(start_params=mod_par.start_params)
##res_parks2 = mod_par.fit_ks()
##
##res_parkst = mod_par.fit_ks1_trim()
##print res_parkst
print res5.summary()
print res5.t_test([[1,0]])
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
0.0686702747648
0.0164150896481
0.128121386381
[ 0.10370428 0.09921315 0.09676723 0.10457413 0.10201618 0.89964496]
(array(0.0), array(1.4552599885729831), array(0.0), array(2.5072143354058238))
(array(0.0), array(1.6666666666666667), array(0.0), array(6.0))
repr(start_params) array([ 0.10370428, 0.09921315, 0.09676723, 0.10457413, 0.10201618,
0.89964496, 6.39309417, 0.12812139])
Optimization terminated successfully.
Current function value: -679.951339
Iterations: 398
Function evaluations: 609
estimation results t-dist
[ 0.10400826 0.10111893 0.09725133 0.10507788 0.10086163 0.8996041
4.72131318 0.09825355]
[ 0.00365493 0.00356149 0.00349329 0.00362333 0.003732 0.00362716
0.7232824 0.00388829]
repr(start_params) array([ 0.10400826, 0.10111893, 0.09725133, 0.10507788, 0.10086163,
0.8996041 , 4.72131318, 0.09825355])
Optimization terminated successfully.
Current function value: -679.950443
Iterations 3
using Newton
[ 0.10395383 0.10106762 0.09720665 0.10503384 0.10080599 0.89954546
4.70918964 0.09815885]
[ 0.00365299 0.00355968 0.00349147 0.00362166 0.00373015 0.00362533
0.72014031 0.00388434]
()
[ 0.09992709 0.09786601 0.09387356 0.10229919 0.09756623 0.85466272
4.60459182 0.09661986]
[ 0.11308292 0.10828401 0.1028508 0.11268895 0.10934726 0.94462721
7.15412655 0.13452746]
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
>>> res_par.params
array([ 7.42705803e+152, 2.17339053e+153])
>>> mod_par.loglike(mod_p.start_params)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'mod_p' is not defined
>>> mod_par.loglike(mod_par.start_params)
-1085.1993430947232
>>> np.log(mod_par.pdf(mod_par.start_params))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: pdf() takes exactly 3 arguments (2 given)
>>> np.log(mod_par.pdf(*mod_par.start_params))
0.69314718055994529
>>> mod_par.loglike(*mod_par.start_params)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: loglike() takes exactly 2 arguments (3 given)
>>> mod_par.loglike(mod_par.start_params)
-1085.1993430947232
>>> np.log(stats.pareto.pdf(y[0],*mod_par.start_params))
-4.6414308627431353
>>> mod_par.loglike(mod_par.start_params)
-1085.1993430947232
>>> mod_par.nloglikeobs(mod_par.start_params)[0]
0.29377232943845044
>>> mod_par.start_params
array([ 1., 2.])
>>> np.log(stats.pareto.pdf(y[0],1,9.5,2))
-1.2806918394368461
>>> mod_par.fixed_params= None
>>> mod_par.nloglikeobs(np.array([1., 10., 2.]))[0]
0.087533156771285828
>>> y[0]
12.182956907488885
>>> mod_para.endog[0]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'mod_para' is not defined
>>> mod_par.endog[0]
12.182956907488885
>>> np.log(stats.pareto.pdf(y[0],1,10,2))
-0.86821349410251702
>>> np.log(stats.pareto.pdf(y[0],1.,10.,2.))
-0.86821349410251702
>>> stats.pareto.pdf(y[0],1.,10.,2.)
0.41970067762301644
>>> mod_par.loglikeobs(np.array([1., 10., 2.]))[0]
-0.087533156771285828
>>>
'''
'''
>>> mod_par.nloglikeobs(np.array([1., 10., 2.]))[0]
0.86821349410251691
>>> np.log(stats.pareto.pdf(y,1.,10.,2.)).sum()
-2627.9403758026938
'''
#'''
#C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
# please delete it from your matplotlibrc file
# warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
#0.0686702747648
#0.0164150896481
#0.128121386381
#[ 0.10370428 0.09921315 0.09676723 0.10457413 0.10201618 0.89964496]
#(array(0.0), array(1.4552599885729827), array(0.0), array(2.5072143354058203))
#(array(0.0), array(1.6666666666666667), array(0.0), array(6.0))
#repr(start_params) array([ 0.10370428, 0.09921315, 0.09676723, 0.10457413, 0.10201618,
# 0.89964496, 6.39309417, 0.12812139])
#Optimization terminated successfully.
# Current function value: -679.951339
# Iterations: 398
# Function evaluations: 609
#
#estimation results t-dist
#[ 0.10400826 0.10111893 0.09725133 0.10507788 0.10086163 0.8996041
# 4.72131318 0.09825355]
#[ 0.00365493 0.00356149 0.00349329 0.00362333 0.003732 0.00362716
# 0.72325227 0.00388822]
#repr(start_params) array([ 0.10400826, 0.10111893, 0.09725133, 0.10507788, 0.10086163,
# 0.8996041 , 4.72131318, 0.09825355])
#Optimization terminated successfully.
# Current function value: -679.950443
# Iterations 3
#using Newton
#[ 0.10395383 0.10106762 0.09720665 0.10503384 0.10080599 0.89954546
# 4.70918964 0.09815885]
#[ 0.00365299 0.00355968 0.00349147 0.00362166 0.00373015 0.00362533
# 0.72014669 0.00388436]
#()
#[ 0.09992709 0.09786601 0.09387356 0.10229919 0.09756623 0.85466272
# 4.60459182 0.09661986]
#[ 0.11308292 0.10828401 0.1028508 0.11268895 0.10934726 0.94462721
# 7.15412655 0.13452746]
#repr(start_params) array([ 1., 2.])
#Warning: Maximum number of function evaluations has been exceeded.
#repr(start_params) array([ 3.06504406e+302, 3.29325579e+303])
#Traceback (most recent call last):
# File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\examples\ex_generic_mle_tdist.py", line 222, in <module>
# res_par2 = mod_par.fit(start_params=res_par.params, method='newton', maxfun=10000, maxiter=5000)
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 547, in fit
# disp=disp, callback=callback, **kwargs)
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 262, in fit
# newparams = oldparams - np.dot(np.linalg.inv(H),
# File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 423, in inv
# return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 306, in solve
# raise LinAlgError, 'Singular matrix'
#numpy.linalg.linalg.LinAlgError: Singular matrix
#
#>>> mod_par.fixed_params
#array([ NaN, 10., NaN])
#>>> mod_par.start_params
#array([ 1., 2.])
#>>> np.source(stats.pareto.fit_fr)
#In file: c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\distributions_patch.py
#
#def fit_fr(self, data, *args, **kwds):
# '''estimate distribution parameters by MLE taking some parameters as fixed
#
# Parameters
# ----------
# data : array, 1d
# data for which the distribution parameters are estimated,
# args : list ? check
# starting values for optimization
# kwds :
#
# - 'frozen' : array_like
# values for frozen distribution parameters and, for elements with
# np.nan, the corresponding parameter will be estimated
#
# Returns
# -------
# argest : array
# estimated parameters
#
#
# Examples
# --------
# generate random sample
# >>> np.random.seed(12345)
# >>> x = stats.gamma.rvs(2.5, loc=0, scale=1.2, size=200)
#
# estimate all parameters
# >>> stats.gamma.fit(x)
# array([ 2.0243194 , 0.20395655, 1.44411371])
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, np.nan, np.nan])
# array([ 2.0243194 , 0.20395655, 1.44411371])
#
# keep loc fixed, estimate shape and scale parameters
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, np.nan])
# array([ 2.45603985, 1.27333105])
#
# keep loc and scale fixed, estimate shape parameter
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
# array([ 3.00048828])
# >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.2])
# array([ 2.57792969])
#
# estimate only scale parameter for fixed shape and loc
# >>> stats.gamma.fit_fr(x, frozen=[2.5, 0.0, np.nan])
# array([ 1.25087891])
#
# Notes
# -----
# self is an instance of a distribution class. This can be attached to
# scipy.stats.distributions.rv_continuous
#
# *Todo*
#
# * check if docstring is correct
# * more input checking, args is list ? might also apply to current fit method
#
# '''
# loc0, scale0 = map(kwds.get, ['loc', 'scale'],[0.0, 1.0])
# Narg = len(args)
#
# if Narg == 0 and hasattr(self, '_fitstart'):
# x0 = self._fitstart(data)
# elif Narg > self.numargs:
# raise ValueError, "Too many input arguments."
# else:
# args += (1.0,)*(self.numargs-Narg)
# # location and scale are at the end
# x0 = args + (loc0, scale0)
#
# if 'frozen' in kwds:
# frmask = np.array(kwds['frozen'])
# if len(frmask) != self.numargs+2:
# raise ValueError, "Incorrect number of frozen arguments."
# else:
# # keep starting values for not frozen parameters
# x0 = np.array(x0)[np.isnan(frmask)]
# else:
# frmask = None
#
# #print x0
# #print frmask
# return optimize.fmin(self.nnlf_fr, x0,
# args=(np.ravel(data), frmask), disp=0)
#
#>>> stats.pareto.fit_fr(y, 1., frozen=[np.nan, loc, np.nan])
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'loc' is not defined
#
#>>> stats.pareto.fit_fr(y, 1., frozen=[np.nan, 10., np.nan])
#array([ 1.0346268 , 2.00184808])
#>>> stats.pareto.fit_fr(y, (1.,2), frozen=[np.nan, 10., np.nan])
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\distributions_patch.py", line 273, in fit_fr
# x0 = np.array(x0)[np.isnan(frmask)]
#ValueError: setting an array element with a sequence.
#
#>>> stats.pareto.fit_fr(y, [1.,2], frozen=[np.nan, 10., np.nan])
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\distributions_patch.py", line 273, in fit_fr
# x0 = np.array(x0)[np.isnan(frmask)]
#ValueError: setting an array element with a sequence.
#
#>>> stats.pareto.fit_fr(y, frozen=[np.nan, 10., np.nan])
#array([ 1.03463526, 2.00184809])
#>>> stats.pareto.pdf(y, 1.03463526, 10, 2.00184809).sum()
#173.33947284555239
#>>> mod_par(1.03463526, 10, 2.00184809)
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#TypeError: 'MyPareto' object is not callable
#
#>>> mod_par.loglike(1.03463526, 10, 2.00184809)
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#TypeError: loglike() takes exactly 2 arguments (4 given)
#
#>>> mod_par.loglike((1.03463526, 10, 2.00184809))
#-962.21623668859741
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 10, 2.00184809)).sum()
#-inf
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 9, 2.00184809)).sum()
#-3074.5947476137271
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 10., 2.00184809)).sum()
#-inf
#>>> np.log(stats.pareto.pdf(y, 1.03463526, 9.9, 2.00184809)).sum()
#-2677.3867091635661
#>>> y.min()
#12.001848089426717
#>>> np.log(stats.pareto.pdf(y, 1.03463526, loc=9.9, scale=2.00184809)).sum()
#-2677.3867091635661
#>>> np.log(stats.pareto.pdf(y, 1.03463526, loc=10., scale=2.00184809)).sum()
#-inf
#>>> stats.pareto.logpdf(y, 1.03463526, loc=10., scale=2.00184809).sum()
#-inf
#>>> stats.pareto.logpdf(y, 1.03463526, loc=9.99, scale=2.00184809).sum()
#-2631.6120098202355
#>>> mod_par.loglike((1.03463526, 9.99, 2.00184809))
#-963.2513896113644
#>>> maxabs(y, mod_par.endog)
#0.0
#>>> np.source(stats.pareto.logpdf)
#In file: C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6579.win32\Programs\Python25\Lib\site-packages\scipy\stats\distributions.py
#
# def logpdf(self, x, *args, **kwds):
# """
# Log of the probability density function at x of the given RV.
#
# This uses more numerically accurate calculation if available.
#
# Parameters
# ----------
# x : array-like
# quantiles
# arg1, arg2, arg3,... : array-like
# The shape parameter(s) for the distribution (see docstring of the
# instance object for more information)
# loc : array-like, optional
# location parameter (default=0)
# scale : array-like, optional
# scale parameter (default=1)
#
# Returns
# -------
# logpdf : array-like
# Log of the probability density function evaluated at x
#
# """
# loc,scale=map(kwds.get,['loc','scale'])
# args, loc, scale = self._fix_loc_scale(args, loc, scale)
# x,loc,scale = map(arr,(x,loc,scale))
# args = tuple(map(arr,args))
# x = arr((x-loc)*1.0/scale)
# cond0 = self._argcheck(*args) & (scale > 0)
# cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
# cond = cond0 & cond1
# output = empty(shape(cond),'d')
# output.fill(NINF)
# putmask(output,(1-cond0)*array(cond1,bool),self.badvalue)
# goodargs = argsreduce(cond, *((x,)+args+(scale,)))
# scale, goodargs = goodargs[-1], goodargs[:-1]
# place(output,cond,self._logpdf(*goodargs) - log(scale))
# if output.ndim == 0:
# return output[()]
# return output
#
#>>> np.source(stats.pareto._logpdf)
#In file: C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6579.win32\Programs\Python25\Lib\site-packages\scipy\stats\distributions.py
#
# def _logpdf(self, x, *args):
# return log(self._pdf(x, *args))
#
#>>> np.source(stats.pareto._pdf)
#In file: C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6579.win32\Programs\Python25\Lib\site-packages\scipy\stats\distributions.py
#
# def _pdf(self, x, b):
# return b * x**(-b-1)
#
#>>> stats.pareto.a
#1.0
#>>> (1-loc)/scale
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'loc' is not defined
#
#>>> b, loc, scale = (1.03463526, 9.99, 2.00184809)
#>>> (1-loc)/scale
#-4.4908502522786327
#>>> (x-loc)/scale == 1
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'x' is not defined
#
#>>> (lb-loc)/scale == 1
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#NameError: name 'lb' is not defined
#
#>>> lb = scale + loc
#>>> lb
#11.991848090000001
#>>> (lb-loc)/scale == 1
#False
#>>> (lb-loc)/scale
#1.0000000000000004
#>>>
#'''
'''
repr(start_params) array([ 1., 10., 2.])
Optimization terminated successfully.
Current function value: 2626.436870
Iterations: 102
Function evaluations: 210
Optimization terminated successfully.
Current function value: 0.016555
Iterations: 16
Function evaluations: 35
[ 1.03482659 10.00737039 1.9944777 ]
(1.0596088578825995, 9.9043376069230007, 2.0975104813987118)
>>> 9.9043376069230007 + 2.0975104813987118
12.001848088321712
>>> y.min()
12.001848089426717
'''
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
0.0686702747648
0.0164150896481
0.128121386381
[ 0.10370428 0.09921315 0.09676723 0.10457413 0.10201618 0.89964496]
(array(0.0), array(1.4552599885729829), array(0.0), array(2.5072143354058221))
(array(0.0), array(1.6666666666666667), array(0.0), array(6.0))
repr(start_params) array([ 0.10370428, 0.09921315, 0.09676723, 0.10457413, 0.10201618,
0.89964496, 6.39309417, 0.12812139])
Optimization terminated successfully.
Current function value: -679.951339
Iterations: 398
Function evaluations: 609
estimation results t-dist
[ 0.10400826 0.10111893 0.09725133 0.10507788 0.10086163 0.8996041
4.72131318 0.09825355]
[ 0.00365493 0.00356149 0.00349329 0.00362333 0.003732 0.00362716
0.72329352 0.00388832]
repr(start_params) array([ 0.10400826, 0.10111893, 0.09725133, 0.10507788, 0.10086163,
0.8996041 , 4.72131318, 0.09825355])
Optimization terminated successfully.
Current function value: -679.950443
Iterations 3
using Newton
[ 0.10395383 0.10106762 0.09720665 0.10503384 0.10080599 0.89954546
4.70918964 0.09815885]
[ 0.00365299 0.00355968 0.00349147 0.00362166 0.00373015 0.00362533
0.7201488 0.00388437]
()
[ 0.09992709 0.09786601 0.09387356 0.10229919 0.09756623 0.85466272
4.60459182 0.09661986]
[ 0.11308292 0.10828401 0.1028508 0.11268895 0.10934726 0.94462721
7.15412655 0.13452746]
repr(start_params) array([ 1., 9., 2.])
Optimization terminated successfully.
Current function value: 2636.129089
Iterations: 147
Function evaluations: 279
Optimization terminated successfully.
Current function value: 0.016555
Iterations: 16
Function evaluations: 35
[ 0.84856418 10.2197801 1.78206799]
(1.0596088578825995, 9.9043376069230007, 2.0975104813987118)
12.0018480891 12.0018480883 12.0018480894
repr(start_params) array([ 1., 2.])
Warning: Desired error not necessarily achieveddue to precision loss
Current function value: 2643.549907
Iterations: 2
Function evaluations: 13
Gradient evaluations: 12
>>> res_parks2 = mod_par.fit_ks()
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2642.465273
Iterations: 92
Function evaluations: 172
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2636.639863
Iterations: 73
Function evaluations: 136
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2631.568778
Iterations: 75
Function evaluations: 133
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2627.821044
Iterations: 75
Function evaluations: 135
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2631.568778
Iterations: 75
Function evaluations: 133
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.431596
Iterations: 58
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
repr(start_params) array([ 1., 2.])
Warning: Maximum number of function evaluations has been exceeded.
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.737426
Iterations: 60
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2627.821044
Iterations: 75
Function evaluations: 135
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.471666
Iterations: 48
Function evaluations: 94
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2627.196314
Iterations: 66
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.578538
Iterations: 56
Function evaluations: 103
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.471666
Iterations: 48
Function evaluations: 94
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.651702
Iterations: 67
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.737426
Iterations: 60
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.613505
Iterations: 73
Function evaluations: 141
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.578538
Iterations: 56
Function evaluations: 103
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.651702
Iterations: 67
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.622789
Iterations: 63
Function evaluations: 114
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.613505
Iterations: 73
Function evaluations: 141
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.627465
Iterations: 59
Function evaluations: 109
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.625104
Iterations: 59
Function evaluations: 108
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629829
Iterations: 66
Function evaluations: 118
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.632218
Iterations: 64
Function evaluations: 119
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.628642
Iterations: 67
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.631023
Iterations: 68
Function evaluations: 129
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630430
Iterations: 57
Function evaluations: 108
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629598
Iterations: 60
Function evaluations: 112
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630430
Iterations: 57
Function evaluations: 108
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630130
Iterations: 65
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629536
Iterations: 62
Function evaluations: 111
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.630130
Iterations: 65
Function evaluations: 122
repr(start_params) array([ 1., 2.])
Optimization terminated successfully.
Current function value: 2626.629984
Iterations: 67
Function evaluations: 123
Optimization terminated successfully.
Current function value: 0.016560
Iterations: 18
Function evaluations: 38
>>> res_parks2
(1.0592352626264809, 9.9051580457572399, 2.0966900385041591)
>>> res_parks
(1.0596088578825995, 9.9043376069230007, 2.0975104813987118)
>>> res_par.params
array([ 0.84856418, 10.2197801 , 1.78206799])
>>> np.sqrt(np.diag(mod_par.hessian(res_par.params)))
array([ NaN, NaN, NaN])
>>> mod_par.hessian(res_par.params
... )
array([[ NaN, NaN, NaN],
[ NaN, NaN, NaN],
[ NaN, NaN, NaN]])
>>> mod_par.hessian(res_parks)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 533, in hessian
return approx_hess(params, self.loglike)[0] #need options for hess (epsilon)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\regression\numdiff.py", line 118, in approx_hess
xh = x + h
TypeError: can only concatenate tuple (not "float") to tuple
>>> mod_par.hessian(np.array(res_parks))
array([[ NaN, NaN, NaN],
[ NaN, NaN, NaN],
[ NaN, NaN, NaN]])
>>> mod_par.fixed_params
array([ NaN, 9.90510677, NaN])
>>> mod_par.fixed_params=None
>>> mod_par.hessian(np.array(res_parks))
array([[-890.48553491, NaN, NaN],
[ NaN, NaN, NaN],
[ NaN, NaN, NaN]])
>>> mod_par.loglike(np.array(res_parks))
-2626.6322080820569
>>> mod_par.bsejac
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\decorators.py", line 85, in __get__
_cachedval = self.fget(obj)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 592, in bsejac
return np.sqrt(np.diag(self.covjac))
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\decorators.py", line 85, in __get__
_cachedval = self.fget(obj)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 574, in covjac
jacv = self.jacv
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\decorators.py", line 85, in __get__
_cachedval = self.fget(obj)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 557, in jacv
return self.jac(self._results.params)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 530, in jac
return approx_fprime1(params, self.loglikeobs, **kwds)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\regression\numdiff.py", line 80, in approx_fprime1
f0 = f(*((xk,)+args))
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 522, in loglikeobs
return -self.nloglikeobs(params)
File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\examples\ex_generic_mle_tdist.py", line 184, in nloglikeobs
scale = params[2]
IndexError: index out of bounds
>>> hasattr(self, 'start_params')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'self' is not defined
>>> hasattr(mod_par, 'start_params')
True
>>> mod_par.start_params
array([ 1., 2.])
>>> stats.pareto.stats(1., 9., 2., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(1., 8., 2., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(1., 8., 1., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(1., moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(0.5., moments='mvsk')
File "<stdin>", line 1
stats.pareto.stats(0.5., moments='mvsk')
^
SyntaxError: invalid syntax
>>> stats.pareto.stats(0.5, moments='mvsk')
(array(1.#INF), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(2, moments='mvsk')
(array(2.0), array(1.#INF), array(1.#QNAN), array(1.#QNAN))
>>> stats.pareto.stats(10, moments='mvsk')
(array(1.1111111111111112), array(0.015432098765432098), array(2.8110568859997356), array(14.828571428571429))
>>> stats.pareto.rvs(10, size=10)
array([ 1.07716265, 1.18977526, 1.07093 , 1.05157081, 1.15991232,
1.31015589, 1.06675107, 1.08082475, 1.19501243, 1.34967158])
>>> r = stats.pareto.rvs(10, size=1000)
>>> plt
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'plt' is not defined
>>> import matplotlib.pyplot as plt
>>> plt.hist(r)
(array([962, 32, 3, 2, 0, 0, 0, 0, 0, 1]), array([ 1.00013046, 1.3968991 , 1.79366773, 2.19043637, 2.587205 ,
2.98397364, 3.38074227, 3.77751091, 4.17427955, 4.57104818,
4.96781682]), <a list of 10 Patch objects>)
>>> plt.show()
'''
|
|
from __future__ import absolute_import
import six
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree."""
features = ['html5lib', PERMISSIVE, HTML_5, HTML]
def prepare_markup(self, markup, user_specified_encoding):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
yield (markup, None, None, False)
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
doc = parser.parse(markup, encoding=self.user_specified_encoding)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, six.text_type):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
self.soup, namespaceHTMLElements)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><head></head><body>%s</body></html>' % fragment
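# A minimal usage sketch (assuming bs4 and html5lib are installed): passing
# 'html5lib' as the features argument makes BeautifulSoup select this builder,
# which parses the way browsers do and fills in missing document structure.
#
#   from bs4 import BeautifulSoup
#   soup = BeautifulSoup("<p>Unclosed <b>markup", "html5lib")
#   soup.prettify()   # html5lib supplies the <html>, <head> and <body> wrappers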
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
def fragmentClass(self):
self.soup = BeautifulSoup("")
self.soup.name = "[document_fragment]"
return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
def __setitem__(self, name, value):
"set attr", name, value
self.element[name] = value
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
def __init__(self, element, soup, namespace):
html5lib.treebuilders._base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
string_child = child = None
if isinstance(node, six.string_types):
# Some other piece of code decided to pass in a string
# instead of creating a TextElement object to contain the
# string.
string_child = child = node
elif isinstance(node, Tag):
# Some other piece of code decided to pass in a Tag
# instead of creating an Element object to contain the
# Tag.
child = node
elif node.element.__class__ == NavigableString:
string_child = child = node.element
else:
child = node.element
if not isinstance(child, six.string_types) and child.parent is not None:
node.element.extract()
if (string_child and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# We are appending a string onto another string.
# TODO This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + string_child)
old_element.replace_with(new_element)
self.soup._most_recent_element = new_element
else:
if isinstance(node, six.string_types):
# Create a brand new NavigableString from this string.
child = self.soup.new_string(node)
# Tell Beautiful Soup to act as if it parsed this element
# immediately after the parent's last descendant. (Or
# immediately after the parent, if it has no children.)
if self.element.contents:
most_recent_element = self.element._last_descendant(False)
else:
most_recent_element = self.element
self.soup.object_was_parsed(
child, parent=self.element,
most_recent_element=most_recent_element)
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and len(attributes) > 0:
converted_attributes = []
for name, value in list(attributes.items()):
if isinstance(name, tuple):
new_name = NamespacedAttribute(*name)
del attributes[name]
attributes[new_name] = value
self.soup.builder._replace_cdata_list_attribute_values(
self.name, attributes)
for name, value in attributes.items():
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.soup.builder.set_up_substitutions(self.element)
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
if insertBefore:
text = TextNode(self.soup.new_string(data), self.soup)
            self.insertBefore(text, insertBefore)
else:
self.appendChild(data)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
node.element.extract()
def reparentChildren(self, new_parent):
"""Move all of this tag's children into another tag."""
element = self.element
new_parent_element = new_parent.element
# Determine what this tag's next_element will be once all the children
# are removed.
final_next_element = element.next_sibling
new_parents_last_descendant = new_parent_element._last_descendant(False, False)
if len(new_parent_element.contents) > 0:
# The new parent already contains children. We will be
# appending this tag's children to the end.
new_parents_last_child = new_parent_element.contents[-1]
new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
else:
# The new parent contains no children.
new_parents_last_child = None
new_parents_last_descendant_next_element = new_parent_element.next_element
to_append = element.contents
append_after = new_parent.element.contents
if len(to_append) > 0:
# Set the first child's previous_element and previous_sibling
# to elements within the new parent
first_child = to_append[0]
first_child.previous_element = new_parents_last_descendant
first_child.previous_sibling = new_parents_last_child
# Fix the last child's next_element and next_sibling
last_child = to_append[-1]
last_child.next_element = new_parents_last_descendant_next_element
last_child.next_sibling = None
for child in to_append:
child.parent = new_parent_element
new_parent_element.contents.append(child)
# Now that this element has no children, change its .next_element.
element.contents = []
element.next_element = final_next_element
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespace)
for key,value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
html5lib.treebuilders._base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
|
|
# -*- coding: windows-1252 -*-
from arelle.xlwt import BIFFRecords
from arelle.xlwt import Style
from arelle.xlwt.Cell import StrCell, BlankCell, NumberCell, FormulaCell, MulBlankCell, BooleanCell, ErrorCell, \
_get_cells_biff_data_mul
from arelle.xlwt import ExcelFormula
import datetime as dt
try:
from decimal import Decimal
except ImportError:
# Python 2.3: decimal not supported; create dummy Decimal class
class Decimal(object):
pass
class Row(object):
__slots__ = [# private variables
"__idx",
"__parent",
"__parent_wb",
"__cells",
"__min_col_idx",
"__max_col_idx",
"__xf_index",
"__has_default_xf_index",
"__height_in_pixels",
# public variables
"height",
"has_default_height",
"height_mismatch",
"level",
"collapse",
"hidden",
"space_above",
"space_below"]
def __init__(self, rowx, parent_sheet):
if not (isinstance(rowx, int) and 0 <= rowx <= 65535):
raise ValueError("row index (%r) not an int in range(65536)" % rowx)
self.__idx = rowx
self.__parent = parent_sheet
self.__parent_wb = parent_sheet.get_parent()
self.__cells = {}
self.__min_col_idx = 0
self.__max_col_idx = 0
self.__xf_index = 0x0F
self.__has_default_xf_index = 0
self.__height_in_pixels = 0x11
self.height = 0x00FF
self.has_default_height = 0x00
self.height_mismatch = 0
self.level = 0
self.collapse = 0
self.hidden = 0
self.space_above = 0
self.space_below = 0
def __adjust_height(self, style):
twips = style.font.height
points = float(twips)/20.0
        # Cell height in pixels can be calculated by the following approximate formula:
# cell height in pixels = font height in points * 83/50 + 2/5
# It works when screen resolution is 96 dpi
pix = int(round(points*83.0/50.0 + 2.0/5.0))
if pix > self.__height_in_pixels:
self.__height_in_pixels = pix
def __adjust_bound_col_idx(self, *args):
for arg in args:
iarg = int(arg)
if not ((0 <= iarg <= 255) and arg == iarg):
raise ValueError("column index (%r) not an int in range(256)" % arg)
sheet = self.__parent
if iarg < self.__min_col_idx:
self.__min_col_idx = iarg
if iarg > self.__max_col_idx:
self.__max_col_idx = iarg
if iarg < sheet.first_used_col:
sheet.first_used_col = iarg
if iarg > sheet.last_used_col:
sheet.last_used_col = iarg
def __excel_date_dt(self, date):
adj = False
if isinstance(date, dt.date):
if self.__parent_wb.dates_1904:
epoch_tuple = (1904, 1, 1)
else:
epoch_tuple = (1899, 12, 31)
adj = True
if isinstance(date, dt.datetime):
epoch = dt.datetime(*epoch_tuple)
else:
epoch = dt.date(*epoch_tuple)
else: # it's a datetime.time instance
date = dt.datetime.combine(dt.datetime(1900, 1, 1), date)
epoch = dt.datetime(1900, 1, 1)
delta = date - epoch
xldate = delta.days + delta.seconds / 86400.0
# Add a day for Excel's missing leap day in 1900
if adj and xldate > 59:
xldate += 1
return xldate
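    # Worked example of the conversion above (1900 date system, i.e.
    # dates_1904 == False): dt.date(2000, 1, 1) lies 36525 days after the
    # 1899-12-31 epoch; since that is past Excel's fictitious 1900-02-29
    # (xldate > 59), one day is added, giving 36526.0, Excel's serial number
    # for 2000-01-01.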
def get_height_in_pixels(self):
return self.__height_in_pixels
def set_style(self, style):
self.__adjust_height(style)
self.__xf_index = self.__parent_wb.add_style(style)
self.__has_default_xf_index = 1
def get_xf_index(self):
return self.__xf_index
def get_cells_count(self):
return len(self.__cells)
def get_min_col(self):
return self.__min_col_idx
def get_max_col(self):
return self.__max_col_idx
def get_row_biff_data(self):
height_options = (self.height & 0x07FFF)
height_options |= (self.has_default_height & 0x01) << 15
options = (self.level & 0x07) << 0
options |= (self.collapse & 0x01) << 4
options |= (self.hidden & 0x01) << 5
options |= (self.height_mismatch & 0x01) << 6
options |= (self.__has_default_xf_index & 0x01) << 7
options |= (0x01 & 0x01) << 8
options |= (self.__xf_index & 0x0FFF) << 16
options |= (self.space_above & 1) << 28
options |= (self.space_below & 1) << 29
return BIFFRecords.RowRecord(self.__idx, self.__min_col_idx,
self.__max_col_idx, height_options, options).get()
def insert_cell(self, col_index, cell_obj):
if col_index in self.__cells:
if not self.__parent._cell_overwrite_ok:
msg = "Attempt to overwrite cell: sheetname=%r rowx=%d colx=%d" \
% (self.__parent.name, self.__idx, col_index)
raise Exception(msg)
prev_cell_obj = self.__cells[col_index]
sst_idx = getattr(prev_cell_obj, 'sst_idx', None)
if sst_idx is not None:
self.__parent_wb.del_str(sst_idx)
self.__cells[col_index] = cell_obj
def insert_mulcells(self, colx1, colx2, cell_obj):
self.insert_cell(colx1, cell_obj)
for col_index in range(colx1+1, colx2+1):
self.insert_cell(col_index, None)
def get_cells_biff_data(self):
cell_items = [item for item in self.__cells.items() if item[1] is not None]
cell_items.sort() # in column order
return _get_cells_biff_data_mul(self.__idx, cell_items)
# previously:
# return ''.join([cell.get_biff_data() for colx, cell in cell_items])
def get_index(self):
return self.__idx
def set_cell_text(self, colx, value, style=Style.default_style):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.insert_cell(colx, StrCell(self.__idx, colx, xf_index, self.__parent_wb.add_str(value)))
def set_cell_blank(self, colx, style=Style.default_style):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.insert_cell(colx, BlankCell(self.__idx, colx, xf_index))
def set_cell_mulblanks(self, first_colx, last_colx, style=Style.default_style):
assert 0 <= first_colx <= last_colx <= 255
self.__adjust_height(style)
self.__adjust_bound_col_idx(first_colx, last_colx)
xf_index = self.__parent_wb.add_style(style)
# ncols = last_colx - first_colx + 1
self.insert_mulcells(first_colx, last_colx, MulBlankCell(self.__idx, first_colx, last_colx, xf_index))
def set_cell_number(self, colx, number, style=Style.default_style):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.insert_cell(colx, NumberCell(self.__idx, colx, xf_index, number))
def set_cell_date(self, colx, datetime_obj, style=Style.default_style):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.insert_cell(colx,
NumberCell(self.__idx, colx, xf_index, self.__excel_date_dt(datetime_obj)))
def set_cell_formula(self, colx, formula, style=Style.default_style, calc_flags=0):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.__parent_wb.add_sheet_reference(formula)
        self.insert_cell(colx, FormulaCell(self.__idx, colx, xf_index, formula, calc_flags=calc_flags))
def set_cell_boolean(self, colx, value, style=Style.default_style):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.insert_cell(colx, BooleanCell(self.__idx, colx, xf_index, bool(value)))
def set_cell_error(self, colx, error_string_or_code, style=Style.default_style):
self.__adjust_height(style)
self.__adjust_bound_col_idx(colx)
xf_index = self.__parent_wb.add_style(style)
self.insert_cell(colx, ErrorCell(self.__idx, colx, xf_index, error_string_or_code))
def write(self, col, label, style=Style.default_style):
self.__adjust_height(style)
self.__adjust_bound_col_idx(col)
style_index = self.__parent_wb.add_style(style)
if isinstance(label, str):
if len(label) > 0:
self.insert_cell(col,
StrCell(self.__idx, col, style_index, self.__parent_wb.add_str(label))
)
else:
self.insert_cell(col, BlankCell(self.__idx, col, style_index))
elif isinstance(label, bool): # bool is subclass of int; test bool first
self.insert_cell(col, BooleanCell(self.__idx, col, style_index, label))
elif isinstance(label, (float, int, Decimal)):
self.insert_cell(col, NumberCell(self.__idx, col, style_index, label))
elif isinstance(label, (dt.datetime, dt.date, dt.time)):
date_number = self.__excel_date_dt(label)
self.insert_cell(col, NumberCell(self.__idx, col, style_index, date_number))
elif label is None:
self.insert_cell(col, BlankCell(self.__idx, col, style_index))
elif isinstance(label, ExcelFormula.Formula):
self.__parent_wb.add_sheet_reference(label)
self.insert_cell(col, FormulaCell(self.__idx, col, style_index, label))
else:
raise Exception("Unexpected data type %r" % type(label))
write_blanks = set_cell_mulblanks
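# Usage sketch (assuming the vendored arelle.xlwt API mirrors stock xlwt):
# Worksheet.write delegates to Row.write above, which dispatches on the value type.
#
#   import datetime
#   from arelle.xlwt import Workbook
#   wb = Workbook()
#   ws = wb.add_sheet('demo')
#   ws.write(0, 0, 'label')                      # StrCell
#   ws.write(0, 1, 3.14)                         # NumberCell
#   ws.write(0, 2, datetime.date(2000, 1, 1))    # NumberCell holding an Excel date serial
#   wb.save('demo.xls')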
|
|
import sys
import unittest
from ledis import Ledis
from ledis.client import Token
from walrus import *
from walrus.containers import chainable_method
from walrus.tusks.helpers import TestHelper
class Scannable(object):
def _scan(self, cmd, match=None, count=None, ordering=None, limit=None):
parts = [self.key, '']
if match:
parts.extend([Token('MATCH'), match])
if count:
parts.extend([Token('COUNT'), count])
if ordering:
parts.append(Token(ordering.upper()))
return self._execute_scan(self.database, cmd, parts, limit)
def _execute_scan(self, database, cmd, parts, limit=None):
idx = 0
while True:
cursor, rows = database.execute_command(cmd, *parts)
for row in rows:
idx += 1
if limit and idx > limit:
cursor = None
break
yield row
if cursor:
parts[1] = cursor
else:
break
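# Usage sketch for the cursor loop above (hypothetical keys; requires a running
# ledisdb server). Each XHSCAN/XSSCAN/XZSCAN call returns (cursor, rows), and
# _execute_scan keeps reissuing the command until the cursor is exhausted or the
# optional limit is reached:
#
#   db = WalrusLedis()
#   h = db.Hash('h_obj')
#   h.update({'k1': 'v1', 'k2': 'v2'})
#   list(h.scan(match='k*', count=10))   # -> ['k1', 'v1', 'k2', 'v2']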
class Sortable(object):
def _sort(self, cmd, pattern=None, limit=None, offset=None,
get_pattern=None, ordering=None, alpha=True, store=None):
parts = [self.key]
def add_kw(kw, param):
if param is not None:
parts.extend([Token(kw), param])
add_kw('BY', pattern)
if limit or offset:
offset = offset or 0
limit = limit or 'Inf'
parts.extend([Token('LIMIT'), offset, limit])
add_kw('GET', get_pattern)
if ordering:
parts.append(Token(ordering))
if alpha:
parts.append(Token('ALPHA'))
add_kw('STORE', store)
return self.database.execute_command(cmd, *parts)
class LedisHash(Scannable, Hash):
@chainable_method
def clear(self):
self.database.hclear(self.key)
@chainable_method
def expire(self, ttl=None):
if ttl is not None:
self.database.hexpire(self.key, ttl)
else:
self.database.hpersist(self.key)
def __iter__(self):
return self._scan('XHSCAN')
def scan(self, match=None, count=None, ordering=None, limit=None):
if limit is not None:
limit *= 2 # Hashes yield 2 values.
return self._scan('XHSCAN', match, count, ordering, limit)
class LedisList(Sortable, List):
@chainable_method
def clear(self):
self.database.lclear(self.key)
def __setitem__(self, idx, value):
raise TypeError('Ledis does not support setting values by index.')
@chainable_method
def expire(self, ttl=None):
if ttl is not None:
self.database.lexpire(self.key, ttl)
else:
self.database.lpersist(self.key)
def sort(self, *args, **kwargs):
return self._sort('XLSORT', *args, **kwargs)
class LedisSet(Scannable, Sortable, Set):
@chainable_method
def clear(self):
self.database.sclear(self.key)
@chainable_method
def expire(self, ttl=None):
if ttl is not None:
self.database.sexpire(self.key, ttl)
else:
self.database.spersist(self.key)
def __iter__(self):
return self._scan('XSSCAN')
def scan(self, match=None, count=None, ordering=None, limit=None):
return self._scan('XSSCAN', match, count, ordering, limit)
def sort(self, *args, **kwargs):
return self._sort('XSSORT', *args, **kwargs)
class LedisZSet(Scannable, Sortable, ZSet):
@chainable_method
def clear(self):
self.database.zclear(self.key)
@chainable_method
def expire(self, ttl=None):
if ttl is not None:
self.database.zexpire(self.key, ttl)
else:
self.database.zpersist(self.key)
def __iter__(self):
return self._scan('XZSCAN')
def scan(self, match=None, count=None, ordering=None, limit=None):
if limit:
limit *= 2
return self._scan('XZSCAN', match, count, ordering, limit)
def sort(self, *args, **kwargs):
return self._sort('XZSORT', *args, **kwargs)
class LedisBitSet(Container):
def clear(self):
self.database.delete(self.key)
def __getitem__(self, idx):
return self.database.execute_command('GETBIT', self.key, idx)
def __setitem__(self, idx, value):
return self.database.execute_command('SETBIT', self.key, idx, value)
def pos(self, bit, start=None, end=None):
pieces = ['BITPOS', self.key, bit]
if start or end:
pieces.append(start or 0)
if end:
pieces.append(end)
return self.database.execute_command(*pieces)
def __iand__(self, other):
self.database.execute_command(
'BITOP',
'AND',
self.key,
self.key,
other.key)
return self
def __ior__(self, other):
self.database.execute_command(
'BITOP',
'OR',
self.key,
self.key,
other.key)
return self
def __ixor__(self, other):
self.database.execute_command(
'BITOP',
'XOR',
self.key,
self.key,
other.key)
return self
def __str__(self):
return self.database[self.key]
__unicode__ = __str__
class WalrusLedis(Ledis, Scannable, Walrus):
def __init__(self, *args, **kwargs):
super(WalrusLedis, self).__init__(*args, **kwargs)
def __setitem__(self, key, value):
self.set(key, value)
def setex(self, name, value, time):
return super(WalrusLedis, self).setex(name, time, value)
def zadd(self, key, *args, **kwargs):
if not isinstance(args[0], (int, float)):
reordered = []
for idx in range(0, len(args), 2):
reordered.append(args[idx + 1])
reordered.append(args[idx])
else:
reordered = args
return super(WalrusLedis, self).zadd(key, *reordered, **kwargs)
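    # For example, zadd('z', 'huey', 3, 'mickey', 6) -- the (member, score)
    # order used by walrus's ZSet.add -- is rewritten to
    # zadd('z', 3, 'huey', 6, 'mickey') before being sent to Ledis, while a
    # call that already starts with a numeric score passes through unchanged.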
def hash_exists(self, key):
return self.execute_command('HKEYEXISTS', key)
def __iter__(self):
return self.scan()
def scan(self, *args, **kwargs):
return self._scan('XSCAN', *args, **kwargs)
def _scan(self, cmd, match=None, count=None, ordering=None, limit=None):
parts = ['KV', '']
if match:
parts.extend([Token('MATCH'), match])
if count:
parts.extend([Token('COUNT'), count])
if ordering:
parts.append(Token(ordering.upper()))
return self._execute_scan(self, cmd, parts, limit)
def update(self, values):
return self.mset(values)
def BitSet(self, key):
return LedisBitSet(self, key)
def Hash(self, key):
return LedisHash(self, key)
def List(self, key):
return LedisList(self, key)
def Set(self, key):
return LedisSet(self, key)
def ZSet(self, key):
return LedisZSet(self, key)
class TestWalrusLedis(TestHelper, unittest.TestCase):
def setUp(self):
self.db = WalrusLedis()
self.db.flushall()
def test_scan(self):
values = {
'k1': 'v1',
'k2': 'v2',
'k3': 'v3',
'charlie': 31,
'mickey': 7,
'huey': 5}
self.db.update(values)
results = self.db.scan()
expected = ['charlie', 'huey', 'k1', 'k2', 'k3', 'mickey']
self.assertEqual(list(results), expected)
self.assertEqual([item for item in self.db], expected)
def test_hash_iter(self):
h = self.db.Hash('h_obj')
h.clear()
h.update({'k1': 'v1', 'k2': 'v2', 'k3': 'v3'})
items = [item for item in h]
self.assertEqual(items, ['k1', 'v1', 'k2', 'v2', 'k3', 'v3'])
items = [item for item in h.scan(limit=2)]
self.assertEqual(items, ['k1', 'v1', 'k2', 'v2'])
def test_no_setitem_list(self):
l = self.db.List('l_obj').clear()
l.append('foo')
self.assertRaises(TypeError, lambda: l.__setitem__(0, 'xx'))
def test_set_iter(self):
s = self.db.Set('s_obj').clear()
s.add('charlie', 'huey', 'mickey')
items = [item for item in s]
self.assertEqual(sorted(items), ['charlie', 'huey', 'mickey'])
items = [item for item in s.scan(limit=2, ordering='DESC')]
self.assertEqual(items, ['mickey', 'huey'])
def test_zset_iter(self):
zs = self.db.ZSet('z_obj').clear()
zs.add('zaizee', 3, 'mickey', 6, 'charlie', 31, 'huey', 3, 'nuggie', 0)
items = [item for item in zs]
self.assertEqual(items, [
'charlie', '31',
'huey', '3',
'mickey', '6',
'nuggie', '0',
'zaizee', '3',
])
items = [item for item in zs.scan(limit=3, ordering='DESC')]
self.assertEqual(items, [
'zaizee', '3',
'nuggie', '0',
'mickey', '6',
])
def test_bit_set(self):
b = self.db.BitSet('bitset_obj')
b.clear()
b[0] = 1
b[1] = 1
b[2] = 0
b[3] = 1
self.assertEqual(self.db[b.key], '\xd0')
b[4] = 1
self.assertEqual(self.db[b.key], '\xd8')
self.assertEqual(b[0], 1)
self.assertEqual(b[2], 0)
self.db['b1'] = 'foobar'
self.db['b2'] = 'abcdef'
b = self.db.BitSet('b1')
b2 = self.db.BitSet('b2')
b &= b2
self.assertEqual(self.db[b.key], '`bc`ab')
self.assertEqual(str(b), '`bc`ab')
self.db['b1'] = '\x00\xff\xf0'
self.assertEqual(b.pos(1, 0), 8)
self.assertEqual(b.pos(1, 2), 16)
self.db['b1'] = '\x00\x00\x00'
self.assertEqual(b.pos(1), -1)
def test_sorting(self):
items = ['charlie', 'zaizee', 'mickey', 'huey']
sorted_items = sorted(items)
l = self.db.List('l_obj').clear()
l.extend(items)
results = l.sort()
self.assertEqual(results, sorted_items)
dest = self.db.List('l_dest')
l.sort(ordering='DESC', limit=3, store=dest.key)
results = list(dest)
self.assertEqual(results, ['zaizee', 'mickey', 'huey'])
s = self.db.Set('s_obj').clear()
s.add(*items)
results = s.sort()
self.assertEqual(results, sorted_items)
results = s.sort(ordering='DESC', limit=3)
self.assertEqual(results, ['zaizee', 'mickey', 'huey'])
z = self.db.ZSet('z_obj').clear()
z.add('charlie', 10, 'zaizee', 10, 'mickey', 3, 'huey', 4)
results = z.sort()
self.assertEqual(results, sorted_items)
results = z.sort(ordering='DESC', limit=3)
self.assertEqual(results, ['zaizee', 'mickey', 'huey'])
if __name__ == '__main__':
unittest.main(argv=sys.argv)
|
|
# Authors: Fabian Pedregosa <fabian@fseoane.net>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import interpolate
from scipy.stats import spearmanr
from .base import BaseEstimator, TransformerMixin, RegressorMixin
from .utils import as_float_array, check_array, check_consistent_length
from .utils import deprecated
from .utils.fixes import astype
from ._isotonic import _isotonic_regression, _make_unique
import warnings
import math
__all__ = ['check_increasing', 'isotonic_regression',
'IsotonicRegression']
def check_increasing(x, y):
"""Determine whether y is monotonically correlated with x.
y is found increasing or decreasing with respect to x based on a Spearman
correlation test.
Parameters
----------
x : array-like, shape=(n_samples,)
Training data.
y : array-like, shape=(n_samples,)
Training target.
Returns
-------
`increasing_bool` : boolean
Whether the relationship is increasing or decreasing.
Notes
-----
The Spearman correlation coefficient is estimated from the data, and the
sign of the resulting estimate is used as the result.
In the event that the 95% confidence interval based on Fisher transform
spans zero, a warning is raised.
References
----------
Fisher transformation. Wikipedia.
https://en.wikipedia.org/wiki/Fisher_transformation
"""
# Calculate Spearman rho estimate and set return accordingly.
rho, _ = spearmanr(x, y)
increasing_bool = rho >= 0
# Run Fisher transform to get the rho CI, but handle rho=+/-1
if rho not in [-1.0, 1.0]:
F = 0.5 * math.log((1. + rho) / (1. - rho))
F_se = 1 / math.sqrt(len(x) - 3)
# Use a 95% CI, i.e., +/-1.96 S.E.
# https://en.wikipedia.org/wiki/Fisher_transformation
rho_0 = math.tanh(F - 1.96 * F_se)
rho_1 = math.tanh(F + 1.96 * F_se)
# Warn if the CI spans zero.
if np.sign(rho_0) != np.sign(rho_1):
warnings.warn("Confidence interval of the Spearman "
"correlation coefficient spans zero. "
"Determination of ``increasing`` may be "
"suspect.")
return increasing_bool
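# A minimal sketch (hand-picked data): a perfectly monotone relationship gives
# rho = +/-1, so the Fisher-transform CI check is skipped and only the sign of
# rho decides the result.
#
#   >>> check_increasing([1, 2, 3, 4, 5], [2, 4, 6, 8, 10])
#   True
#   >>> check_increasing([1, 2, 3, 4, 5], [10, 8, 6, 4, 2])
#   False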
def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None,
increasing=True):
"""Solve the isotonic regression model::
min sum w[i] (y[i] - y_[i]) ** 2
        subject to y_min <= y_[1] <= y_[2] ... <= y_[n] <= y_max
where:
- y[i] are inputs (real numbers)
- y_[i] are fitted
- w[i] are optional strictly positive weights (default to 1.0)
Read more in the :ref:`User Guide <isotonic>`.
Parameters
----------
y : iterable of floating-point values
The data.
sample_weight : iterable of floating-point values, optional, default: None
Weights on each point of the regression.
If None, weight is set to 1 (equal weights).
y_min : optional, default: None
If not None, set the lowest value of the fit to y_min.
y_max : optional, default: None
If not None, set the highest value of the fit to y_max.
increasing : boolean, optional, default: True
        Whether to compute ``y_`` as increasing (if set to True) or decreasing
        (if set to False).
Returns
-------
y_ : list of floating-point values
Isotonic fit of y.
References
----------
"Active set algorithms for isotonic regression; A unifying framework"
by Michael J. Best and Nilotpal Chakravarti, section 3.
"""
y = np.asarray(y, dtype=np.float64)
if sample_weight is None:
sample_weight = np.ones(len(y), dtype=y.dtype)
else:
sample_weight = np.asarray(sample_weight, dtype=np.float64)
if not increasing:
y = y[::-1]
sample_weight = sample_weight[::-1]
solution = np.empty(len(y))
y_ = _isotonic_regression(y, sample_weight, solution)
if not increasing:
y_ = y_[::-1]
if y_min is not None or y_max is not None:
# Older versions of np.clip don't accept None as a bound, so use np.inf
if y_min is None:
y_min = -np.inf
if y_max is None:
y_max = np.inf
np.clip(y_, y_min, y_max, y_)
return y_
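# A minimal sketch of the pool-adjacent-violators behaviour (hand-picked values):
# the decreasing pair (3, 2) violates monotonicity and is pooled to its
# equal-weight mean, 2.5.
#
#   >>> isotonic_regression([1., 3., 2.])
#   array([ 1. ,  2.5,  2.5])
#
# With increasing=False the input is reversed, fitted, and reversed back, so the
# same call would instead return a non-increasing sequence.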
class IsotonicRegression(BaseEstimator, TransformerMixin, RegressorMixin):
"""Isotonic regression model.
The isotonic regression optimization problem is defined by::
min sum w_i (y[i] - y_[i]) ** 2
subject to y_[i] <= y_[j] whenever X[i] <= X[j]
and min(y_) = y_min, max(y_) = y_max
where:
- ``y[i]`` are inputs (real numbers)
- ``y_[i]`` are fitted
- ``X`` specifies the order.
If ``X`` is non-decreasing then ``y_`` is non-decreasing.
- ``w[i]`` are optional strictly positive weights (default to 1.0)
Read more in the :ref:`User Guide <isotonic>`.
Parameters
----------
y_min : optional, default: None
If not None, set the lowest value of the fit to y_min.
y_max : optional, default: None
If not None, set the highest value of the fit to y_max.
increasing : boolean or string, optional, default: True
If boolean, whether or not to fit the isotonic regression with y
increasing or decreasing.
The string value "auto" determines whether y should
increase or decrease based on the Spearman correlation estimate's
sign.
out_of_bounds : string, optional, default: "nan"
The ``out_of_bounds`` parameter handles how x-values outside of the
training domain are handled. When set to "nan", predicted y-values
will be NaN. When set to "clip", predicted y-values will be
set to the value corresponding to the nearest train interval endpoint.
When set to "raise", allow ``interp1d`` to throw ValueError.
Attributes
----------
X_min_ : float
Minimum value of input array `X_` for left bound.
X_max_ : float
Maximum value of input array `X_` for right bound.
f_ : function
The stepwise interpolating function that covers the domain `X_`.
Notes
-----
Ties are broken using the secondary method from Leeuw, 1977.
References
----------
Isotonic Median Regression: A Linear Programming Approach
Nilotpal Chakravarti
Mathematics of Operations Research
Vol. 14, No. 2 (May, 1989), pp. 303-308
Isotone Optimization in R : Pool-Adjacent-Violators
Algorithm (PAVA) and Active Set Methods
Leeuw, Hornik, Mair
Journal of Statistical Software 2009
Correctness of Kruskal's algorithms for monotone regression with ties
Leeuw, Psychometrica, 1977
"""
def __init__(self, y_min=None, y_max=None, increasing=True,
out_of_bounds='nan'):
self.y_min = y_min
self.y_max = y_max
self.increasing = increasing
self.out_of_bounds = out_of_bounds
@property
@deprecated("Attribute ``X_`` is deprecated in version 0.18 and will be"
" removed in version 0.20.")
def X_(self):
return self._X_
@X_.setter
def X_(self, value):
self._X_ = value
@X_.deleter
def X_(self):
del self._X_
@property
@deprecated("Attribute ``y_`` is deprecated in version 0.18 and will"
" be removed in version 0.20.")
def y_(self):
return self._y_
@y_.setter
def y_(self, value):
self._y_ = value
@y_.deleter
def y_(self):
del self._y_
def _check_fit_data(self, X, y, sample_weight=None):
if len(X.shape) != 1:
raise ValueError("X should be a 1d array")
def _build_f(self, X, y):
"""Build the f_ interp1d function."""
# Handle the out_of_bounds argument by setting bounds_error
if self.out_of_bounds not in ["raise", "nan", "clip"]:
raise ValueError("The argument ``out_of_bounds`` must be in "
"'nan', 'clip', 'raise'; got {0}"
.format(self.out_of_bounds))
bounds_error = self.out_of_bounds == "raise"
if len(y) == 1:
# single y, constant prediction
self.f_ = lambda x: y.repeat(x.shape)
else:
self.f_ = interpolate.interp1d(X, y, kind='linear',
bounds_error=bounds_error)
def _build_y(self, X, y, sample_weight, trim_duplicates=True):
"""Build the y_ IsotonicRegression."""
check_consistent_length(X, y, sample_weight)
X, y = [check_array(x, ensure_2d=False) for x in [X, y]]
y = as_float_array(y)
self._check_fit_data(X, y, sample_weight)
# Determine increasing if auto-determination requested
if self.increasing == 'auto':
self.increasing_ = check_increasing(X, y)
else:
self.increasing_ = self.increasing
# If sample_weight is passed, remove zero-weight values and clean up the
# ordering
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
mask = sample_weight > 0
X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
else:
sample_weight = np.ones(len(y))
order = np.lexsort((y, X))
X, y, sample_weight = [astype(array[order], np.float64, copy=False)
for array in [X, y, sample_weight]]
unique_X, unique_y, unique_sample_weight = _make_unique(
X, y, sample_weight)
# Store _X_ and _y_ to maintain backward compat during the deprecation
# period of X_ and y_
self._X_ = X = unique_X
self._y_ = y = isotonic_regression(unique_y, unique_sample_weight,
self.y_min, self.y_max,
increasing=self.increasing_)
# Handle the left and right bounds on X
self.X_min_, self.X_max_ = np.min(X), np.max(X)
if trim_duplicates:
# Remove unnecessary points for faster prediction
keep_data = np.ones((len(y),), dtype=bool)
# Aside from the 1st and last point, remove points whose y values
# are equal to both the point before and the point after it.
keep_data[1:-1] = np.logical_or(
np.not_equal(y[1:-1], y[:-2]),
np.not_equal(y[1:-1], y[2:])
)
return X[keep_data], y[keep_data]
else:
# The ability to turn off trim_duplicates is only used to make it
# easier to unit test that removing duplicates in y does not have
# any impact on the resulting interpolation function (besides
# prediction speed).
return X, y
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape=(n_samples,)
Training data.
y : array-like, shape=(n_samples,)
Training target.
sample_weight : array-like, shape=(n_samples,), optional, default: None
Weights. If set to None, all weights will be set to 1 (equal
weights).
Returns
-------
self : object
Returns an instance of self.
Notes
-----
X is stored for future use, as `transform` needs X to interpolate
new input data.
"""
# Transform y by running the isotonic regression algorithm and
# transform X accordingly.
X, y = self._build_y(X, y, sample_weight)
# It is necessary to store the non-redundant part of the training set
# on the model to make it possible to support model persistence via
# the pickle module as the object built by scipy.interp1d is not
# picklable directly.
self._necessary_X_, self._necessary_y_ = X, y
# Build the interpolation function
self._build_f(X, y)
return self
def transform(self, T):
"""Transform new data by linear interpolation
Parameters
----------
T : array-like, shape=(n_samples,)
Data to transform.
Returns
-------
T_ : array, shape=(n_samples,)
The transformed data
"""
T = as_float_array(T)
if len(T.shape) != 1:
raise ValueError("Isotonic regression input should be a 1d array")
# Handle the out_of_bounds argument by clipping if needed
if self.out_of_bounds not in ["raise", "nan", "clip"]:
raise ValueError("The argument ``out_of_bounds`` must be in "
"'nan', 'clip', 'raise'; got {0}"
.format(self.out_of_bounds))
if self.out_of_bounds == "clip":
T = np.clip(T, self.X_min_, self.X_max_)
return self.f_(T)
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape=(n_samples,)
Data to transform.
Returns
-------
T_ : array, shape=(n_samples,)
Transformed data.
"""
return self.transform(T)
def __getstate__(self):
"""Pickle-protocol - return state of the estimator. """
state = super(IsotonicRegression, self).__getstate__()
# remove interpolation method
state.pop('f_', None)
return state
def __setstate__(self, state):
"""Pickle-protocol - set state of the estimator.
We need to rebuild the interpolation function.
"""
super(IsotonicRegression, self).__setstate__(state)
if hasattr(self, '_necessary_X_') and hasattr(self, '_necessary_y_'):
self._build_f(self._necessary_X_, self._necessary_y_)
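# A short usage sketch of the estimator above (illustrative only): fit on a
# small 1-d sample and predict at new x-values, using out_of_bounds="clip" so
# queries outside [X_min_, X_max_] are clamped to the boundary values instead
# of returning NaN. The data points are arbitrary example values.
def _isotonic_regression_estimator_example():
    X = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    y = np.array([1.0, 3.0, 2.0, 6.0, 7.0])
    ir = IsotonicRegression(out_of_bounds='clip')
    y_fit = ir.fit_transform(X, y)
    # increasing=True by default, so the fitted values are non-decreasing
    assert np.all(np.diff(y_fit) >= 0)
    # 0.0 and 10.0 lie outside the training domain and are clipped to the ends
    return ir.predict(np.array([0.0, 2.5, 10.0]))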
|
|
from warnings import catch_warnings
import pytest
import numpy as np
import pandas as pd
from pandas import (Panel, Series, MultiIndex, DataFrame,
Timestamp, Index, date_range)
from pandas.util import testing as tm
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.tests.indexing.common import _mklbl
class TestMultiIndexBasic(object):
def test_iloc_getitem_multiindex2(self):
# TODO(wesm): fix this
pytest.skip('this test was being suppressed, '
'needs to be fixed')
arr = np.random.randn(3, 3)
df = DataFrame(arr, columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
rs = df.iloc[2]
xp = Series(arr[2], index=df.columns)
tm.assert_series_equal(rs, xp)
rs = df.iloc[:, 2]
xp = Series(arr[:, 2], index=df.index)
tm.assert_series_equal(rs, xp)
rs = df.iloc[2, 2]
xp = df.values[2, 2]
assert rs == xp
# for multiple items
# GH 5528
rs = df.iloc[[0, 1]]
xp = df.xs(4, drop_level=False)
tm.assert_frame_equal(rs, xp)
tup = zip(*[['a', 'a', 'b', 'b'], ['x', 'y', 'x', 'y']])
index = MultiIndex.from_tuples(tup)
df = DataFrame(np.random.randn(4, 4), index=index)
rs = df.iloc[[2, 3]]
xp = df.xs('b', drop_level=False)
tm.assert_frame_equal(rs, xp)
def test_setitem_multiindex(self):
with catch_warnings(record=True):
for index_fn in ('ix', 'loc'):
def assert_equal(a, b):
assert a == b
def check(target, indexers, value, compare_fn, expected=None):
fn = getattr(target, index_fn)
fn.__setitem__(indexers, value)
result = fn.__getitem__(indexers)
if expected is None:
expected = value
compare_fn(result, expected)
# GH7190
index = pd.MultiIndex.from_product([np.arange(0, 100),
np.arange(0, 80)],
names=['time', 'firm'])
t, n = 0, 2
df = DataFrame(np.nan, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=0,
compare_fn=assert_equal)
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=1,
compare_fn=assert_equal)
df = DataFrame(columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df, indexers=((t, n), 'X'), value=2,
compare_fn=assert_equal)
# gh-7218: assigning with 0-dim arrays
df = DataFrame(-999, columns=['A', 'w', 'l', 'a', 'x',
'X', 'd', 'profit'],
index=index)
check(target=df,
indexers=((t, n), 'X'),
value=np.array(3),
compare_fn=assert_equal,
expected=3, )
# GH5206
df = pd.DataFrame(np.arange(25).reshape(5, 5),
columns='A,B,C,D,E'.split(','), dtype=float)
df['F'] = 99
row_selection = df['A'] % 2 == 0
col_selection = ['B', 'C']
with catch_warnings(record=True):
df.ix[row_selection, col_selection] = df['F']
output = pd.DataFrame(99., index=[0, 2, 4], columns=['B', 'C'])
with catch_warnings(record=True):
tm.assert_frame_equal(df.ix[row_selection, col_selection],
output)
check(target=df,
indexers=(row_selection, col_selection),
value=df['F'],
compare_fn=tm.assert_frame_equal,
expected=output, )
# GH11372
idx = pd.MultiIndex.from_product([
['A', 'B', 'C'],
pd.date_range('2015-01-01', '2015-04-01', freq='MS')])
cols = pd.MultiIndex.from_product([
['foo', 'bar'],
pd.date_range('2016-01-01', '2016-02-01', freq='MS')])
df = pd.DataFrame(np.random.random((12, 4)),
index=idx, columns=cols)
subidx = pd.MultiIndex.from_tuples(
[('A', pd.Timestamp('2015-01-01')),
('A', pd.Timestamp('2015-02-01'))])
subcols = pd.MultiIndex.from_tuples(
[('foo', pd.Timestamp('2016-01-01')),
('foo', pd.Timestamp('2016-02-01'))])
vals = pd.DataFrame(np.random.random((2, 2)),
index=subidx, columns=subcols)
check(target=df,
indexers=(subidx, subcols),
value=vals,
compare_fn=tm.assert_frame_equal, )
# set all columns
vals = pd.DataFrame(
np.random.random((2, 4)), index=subidx, columns=cols)
check(target=df,
indexers=(subidx, slice(None, None, None)),
value=vals,
compare_fn=tm.assert_frame_equal, )
# identity
copy = df.copy()
check(target=df, indexers=(df.index, df.columns), value=df,
compare_fn=tm.assert_frame_equal, expected=copy)
def test_loc_getitem_series(self):
# GH14730
# passing a series as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = Series([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
result = x.loc[[1, 3]]
tm.assert_series_equal(result, expected)
# GH15424
y1 = Series([1, 3], index=[1, 2])
result = x.loc[y1]
tm.assert_series_equal(result, expected)
empty = Series(data=[], dtype=np.float64)
expected = Series([], index=MultiIndex(
levels=index.levels, labels=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
def test_loc_getitem_array(self):
# GH15434
# passing an array as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = np.array([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
# empty array:
empty = np.array([])
expected = Series([], index=MultiIndex(
levels=index.levels, labels=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
# 0-dim array (scalar):
scalar = np.int64(1)
expected = Series(
data=[0, 1, 2],
index=['A', 'B', 'C'],
dtype=np.float64)
result = x.loc[scalar]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_multiindex(self):
mi_labels = DataFrame(np.random.randn(4, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j', 'k'],
['X', 'X', 'Y', 'Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
# the first row
rs = mi_int.iloc[0]
with catch_warnings(record=True):
xp = mi_int.ix[4].ix[8]
tm.assert_series_equal(rs, xp, check_names=False)
assert rs.name == (4, 8)
assert xp.name == 8
# 2nd (last) column
rs = mi_int.iloc[:, 2]
with catch_warnings(record=True):
xp = mi_int.ix[:, 2]
tm.assert_series_equal(rs, xp)
# corner column
rs = mi_int.iloc[2, 2]
with catch_warnings(record=True):
xp = mi_int.ix[:, 2].ix[2]
assert rs == xp
# this is basically regular indexing
rs = mi_labels.iloc[2, 2]
with catch_warnings(record=True):
xp = mi_labels.ix['j'].ix[:, 'j'].ix[0, 0]
assert rs == xp
def test_loc_multiindex(self):
mi_labels = DataFrame(np.random.randn(3, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
# the first row
rs = mi_labels.loc['i']
with catch_warnings(record=True):
xp = mi_labels.ix['i']
tm.assert_frame_equal(rs, xp)
# 2nd (last) column
rs = mi_labels.loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# corner column
rs = mi_labels.loc['j'].loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix['j'].ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# with a tuple
rs = mi_labels.loc[('i', 'X')]
with catch_warnings(record=True):
xp = mi_labels.ix[('i', 'X')]
tm.assert_frame_equal(rs, xp)
rs = mi_int.loc[4]
with catch_warnings(record=True):
xp = mi_int.ix[4]
tm.assert_frame_equal(rs, xp)
def test_getitem_partial_int(self):
# GH 12416
# with single item
l1 = [10, 20]
l2 = ['a', 'b']
df = DataFrame(index=range(2),
columns=pd.MultiIndex.from_product([l1, l2]))
expected = DataFrame(index=range(2),
columns=l2)
result = df[20]
tm.assert_frame_equal(result, expected)
# with list
expected = DataFrame(index=range(2),
columns=pd.MultiIndex.from_product([l1[1:], l2]))
result = df[[20]]
tm.assert_frame_equal(result, expected)
# missing item:
with tm.assert_raises_regex(KeyError, '1'):
df[1]
with tm.assert_raises_regex(KeyError, "'\[1\] not in index'"):
df[[1]]
def test_loc_multiindex_indexer_none(self):
# GH6788
# multi-index indexer is None (meaning take all)
attributes = ['Attribute' + str(i) for i in range(1)]
attribute_values = ['Value' + str(i) for i in range(5)]
index = MultiIndex.from_product([attributes, attribute_values])
df = 0.1 * np.random.randn(10, 1 * 5) + 0.5
df = DataFrame(df, columns=index)
result = df[attributes]
tm.assert_frame_equal(result, df)
# GH 7349
# loc with a multi-index seems to be doing fallback
df = DataFrame(np.arange(12).reshape(-1, 1),
index=pd.MultiIndex.from_product([[1, 2, 3, 4],
[1, 2, 3]]))
expected = df.loc[([1, 2], ), :]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_incomplete(self):
# GH 7399
# incomplete indexers
s = pd.Series(np.arange(15, dtype='int64'),
MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.loc[:, 'a':'c']
result = s.loc[0:4, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[:4, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[0:, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
# GH 7400
# multi-indexer getitem with a list of indexers skips the wrong element
s = pd.Series(np.arange(15, dtype='int64'),
MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.iloc[[6, 7, 8, 12, 13, 14]]
result = s.loc[2:4:2, 'a':'c']
tm.assert_series_equal(result, expected)
def test_multiindex_perf_warn(self):
df = DataFrame({'jim': [0, 0, 1, 1],
'joe': ['x', 'x', 'z', 'y'],
'jolie': np.random.rand(4)}).set_index(['jim', 'joe'])
with tm.assert_produces_warning(PerformanceWarning,
clear=[pd.core.index]):
df.loc[(1, 'z')]
df = df.iloc[[2, 1, 3, 0]]
with tm.assert_produces_warning(PerformanceWarning):
df.loc[(0, )]
def test_series_getitem_multiindex(self):
# GH 6018
# series regression getitem with a multi-index
s = Series([1, 2, 3])
s.index = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 1)])
result = s[:, 0]
expected = Series([1], index=[0])
tm.assert_series_equal(result, expected)
result = s.loc[:, 1]
expected = Series([2, 3], index=[1, 2])
tm.assert_series_equal(result, expected)
# xs
result = s.xs(0, level=0)
expected = Series([1], index=[0])
tm.assert_series_equal(result, expected)
result = s.xs(1, level=1)
expected = Series([2, 3], index=[1, 2])
tm.assert_series_equal(result, expected)
# GH6258
dt = list(date_range('20130903', periods=3))
idx = MultiIndex.from_product([list('AB'), dt])
s = Series([1, 3, 4, 1, 3, 4], index=idx)
result = s.xs('20130903', level=1)
expected = Series([1, 1], index=list('AB'))
tm.assert_series_equal(result, expected)
# GH5684
idx = MultiIndex.from_tuples([('a', 'one'), ('a', 'two'), ('b', 'one'),
('b', 'two')])
s = Series([1, 2, 3, 4], index=idx)
s.index.set_names(['L1', 'L2'], inplace=True)
result = s.xs('one', level='L2')
expected = Series([1, 3], index=['a', 'b'])
expected.index.set_names(['L1'], inplace=True)
tm.assert_series_equal(result, expected)
def test_xs_multiindex(self):
# GH2903
columns = MultiIndex.from_tuples(
[('a', 'foo'), ('a', 'bar'), ('b', 'hello'),
('b', 'world')], names=['lvl0', 'lvl1'])
df = DataFrame(np.random.randn(4, 4), columns=columns)
df.sort_index(axis=1, inplace=True)
result = df.xs('a', level='lvl0', axis=1)
expected = df.iloc[:, 0:2].loc[:, 'a']
tm.assert_frame_equal(result, expected)
result = df.xs('foo', level='lvl1', axis=1)
expected = df.iloc[:, 1:2].copy()
expected.columns = expected.columns.droplevel('lvl1')
tm.assert_frame_equal(result, expected)
def test_multiindex_setitem(self):
# GH 3738
# setting with a multi-index right hand side
arrays = [np.array(['bar', 'bar', 'baz', 'qux', 'qux', 'bar']),
np.array(['one', 'two', 'one', 'one', 'two', 'one']),
np.arange(0, 6, 1)]
df_orig = pd.DataFrame(np.random.randn(6, 3),
index=arrays,
columns=['A', 'B', 'C']).sort_index()
expected = df_orig.loc[['bar']] * 2
df = df_orig.copy()
df.loc[['bar']] *= 2
tm.assert_frame_equal(df.loc[['bar']], expected)
# raise because these have differing levels
def f():
df.loc['bar'] *= 2
pytest.raises(TypeError, f)
# from SO
# http://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation
df_orig = DataFrame.from_dict({'price': {
('DE', 'Coal', 'Stock'): 2,
('DE', 'Gas', 'Stock'): 4,
('DE', 'Elec', 'Demand'): 1,
('FR', 'Gas', 'Stock'): 5,
('FR', 'Solar', 'SupIm'): 0,
('FR', 'Wind', 'SupIm'): 0
}})
df_orig.index = MultiIndex.from_tuples(df_orig.index,
names=['Sit', 'Com', 'Type'])
expected = df_orig.copy()
expected.iloc[[0, 2, 3]] *= 2
idx = pd.IndexSlice
df = df_orig.copy()
df.loc[idx[:, :, 'Stock'], :] *= 2
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:, :, 'Stock'], 'price'] *= 2
tm.assert_frame_equal(df, expected)
def test_getitem_duplicates_multiindex(self):
# GH 5725: the 'A' happens to be a valid Timestamp so it doesn't raise
# the appropriate error, only in PY3 of course!
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
arr = np.random.randn(len(index), 1)
df = DataFrame(arr, index=index, columns=['val'])
result = df.val['D']
expected = Series(arr.ravel()[0:3], name='val', index=Index(
[26, 37, 57], name='day'))
tm.assert_series_equal(result, expected)
def f():
df.val['A']
pytest.raises(KeyError, f)
def f():
df.val['X']
pytest.raises(KeyError, f)
# A is treated as a special Timestamp
index = MultiIndex(levels=[['A', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
df = DataFrame(arr, index=index, columns=['val'])
result = df.val['A']
expected = Series(arr.ravel()[0:3], name='val', index=Index(
[26, 37, 57], name='day'))
tm.assert_series_equal(result, expected)
def f():
df.val['X']
pytest.raises(KeyError, f)
# GH 7866
# multi-index slicing with missing indexers
idx = pd.MultiIndex.from_product([['A', 'B', 'C'],
['foo', 'bar', 'baz']],
names=['one', 'two'])
s = pd.Series(np.arange(9, dtype='int64'), index=idx).sort_index()
exp_idx = pd.MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']],
names=['one', 'two'])
expected = pd.Series(np.arange(3, dtype='int64'),
index=exp_idx).sort_index()
result = s.loc[['A']]
tm.assert_series_equal(result, expected)
result = s.loc[['A', 'D']]
tm.assert_series_equal(result, expected)
# not any values found
pytest.raises(KeyError, lambda: s.loc[['D']])
# empty ok
result = s.loc[[]]
expected = s.iloc[[]]
tm.assert_series_equal(result, expected)
idx = pd.IndexSlice
expected = pd.Series([0, 3, 6], index=pd.MultiIndex.from_product(
[['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index()
result = s.loc[idx[:, ['foo']]]
tm.assert_series_equal(result, expected)
result = s.loc[idx[:, ['foo', 'bah']]]
tm.assert_series_equal(result, expected)
# GH 8737
# empty indexer
multi_index = pd.MultiIndex.from_product((['foo', 'bar', 'baz'],
['alpha', 'beta']))
df = DataFrame(
np.random.randn(5, 6), index=range(5), columns=multi_index)
df = df.sort_index(level=0, axis=1)
expected = DataFrame(index=range(5),
columns=multi_index.reindex([])[0])
result1 = df.loc[:, ([], slice(None))]
result2 = df.loc[:, (['foo'], [])]
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
# regression from < 0.14.0
# GH 7914
df = DataFrame([[np.mean, np.median], ['mean', 'median']],
columns=MultiIndex.from_tuples([('functs', 'mean'),
('functs', 'median')]),
index=['function', 'name'])
result = df.loc['function', ('functs', 'mean')]
assert result == np.mean
def test_multiindex_assignment(self):
# GH3777 part 2
# mixed dtype
df = DataFrame(np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list('abc'),
index=[[4, 4, 8], [8, 10, 12]])
df['d'] = np.nan
arr = np.array([0., 1.])
with catch_warnings(record=True):
df.ix[4, 'd'] = arr
tm.assert_series_equal(df.ix[4, 'd'],
Series(arr, index=[8, 10], name='d'))
# single dtype
df = DataFrame(np.random.randint(5, 10, size=9).reshape(3, 3),
columns=list('abc'),
index=[[4, 4, 8], [8, 10, 12]])
with catch_warnings(record=True):
df.ix[4, 'c'] = arr
exp = Series(arr, index=[8, 10], name='c', dtype='float64')
tm.assert_series_equal(df.ix[4, 'c'], exp)
# scalar ok
with catch_warnings(record=True):
df.ix[4, 'c'] = 10
exp = Series(10, index=[8, 10], name='c', dtype='float64')
tm.assert_series_equal(df.ix[4, 'c'], exp)
# invalid assignments
def f():
with catch_warnings(record=True):
df.ix[4, 'c'] = [0, 1, 2, 3]
pytest.raises(ValueError, f)
def f():
with catch_warnings(record=True):
df.ix[4, 'c'] = [0]
pytest.raises(ValueError, f)
# groupby example
NUM_ROWS = 100
NUM_COLS = 10
col_names = ['A' + num for num in
map(str, np.arange(NUM_COLS).tolist())]
index_cols = col_names[:5]
df = DataFrame(np.random.randint(5, size=(NUM_ROWS, NUM_COLS)),
dtype=np.int64, columns=col_names)
df = df.set_index(index_cols).sort_index()
grp = df.groupby(level=index_cols[:4])
df['new_col'] = np.nan
f_index = np.arange(5)
def f(name, df2):
return Series(np.arange(df2.shape[0]),
name=df2.index.values[0]).reindex(f_index)
# TODO(wesm): unused?
# new_df = pd.concat([f(name, df2) for name, df2 in grp], axis=1).T
# we are actually operating on a copy here
# but in this case, that's ok
for name, df2 in grp:
new_vals = np.arange(df2.shape[0])
with catch_warnings(record=True):
df.ix[name, 'new_col'] = new_vals
def test_multiindex_label_slicing_with_negative_step(self):
s = Series(np.arange(20),
MultiIndex.from_product([list('abcde'), np.arange(4)]))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
with catch_warnings(record=True):
tm.assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
assert_slices_equivalent(SLC[::-1], SLC[::-1])
assert_slices_equivalent(SLC['d'::-1], SLC[15::-1])
assert_slices_equivalent(SLC[('d', )::-1], SLC[15::-1])
assert_slices_equivalent(SLC[:'d':-1], SLC[:11:-1])
assert_slices_equivalent(SLC[:('d', ):-1], SLC[:11:-1])
assert_slices_equivalent(SLC['d':'b':-1], SLC[15:3:-1])
assert_slices_equivalent(SLC[('d', ):'b':-1], SLC[15:3:-1])
assert_slices_equivalent(SLC['d':('b', ):-1], SLC[15:3:-1])
assert_slices_equivalent(SLC[('d', ):('b', ):-1], SLC[15:3:-1])
assert_slices_equivalent(SLC['b':'d':-1], SLC[:0])
assert_slices_equivalent(SLC[('c', 2)::-1], SLC[10::-1])
assert_slices_equivalent(SLC[:('c', 2):-1], SLC[:9:-1])
assert_slices_equivalent(SLC[('e', 0):('c', 2):-1], SLC[16:9:-1])
def test_multiindex_slice_first_level(self):
# GH 12697
freq = ['a', 'b', 'c', 'd']
idx = pd.MultiIndex.from_product([freq, np.arange(500)])
df = pd.DataFrame(list(range(2000)), index=idx, columns=['Test'])
df_slice = df.loc[pd.IndexSlice[:, 30:70], :]
result = df_slice.loc['a']
expected = pd.DataFrame(list(range(30, 71)),
columns=['Test'],
index=range(30, 71))
tm.assert_frame_equal(result, expected)
result = df_slice.loc['d']
expected = pd.DataFrame(list(range(1530, 1571)),
columns=['Test'],
index=range(30, 71))
tm.assert_frame_equal(result, expected)
def test_multiindex_symmetric_difference(self):
# GH 13490
idx = MultiIndex.from_product([['a', 'b'], ['A', 'B']],
names=['a', 'b'])
result = idx ^ idx
assert result.names == idx.names
idx2 = idx.copy().rename(['A', 'B'])
result = idx ^ idx2
assert result.names == [None, None]
class TestMultiIndexSlicers(object):
def test_per_axis_per_level_getitem(self):
# GH6134
# example test case
ix = MultiIndex.from_product([_mklbl('A', 5), _mklbl('B', 7), _mklbl(
'C', 4), _mklbl('D', 2)])
df = DataFrame(np.arange(len(ix.get_values())), index=ix)
result = df.loc[(slice('A1', 'A3'), slice(None), ['C1', 'C3']), :]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C2' or c == 'C3')]]
result = df.loc[(slice('A1', 'A3'), slice(None), slice('C1', 'C3')), :]
tm.assert_frame_equal(result, expected)
# test multi-index slicing with per axis and per index controls
index = MultiIndex.from_tuples([('A', 1), ('A', 2),
('A', 3), ('B', 1)],
names=['one', 'two'])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(
np.arange(16, dtype='int64').reshape(
4, 4), index=index, columns=columns)
df = df.sort_index(axis=0).sort_index(axis=1)
# identity
result = df.loc[(slice(None), slice(None)), :]
tm.assert_frame_equal(result, df)
result = df.loc[(slice(None), slice(None)), (slice(None), slice(None))]
tm.assert_frame_equal(result, df)
result = df.loc[:, (slice(None), slice(None))]
tm.assert_frame_equal(result, df)
# index
result = df.loc[(slice(None), [1]), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), 1), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# columns
result = df.loc[:, (slice(None), ['foo'])]
expected = df.iloc[:, [1, 3]]
tm.assert_frame_equal(result, expected)
# both
result = df.loc[(slice(None), 1), (slice(None), ['foo'])]
expected = df.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(result, expected)
result = df.loc['A', 'a']
expected = DataFrame(dict(bar=[1, 5, 9], foo=[0, 4, 8]),
index=Index([1, 2, 3], name='two'),
columns=Index(['bar', 'foo'], name='lvl1'))
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), [1, 2]), :]
expected = df.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
# multi-level series
s = Series(np.arange(len(ix.get_values())), index=ix)
result = s.loc['A1':'A3', :, ['C1', 'C3']]
expected = s.loc[[tuple([a, b, c, d])
for a, b, c, d in s.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_series_equal(result, expected)
# boolean indexers
result = df.loc[(slice(None), df.loc[:, ('a', 'bar')] > 5), :]
expected = df.iloc[[2, 3]]
tm.assert_frame_equal(result, expected)
def f():
df.loc[(slice(None), np.array([True, False])), :]
pytest.raises(ValueError, f)
# ambiguous cases
# these can be multiply interpreted (e.g. in this case
# as df.loc[slice(None), [1]] as well)
pytest.raises(KeyError, lambda: df.loc[slice(None), [1]])
result = df.loc[(slice(None), [1]), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# not lexsorted
assert df.index.lexsort_depth == 2
df = df.sort_index(level=1, axis=0)
assert df.index.lexsort_depth == 0
with tm.assert_raises_regex(
UnsortedIndexError,
'MultiIndex slicing requires the index to be '
r'lexsorted: slicing on levels \[1\], lexsort depth 0'):
df.loc[(slice(None), slice('bar')), :]
# GH 16734: not sorted, but no real slicing
result = df.loc[(slice(None), df.loc[:, ('a', 'bar')] > 5), :]
tm.assert_frame_equal(result, df.iloc[[1, 3], :])
def test_multiindex_slicers_non_unique(self):
# GH 7106
# non-unique mi index support
df = (DataFrame(dict(A=['foo', 'foo', 'foo', 'foo'],
B=['a', 'a', 'a', 'a'],
C=[1, 2, 1, 3],
D=[1, 2, 3, 4]))
.set_index(['A', 'B', 'C']).sort_index())
assert not df.index.is_unique
expected = (DataFrame(dict(A=['foo', 'foo'], B=['a', 'a'],
C=[1, 1], D=[1, 3]))
.set_index(['A', 'B', 'C']).sort_index())
result = df.loc[(slice(None), slice(None), 1), :]
tm.assert_frame_equal(result, expected)
# this is equivalent of an xs expression
result = df.xs(1, level=2, drop_level=False)
tm.assert_frame_equal(result, expected)
df = (DataFrame(dict(A=['foo', 'foo', 'foo', 'foo'],
B=['a', 'a', 'a', 'a'],
C=[1, 2, 1, 2],
D=[1, 2, 3, 4]))
.set_index(['A', 'B', 'C']).sort_index())
assert not df.index.is_unique
expected = (DataFrame(dict(A=['foo', 'foo'], B=['a', 'a'],
C=[1, 1], D=[1, 3]))
.set_index(['A', 'B', 'C']).sort_index())
result = df.loc[(slice(None), slice(None), 1), :]
assert not result.index.is_unique
tm.assert_frame_equal(result, expected)
# GH12896
# numpy-implementation dependent bug
ints = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 14, 14, 16,
17, 18, 19, 200000, 200000]
n = len(ints)
idx = MultiIndex.from_arrays([['a'] * n, ints])
result = Series([1] * n, index=idx)
result = result.sort_index()
result = result.loc[(slice(None), slice(100000))]
expected = Series([1] * (n - 2), index=idx[:-2]).sort_index()
tm.assert_series_equal(result, expected)
def test_multiindex_slicers_datetimelike(self):
# GH 7429
# buggy/inconsistent behavior when slicing with datetime-like
import datetime
dates = [datetime.datetime(2012, 1, 1, 12, 12, 12) +
datetime.timedelta(days=i) for i in range(6)]
freq = [1, 2]
index = MultiIndex.from_product(
[dates, freq], names=['date', 'frequency'])
df = DataFrame(
np.arange(6 * 2 * 4, dtype='int64').reshape(
-1, 4), index=index, columns=list('ABCD'))
# multi-axis slicing
idx = pd.IndexSlice
expected = df.iloc[[0, 2, 4], [0, 1]]
result = df.loc[(slice(Timestamp('2012-01-01 12:12:12'),
Timestamp('2012-01-03 12:12:12')),
slice(1, 1)), slice('A', 'B')]
tm.assert_frame_equal(result, expected)
result = df.loc[(idx[Timestamp('2012-01-01 12:12:12'):Timestamp(
'2012-01-03 12:12:12')], idx[1:1]), slice('A', 'B')]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(Timestamp('2012-01-01 12:12:12'),
Timestamp('2012-01-03 12:12:12')), 1),
slice('A', 'B')]
tm.assert_frame_equal(result, expected)
# with strings
result = df.loc[(slice('2012-01-01 12:12:12', '2012-01-03 12:12:12'),
slice(1, 1)), slice('A', 'B')]
tm.assert_frame_equal(result, expected)
result = df.loc[(idx['2012-01-01 12:12:12':'2012-01-03 12:12:12'], 1),
idx['A', 'B']]
tm.assert_frame_equal(result, expected)
def test_multiindex_slicers_edges(self):
# GH 8132
# various edge cases
df = DataFrame(
{'A': ['A0'] * 5 + ['A1'] * 5 + ['A2'] * 5,
'B': ['B0', 'B0', 'B1', 'B1', 'B2'] * 3,
'DATE': ["2013-06-11", "2013-07-02", "2013-07-09", "2013-07-30",
"2013-08-06", "2013-06-11", "2013-07-02", "2013-07-09",
"2013-07-30", "2013-08-06", "2013-09-03", "2013-10-01",
"2013-07-09", "2013-08-06", "2013-09-03"],
'VALUES': [22, 35, 14, 9, 4, 40, 18, 4, 2, 5, 1, 2, 3, 4, 2]})
df['DATE'] = pd.to_datetime(df['DATE'])
df1 = df.set_index(['A', 'B', 'DATE'])
df1 = df1.sort_index()
# A1 - Get all values under "A0" and "A1"
result = df1.loc[(slice('A1')), :]
expected = df1.iloc[0:10]
tm.assert_frame_equal(result, expected)
# A2 - Get all values from the start to "A2"
result = df1.loc[(slice('A2')), :]
expected = df1
tm.assert_frame_equal(result, expected)
# A3 - Get all values under "B1" or "B2"
result = df1.loc[(slice(None), slice('B1', 'B2')), :]
expected = df1.iloc[[2, 3, 4, 7, 8, 9, 12, 13, 14]]
tm.assert_frame_equal(result, expected)
# A4 - Get all values between 2013-07-02 and 2013-07-09
result = df1.loc[(slice(None), slice(None),
slice('20130702', '20130709')), :]
expected = df1.iloc[[1, 2, 6, 7, 12]]
tm.assert_frame_equal(result, expected)
# B1 - Get all values in B0 that are also under A0, A1 and A2
result = df1.loc[(slice('A2'), slice('B0')), :]
expected = df1.iloc[[0, 1, 5, 6, 10, 11]]
tm.assert_frame_equal(result, expected)
# B2 - Get all values in B0, B1 and B2 (similar to what #2 is doing for
# the As)
result = df1.loc[(slice(None), slice('B2')), :]
expected = df1
tm.assert_frame_equal(result, expected)
# B3 - Get all values from B1 to B2 and up to 2013-08-06
result = df1.loc[(slice(None), slice('B1', 'B2'),
slice('2013-08-06')), :]
expected = df1.iloc[[2, 3, 4, 7, 8, 9, 12, 13]]
tm.assert_frame_equal(result, expected)
# B4 - Same as A4 but the start of the date slice is not a key.
# shows indexing on a partial selection slice
result = df1.loc[(slice(None), slice(None),
slice('20130701', '20130709')), :]
expected = df1.iloc[[1, 2, 6, 7, 12]]
tm.assert_frame_equal(result, expected)
def test_per_axis_per_level_doc_examples(self):
# test index maker
idx = pd.IndexSlice
# from indexing.rst / advanced
index = MultiIndex.from_product([_mklbl('A', 4), _mklbl('B', 2),
_mklbl('C', 4), _mklbl('D', 2)])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(np.arange(len(index) * len(columns), dtype='int64')
.reshape((len(index), len(columns))),
index=index, columns=columns)
result = df.loc[(slice('A1', 'A3'), slice(None), ['C1', 'C3']), :]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
result = df.loc[idx['A1':'A3', :, ['C1', 'C3']], :]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), slice(None), ['C1', 'C3']), :]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
result = df.loc[idx[:, :, ['C1', 'C3']], :]
tm.assert_frame_equal(result, expected)
# not sorted
def f():
df.loc['A1', ('a', slice('foo'))]
pytest.raises(UnsortedIndexError, f)
# GH 16734: not sorted, but no real slicing
tm.assert_frame_equal(df.loc['A1', (slice(None), 'foo')],
df.loc['A1'].iloc[:, [0, 2]])
df = df.sort_index(axis=1)
# slicing
df.loc['A1', (slice(None), 'foo')]
df.loc[(slice(None), slice(None), ['C1', 'C3']), (slice(None), 'foo')]
# setitem
df.loc(axis=0)[:, :, ['C1', 'C3']] = -10
def test_loc_axis_arguments(self):
index = MultiIndex.from_product([_mklbl('A', 4), _mklbl('B', 2),
_mklbl('C', 4), _mklbl('D', 2)])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(np.arange(len(index) * len(columns), dtype='int64')
.reshape((len(index), len(columns))),
index=index,
columns=columns).sort_index().sort_index(axis=1)
# axis 0
result = df.loc(axis=0)['A1':'A3', :, ['C1', 'C3']]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == 'A1' or a == 'A2' or a == 'A3') and (
c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
result = df.loc(axis='index')[:, :, ['C1', 'C3']]
expected = df.loc[[tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (c == 'C1' or c == 'C3')]]
tm.assert_frame_equal(result, expected)
# axis 1
result = df.loc(axis=1)[:, 'foo']
expected = df.loc[:, (slice(None), 'foo')]
tm.assert_frame_equal(result, expected)
result = df.loc(axis='columns')[:, 'foo']
expected = df.loc[:, (slice(None), 'foo')]
tm.assert_frame_equal(result, expected)
# invalid axis
def f():
df.loc(axis=-1)[:, :, ['C1', 'C3']]
pytest.raises(ValueError, f)
def f():
df.loc(axis=2)[:, :, ['C1', 'C3']]
pytest.raises(ValueError, f)
def f():
df.loc(axis='foo')[:, :, ['C1', 'C3']]
pytest.raises(ValueError, f)
def test_per_axis_per_level_setitem(self):
# test index maker
idx = pd.IndexSlice
# test multi-index slicing with per axis and per index controls
index = MultiIndex.from_tuples([('A', 1), ('A', 2),
('A', 3), ('B', 1)],
names=['one', 'two'])
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'),
('b', 'foo'), ('b', 'bah')],
names=['lvl0', 'lvl1'])
df_orig = DataFrame(
np.arange(16, dtype='int64').reshape(
4, 4), index=index, columns=columns)
df_orig = df_orig.sort_index(axis=0).sort_index(axis=1)
# identity
df = df_orig.copy()
df.loc[(slice(None), slice(None)), :] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc(axis=0)[:, :] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), slice(None)), (slice(None), slice(None))] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, (slice(None), slice(None))] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
# index
df = df_orig.copy()
df.loc[(slice(None), [1]), :] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), 1), :] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc(axis=0)[:, 1] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
# columns
df = df_orig.copy()
df.loc[:, (slice(None), ['foo'])] = 100
expected = df_orig.copy()
expected.iloc[:, [1, 3]] = 100
tm.assert_frame_equal(df, expected)
# both
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] = 100
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:, 1], idx[:, ['foo']]] = 100
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc['A', 'a'] = 100
expected = df_orig.copy()
expected.iloc[0:3, 0:2] = 100
tm.assert_frame_equal(df, expected)
# setting with a list-like
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] = np.array(
[[100, 100], [100, 100]], dtype='int64')
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
# not enough values
df = df_orig.copy()
def f():
df.loc[(slice(None), 1), (slice(None), ['foo'])] = np.array(
[[100], [100, 100]], dtype='int64')
pytest.raises(ValueError, f)
def f():
df.loc[(slice(None), 1), (slice(None), ['foo'])] = np.array(
[100, 100, 100, 100], dtype='int64')
pytest.raises(ValueError, f)
# with an alignable rhs
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] = df.loc[(slice(
None), 1), (slice(None), ['foo'])] * 5
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = expected.iloc[[0, 3], [1, 3]] * 5
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] *= df.loc[(slice(
None), 1), (slice(None), ['foo'])]
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] *= expected.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(df, expected)
rhs = df_orig.loc[(slice(None), 1), (slice(None), ['foo'])].copy()
rhs.loc[:, ('c', 'bah')] = 10
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ['foo'])] *= rhs
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] *= expected.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(df, expected)
class TestMultiIndexPanel(object):
def test_iloc_getitem_panel_multiindex(self):
with catch_warnings(record=True):
# GH 7199
# Panel with multi-index
multi_index = pd.MultiIndex.from_tuples([('ONE', 'one'),
('TWO', 'two'),
('THREE', 'three')],
names=['UPPER', 'lower'])
simple_index = [x[0] for x in multi_index]
wd1 = Panel(items=['First', 'Second'],
major_axis=['a', 'b', 'c', 'd'],
minor_axis=multi_index)
wd2 = Panel(items=['First', 'Second'],
major_axis=['a', 'b', 'c', 'd'],
minor_axis=simple_index)
expected1 = wd1['First'].iloc[[True, True, True, False], [0, 2]]
result1 = wd1.iloc[0, [True, True, True, False], [0, 2]] # WRONG
tm.assert_frame_equal(result1, expected1)
expected2 = wd2['First'].iloc[[True, True, True, False], [0, 2]]
result2 = wd2.iloc[0, [True, True, True, False], [0, 2]]
tm.assert_frame_equal(result2, expected2)
expected1 = DataFrame(index=['a'], columns=multi_index,
dtype='float64')
result1 = wd1.iloc[0, [0], [0, 1, 2]]
tm.assert_frame_equal(result1, expected1)
expected2 = DataFrame(index=['a'], columns=simple_index,
dtype='float64')
result2 = wd2.iloc[0, [0], [0, 1, 2]]
tm.assert_frame_equal(result2, expected2)
# GH 7516
mi = MultiIndex.from_tuples([(0, 'x'), (1, 'y'), (2, 'z')])
p = Panel(np.arange(3 * 3 * 3, dtype='int64').reshape(3, 3, 3),
items=['a', 'b', 'c'], major_axis=mi,
minor_axis=['u', 'v', 'w'])
result = p.iloc[:, 1, 0]
expected = Series([3, 12, 21], index=['a', 'b', 'c'], name='u')
tm.assert_series_equal(result, expected)
result = p.loc[:, (1, 'y'), 'u']
tm.assert_series_equal(result, expected)
def test_panel_setitem_with_multiindex(self):
with catch_warnings(record=True):
# 10360
# failing with a multi-index
arr = np.array([[[1, 2, 3], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]]],
dtype=np.float64)
# reg index
axes = dict(items=['A', 'B'], major_axis=[0, 1],
minor_axis=['X', 'Y', 'Z'])
p1 = Panel(0., **axes)
p1.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p1, expected)
# multi-indexes
axes['items'] = pd.MultiIndex.from_tuples(
[('A', 'a'), ('B', 'b')])
p2 = Panel(0., **axes)
p2.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p2, expected)
axes['major_axis'] = pd.MultiIndex.from_tuples(
[('A', 1), ('A', 2)])
p3 = Panel(0., **axes)
p3.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p3, expected)
axes['minor_axis'] = pd.MultiIndex.from_product(
[['X'], range(3)])
p4 = Panel(0., **axes)
p4.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p4, expected)
arr = np.array(
[[[1, 0, 0], [2, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
dtype=np.float64)
p5 = Panel(0., **axes)
p5.iloc[0, :, 0] = [1, 2]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p5, expected)
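# A small standalone sketch (not a test) of the slicing pattern exercised by
# the classes above: per-axis, per-level selection with pd.IndexSlice on a
# lexsorted MultiIndex. The index labels are arbitrary example values.
def _indexslice_demo():
    idx = pd.IndexSlice
    index = MultiIndex.from_product([['A', 'B', 'C'], [1, 2, 3]],
                                    names=['letter', 'number'])
    df = DataFrame({'val': np.arange(9)}, index=index)
    # sort_index() guarantees the lexsort depth needed for label slicing;
    # slicing an unsorted MultiIndex raises UnsortedIndexError instead.
    df = df.sort_index()
    # letters 'A'..'B' on level 0 and numbers 2..3 on level 1
    return df.loc[idx['A':'B', 2:3], :]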
|
|
#!/usr/bin/env python
#--------------------------------------------------------------
# Function to add the aperture class instances to the linac lattice,
# and a function to create the array with aperture nodes and losses.
#--------------------------------------------------------------
import math
import sys
import os
from orbit.py_linac.lattice import LinacApertureNode
from orbit.py_linac.lattice import Quad, Drift, Bend, BaseRF_Gap, AxisFieldRF_Gap
from orbit.py_linac.lattice import OverlappingQuadsNode
from orbit.py_linac.lattice import AxisField_and_Quad_RF_Gap
def Add_quad_apertures_to_lattice(accLattice, aprtNodes=[]):
"""
Function will add Aperture nodes at the entrance and exit of quads.
It returns the list of Aperture nodes.
"""
node_pos_dict = accLattice.getNodePositionsDict()
quads = accLattice.getNodesOfClasses([Quad,OverlappingQuadsNode])
for node in quads:
if(isinstance(node,Quad)):
if(node.hasParam("aperture") and node.hasParam("aprt_type")):
shape = node.getParam("aprt_type")
a = node.getParam("aperture")
node_name = node.getName()
(posBefore, posAfter) = node_pos_dict[node]
apertureNodeBefore = LinacApertureNode(shape,a/2.0,a/2.0,posBefore)
apertureNodeAfter = LinacApertureNode(shape,a/2.0,a/2.0,posAfter)
apertureNodeBefore.setName(node_name+":AprtIn")
apertureNodeAfter.setName(node_name+":AprtOut")
apertureNodeBefore.setSequence(node.getSequence())
apertureNodeAfter.setSequence(node.getSequence())
node.addChildNode(apertureNodeBefore,node.ENTRANCE)
node.addChildNode(apertureNodeAfter,node.EXIT)
aprtNodes.append(apertureNodeBefore)
aprtNodes.append(apertureNodeAfter)
if(isinstance(node,OverlappingQuadsNode)):
# add apertures for the quads with overlapping fields
nParts = node.getnParts()
simple_quads = node.getQuads()
quad_centers = node.getCentersOfField()
(node_start_pos,node_end_pos) = node_pos_dict[node]
pos_part_arr = []
s = 0.
for part_ind in range(nParts):
pos_part_arr.append(s)
s += node.getLength(part_ind)
for quad_ind in range(len(simple_quads)):
quad = simple_quads[quad_ind]
if(quad.hasParam("aperture") and quad.hasParam("aprt_type")):
shape = quad.getParam("aprt_type")
a = quad.getParam("aperture")
quad_name = quad.getName()
length = quad.getLength()
pos_z = quad_centers[quad_ind]
posBefore = pos_z - length/2.
posAfter = pos_z + length/2.
for part_ind in range(nParts-1):
if(posBefore >= pos_part_arr[part_ind] and posBefore < pos_part_arr[part_ind+1]):
apertureNodeBefore = LinacApertureNode(shape,a/2.0,a/2.0,posBefore + node_start_pos)
apertureNodeBefore.setName(quad_name+":AprtIn")
apertureNodeBefore.setSequence(node.getSequence())
node.addChildNode(apertureNodeBefore,node.BODY,part_ind,node.BEFORE)
aprtNodes.append(apertureNodeBefore)
if(posAfter > pos_part_arr[part_ind] and posAfter <= pos_part_arr[part_ind+1]):
apertureNodeAfter = LinacApertureNode(shape,a/2.0,a/2.0,posAfter + node_start_pos)
apertureNodeAfter.setName(quad_name+":AprtOut")
apertureNodeAfter.setSequence(node.getSequence())
node.addChildNode(apertureNodeAfter,node.BODY,part_ind,node.AFTER)
aprtNodes.append(apertureNodeAfter)
return aprtNodes
def Add_bend_apertures_to_lattice(accLattice, aprtNodes=[], step = 0.25):
"""
Function will add Aperture nodes at the bend nodes of the lattice if they
have aperture parameters.
The distance between aperture nodes is defined by the step input parameter
(in meters).
"""
node_pos_dict = accLattice.getNodePositionsDict()
bends = accLattice.getNodesOfClasses([Bend,])
if(len(bends) <= 0): return aprtNodes
for node in bends:
if(node.hasParam("aperture_x") and node.hasParam("aperture_y") and node.hasParam("aprt_type")):
shape = node.getParam("aprt_type")
a_x = node.getParam("aperture_x")
a_y = node.getParam("aperture_y")
if(shape != 3): continue
node_name = node.getName()
(posBefore, posAfter) = node_pos_dict[node]
nParts = node.getnParts()
s = posBefore
pos_part_arr = []
for part_ind in range(nParts):
pos_part_arr.append(s)
s += node.getLength(part_ind)
run_path = pos_part_arr[0]
run_path_old = pos_part_arr[0] - 2*step
for part_ind in range(nParts):
run_path = pos_part_arr[part_ind]
if(run_path >= run_path_old + step):
apertureNode = LinacApertureNode(shape,a_x/2.0,a_y/2.0,run_path)
aprt_name = node_name+":"+str(part_ind)+":Aprt"
if(nParts == 1): aprt_name = node_name+":Aprt"
apertureNode.setName(aprt_name)
apertureNode.setSequence(node.getSequence())
node.addChildNode(apertureNode,node.BODY,part_ind,node.BEFORE)
aprtNodes.append(apertureNode)
run_path_old = run_path
return aprtNodes
def Add_rfgap_apertures_to_lattice(accLattice, aprtNodes=[]):
"""
Function will add Aperture nodes at the entrance and exit of each RF gap.
It returns the list of Aperture nodes.
"""
node_pos_dict = accLattice.getNodePositionsDict()
rfgaps = accLattice.getNodesOfClasses([BaseRF_Gap,AxisFieldRF_Gap,AxisField_and_Quad_RF_Gap])
for node in rfgaps:
if(node.hasParam("aperture") and node.hasParam("aprt_type")):
shape = node.getParam("aprt_type")
a = node.getParam("aperture")
node_name = node.getName()
(posBefore, posAfter) = node_pos_dict[node]
apertureNodeBefore = LinacApertureNode(shape,a/2.0,a/2.0,posBefore)
apertureNodeAfter = LinacApertureNode(shape,a/2.0,a/2.0,posAfter)
apertureNodeBefore.setName(node_name+":AprtIn")
apertureNodeAfter.setName(node_name+":AprtOut")
apertureNodeBefore.setSequence(node.getSequence())
apertureNodeAfter.setSequence(node.getSequence())
node.addChildNode(apertureNodeBefore,node.ENTRANCE)
node.addChildNode(apertureNodeAfter,node.EXIT)
aprtNodes.append(apertureNodeBefore)
aprtNodes.append(apertureNodeAfter)
return aprtNodes
def Add_drift_apertures_to_lattice(accLattice, pos_start, pos_end, step, aperture_d, aprtNodes=[]):
"""
Function will add Aperture nodes at the drift nodes of the lattice between
positions pos_start and pos_end, with a minimal distance of 'step' between
aperture nodes. The shape of these apertures is always shape=1, which means
a circle with diameter = aperture_d (in meters).
It returns the list of Aperture nodes.
"""
shape = 1
a = aperture_d
node_pos_dict = accLattice.getNodePositionsDict()
drifts = accLattice.getNodesOfClasses([Drift,])
if(len(drifts) <= 0): return aprtNodes
#---- run_path = posBefore in (posBefore, posAfter) = node_pos_dict[node]
run_path = node_pos_dict[drifts[0]][0]
run_path_old = run_path - 2*step
for node in drifts:
node_name = node.getName()
(posBefore, posAfter) = node_pos_dict[node]
if(posBefore > pos_end): break
nParts = node.getnParts()
s = posBefore
pos_part_arr = []
for part_ind in range(nParts):
pos_part_arr.append(s)
s += node.getLength(part_ind)
for part_ind in range(nParts):
run_path = pos_part_arr[part_ind]
if(run_path >= pos_start and run_path <= pos_end):
if(run_path >= run_path_old + step):
apertureNode = LinacApertureNode(shape,a/2.0,a/2.0,run_path)
aprt_name = node_name+":"+str(part_ind)+":Aprt"
if(nParts == 1): aprt_name = node_name+":Aprt"
apertureNode.setName(aprt_name)
apertureNode.setSequence(node.getSequence())
node.addChildNode(apertureNode,node.BODY,part_ind,node.BEFORE)
aprtNodes.append(apertureNode)
run_path_old = run_path
return aprtNodes
def AddScrapersAperturesToLattice(accLattice, node_name, x_size, y_size, aprtNodes=[]):
"""
Function will add a rectangular Aperture node (shape=3) at the node with the given name.
Parameters x_size and y_size are the full horizontal and vertical sizes of the aperture.
"""
shape = 3
node_pos_dict = accLattice.getNodePositionsDict()
node = accLattice.getNodesForName(node_name)[0]
(posBefore, posAfter) = node_pos_dict[node]
apertureNode = LinacApertureNode(shape,x_size/2.0,y_size/2.0,posBefore)
apertureNode.setName(node_name+":Aprt")
apertureNode.setSequence(node.getSequence())
node.addChildNode(apertureNode,node.ENTRANCE)
aprtNodes.append(apertureNode)
aprtNodes = sorted(aprtNodes, key = lambda x: x.getPosition(), reverse = False)
return aprtNodes
def GetLostDistributionArr(aprtNodes, bunch_lost):
"""
Function returns an array of [aprtNode, sum_of_losses] pairs.
The sum_of_losses is the number of lost particles, or the sum of their macro
sizes if the particle attribute "macrosize" is defined.
"""
lossDist_arr = []
aprtPos_arr = []
#--- first we sort the apertures according to their positions
aprtNodes = sorted(aprtNodes, key = lambda x: x.getPosition(), reverse = False)
for aprt_node in aprtNodes:
aprtPos_arr.append(aprt_node.getPosition())
loss_sum = 0.
lossDist_arr.append([aprt_node,loss_sum])
if(len(aprtPos_arr) <= 0): return lossDist_arr
#-----------------------------------------------------------------
def indexFindF(pos_arr,pos,ind_start = -1,ind_stop = -1):
""" This function will find the index of nearest to pos point in pos_arr"""
if(ind_start < 0 or ind_stop < 0):
ind_start = 0
ind_stop = len(pos_arr) - 1
return indexFindF(pos_arr,pos,ind_start,ind_stop)
if(abs(ind_start - ind_stop) <= 1):
dist0 = abs(pos_arr[ind_start] - pos)
dist1 = abs(pos_arr[ind_stop] - pos)
if(dist0 <= dist1): return ind_start
return ind_stop
ind_mdl = int((ind_start + ind_stop)/2.0)
if(pos_arr[ind_start] <= pos and pos < pos_arr[ind_mdl]):
return indexFindF(pos_arr,pos,ind_start,ind_mdl)
else:
return indexFindF(pos_arr,pos,ind_mdl,ind_stop)
#-----------------------------------------------------------------
has_macrosize_partAttr = bunch_lost.hasPartAttr("macrosize")
if(not bunch_lost.hasPartAttr("LostParticleAttributes")): return lossDist_arr
macroSize = bunch_lost.macroSize()
nParticles = bunch_lost.getSize()
for ind in range(nParticles):
pos = bunch_lost.partAttrValue("LostParticleAttributes",ind,0)
if(pos < aprtPos_arr[0] or pos > aprtPos_arr[len(aprtPos_arr)-1]): continue
pos_ind = indexFindF(aprtPos_arr,pos)
if(has_macrosize_partAttr):
macroSize = bunch_lost.partAttrValue("macrosize",ind,0)
lossDist_arr[pos_ind][1] += macroSize
continue
lossDist_arr[pos_ind][1] += 1.0
return lossDist_arr
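# A usage sketch (illustrative, not part of this module): the aperture-adding
# functions above are typically chained on one shared list, and the collected
# nodes are passed to GetLostDistributionArr() after tracking. The arguments
# accLattice and bunch_lost are placeholders for an already-built ORBIT linac
# lattice and the lost-particles bunch produced by tracking; the 0.25 m step
# and 0.04 m drift aperture diameter are arbitrary example values.
def _collect_aperture_losses(accLattice, bunch_lost, pos_start, pos_end):
    aprtNodes = []
    aprtNodes = Add_quad_apertures_to_lattice(accLattice, aprtNodes)
    aprtNodes = Add_rfgap_apertures_to_lattice(accLattice, aprtNodes)
    aprtNodes = Add_bend_apertures_to_lattice(accLattice, aprtNodes, step=0.25)
    aprtNodes = Add_drift_apertures_to_lattice(accLattice, pos_start, pos_end,
                                               0.25, 0.04, aprtNodes)
    # each entry of the result is a [aperture_node, summed_losses] pair
    lossDist_arr = GetLostDistributionArr(aprtNodes, bunch_lost)
    return lossDist_arr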
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Tests for python.tpu.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl.testing import parameterized
from tensorflow.python.client import session
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.tpu import feature_column_v2 as tpu_fc
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_function
def _initialized_session():
sess = session.Session()
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
class _TestStateManager(fc_lib.StateManager):
def __init__(self, trainable=True):
self._all_variables = {}
self._trainable = trainable
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
if feature_column not in self._all_variables:
self._all_variables[feature_column] = {}
var_dict = self._all_variables[feature_column]
if name in var_dict:
return var_dict[name]
else:
var = variable_scope.get_variable(
name=name,
shape=shape,
dtype=dtype,
trainable=self._trainable and trainable,
use_resource=use_resource,
initializer=initializer)
var_dict[name] = var
return var
def get_variable(self, feature_column, name):
return self._all_variables[feature_column][name]
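# A brief sketch (not a test case) of how the state manager above behaves: the
# first create_variable() call for a given (column, name) pair builds the
# variable through variable_scope.get_variable, and any later call with the
# same pair returns the cached object. Runs in graph mode, like the tests
# below; the key 'demo' and variable name are arbitrary example values.
def _state_manager_demo():
  column = fc_lib.categorical_column_with_identity(key='demo', num_buckets=3)
  manager = _TestStateManager()
  with ops.Graph().as_default():
    var = manager.create_variable(
        column, 'embedding_weights', shape=(3, 2), dtype=dtypes.float32,
        initializer=init_ops.zeros_initializer())
    cached = manager.create_variable(column, 'embedding_weights', shape=(3, 2))
    assert cached is var
    assert manager.get_variable(column, 'embedding_weights') is var
    return var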
class EmbeddingColumnTestV2(test.TestCase, parameterized.TestCase):
def test_defaults(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column_v2(
categorical_column, dimension=embedding_dimension)
# Can't test default initializer as it's a random function.
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('mean', embedding_column.combiner)
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
def test_all_constructor_args(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
embedding_column = tpu_fc.embedding_column_v2(
categorical_column,
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer')
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
self.assertEqual('my_combiner', embedding_column.combiner)
self.assertEqual('my_initializer', embedding_column.initializer())
self.assertEqual('aaa_embedding', embedding_column.name)
self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column._parse_example_spec)
@parameterized.named_parameters(
{
'testcase_name': 'use_safe_embedding_lookup',
'use_safe_embedding_lookup': True,
}, {
'testcase_name': 'dont_use_safe_embedding_lookup',
'use_safe_embedding_lookup': False,
})
@test_util.deprecated_graph_mode_only
def test_feature_layer_cpu(self, use_safe_embedding_lookup):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 1), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 2))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info=None):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
expected_lookups_sequence = (
# example 0, ids [2], embedding = [[7, 11], [0, 0]]
((7., 11.), (0., 0.),),
        # example 1, ids [0, 1], embedding = [[1, 2], [3, 5]]
((1., 2.), (3., 5.),),
        # example 2, ids [], embedding = [[0, 0], [0, 0]]
((0., 0.), (0., 0.),),
        # example 3, ids [1], embedding = [[3, 5], [0, 0]]
((3., 5.), (0., 0.),),
)
# Build columns.
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sequence_categorical_column = (
fc_lib.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size))
embedding_column = tpu_fc.embedding_column_v2(
categorical_column,
dimension=embedding_dimension,
initializer=_initializer,
use_safe_embedding_lookup=use_safe_embedding_lookup)
sequence_embedding_column = tpu_fc.embedding_column_v2(
sequence_categorical_column,
dimension=embedding_dimension,
initializer=_initializer,
max_sequence_length=2,
use_safe_embedding_lookup=use_safe_embedding_lookup)
# Provide sparse input and get dense result.
features = {'aaa': sparse_input, 'bbb': sparse_input}
dense_features = fc_lib.DenseFeatures([embedding_column])
sequence_features = fc_lib.SequenceFeatures([sequence_embedding_column])
embedding_lookup = dense_features(features)
sequence_embedding_lookup = sequence_features(features)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('dense_features/aaa_embedding/embedding_weights:0',
'sequence_features/bbb_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0])
self.assertAllEqual(expected_lookups, embedding_lookup)
self.assertAllEqual(expected_lookups_sequence,
sequence_embedding_lookup[0].eval())
# The graph will still have SparseFillEmptyRows due to sequence being
# a Rank3 embedding lookup.
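    # With safe embedding lookups both the dense and the rank-3 sequence
    # lookups go through SparseFillEmptyRows (two ops); without them, only
    # the sequence lookup does (one op).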
if use_safe_embedding_lookup:
self.assertEqual(2, [
x.type for x in ops.get_default_graph().get_operations()
].count('SparseFillEmptyRows'))
else:
self.assertEqual(1, [
x.type for x in ops.get_default_graph().get_operations()
].count('SparseFillEmptyRows'))
def test_deepcopy(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_column = tpu_fc.embedding_column_v2(
categorical_column, dimension=2)
embedding_column_copy = copy.deepcopy(embedding_column)
self.assertEqual(embedding_column.dimension,
embedding_column_copy.dimension)
self.assertEqual(embedding_column._max_sequence_length,
embedding_column_copy._max_sequence_length)
def test_with_scope_validation(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=.5)
embedding_column = tpu_fc._TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=embedding_dimension,
combiner='mean',
initializer=initializer,
max_sequence_length=0,
learning_rate_fn=None,
use_safe_embedding_lookup=True,
bypass_scope_validation=False)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
state_manager = _TestStateManager()
with tpu_function.tpu_shard_context(1):
with variable_scope.variable_scope('tower1/scope1'):
embedding_column.create_state(state_manager)
with variable_scope.variable_scope('tower2/scope2'):
# With default scope validation, the same column cannot be used in a new
# variable scope.
with self.assertRaisesRegex(ValueError,
'the variable scope name is different'):
embedding_column.create_state(state_manager)
def test_bypass_scope_validation(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=.5)
embedding_column = tpu_fc._TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=embedding_dimension,
combiner='mean',
initializer=initializer,
max_sequence_length=0,
learning_rate_fn=None,
use_safe_embedding_lookup=True,
bypass_scope_validation=True)
self.assertIs(categorical_column, embedding_column.categorical_column)
self.assertEqual(embedding_dimension, embedding_column.dimension)
state_manager = _TestStateManager()
with tpu_function.tpu_shard_context(1):
with variable_scope.variable_scope('tower1/scope1'):
embedding_column.create_state(state_manager)
with variable_scope.variable_scope('tower2/scope2'):
embedding_column.create_state(state_manager)
def test_deepcopy_with_bypass_scope_validation(self):
categorical_column = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_dimension = 2
initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=.5)
embedding_column = tpu_fc._TPUEmbeddingColumnV2(
categorical_column=categorical_column,
dimension=embedding_dimension,
combiner='mean',
initializer=initializer,
max_sequence_length=0,
use_safe_embedding_lookup=False,
bypass_scope_validation=True)
embedding_column_copy = copy.deepcopy(embedding_column)
self.assertEqual(embedding_dimension, embedding_column_copy.dimension)
self.assertEqual(embedding_column._max_sequence_length,
embedding_column_copy._max_sequence_length)
self.assertTrue(embedding_column_copy._bypass_scope_validation)
self.assertFalse(embedding_column_copy.use_safe_embedding_lookup)
class SharedEmbeddingColumnTestV2(test.TestCase, parameterized.TestCase):
@test_util.deprecated_graph_mode_only
def test_defaults(self):
vocabulary_size = 3
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_dimension = 2
embedding_column_b, embedding_column_a = tpu_fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual('mean', embedding_column_a.combiner)
self.assertEqual('mean', embedding_column_b.combiner)
self.assertIsNotNone(embedding_column_a.get_initializer())
self.assertIsNotNone(embedding_column_b.get_initializer())
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_a.get_embedding_var_name())
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_b.get_embedding_var_name())
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
@test_util.deprecated_graph_mode_only
def test_all_constructor_args(self):
vocabulary_size = 3
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_dimension = 2
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='var_scope_name')
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual((vocabulary_size, embedding_dimension),
embedding_column_a.get_embedding_table_size())
self.assertEqual('my_combiner', embedding_column_a.combiner)
self.assertEqual('my_combiner', embedding_column_b.combiner)
self.assertEqual('my_initializer', embedding_column_a.get_initializer()())
self.assertEqual('my_initializer', embedding_column_b.get_initializer()())
self.assertEqual('var_scope_name',
embedding_column_a.get_embedding_var_name())
self.assertEqual('var_scope_name',
embedding_column_b.get_embedding_var_name())
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)
@parameterized.named_parameters(
{
'testcase_name': 'use_safe_embedding_lookup',
'use_safe_embedding_lookup': True
}, {
'testcase_name': 'dont_use_safe_embedding_lookup',
'use_safe_embedding_lookup': False
})
@test_util.deprecated_graph_mode_only
def test_feature_layer_cpu(self, use_safe_embedding_lookup):
# Inputs.
vocabulary_size = 3
input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(3, 2))
input_features = {'aaa': input_a, 'bbb': input_b}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info=None):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups_a = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
)
expected_lookups_b = (
# example 0:
((7., 11.), (0., 0.),), # ids [2], embedding = [[7, 11], [0, 0]]
# example 1:
((1., 2.), (3., 5.),), # ids [0, 1], embedding = [[1, 2], [3, 5]]
# example 2:
((0., 0.), (0., 0.),), # ids [], embedding = [[0, 0], [0, 0]]
)
# Build columns.
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer,
max_sequence_lengths=[0, 2],
use_safe_embedding_lookup=use_safe_embedding_lookup)
# Provide sparse input and get dense result.
dense_features = fc_lib.DenseFeatures([embedding_column_a])
sequence_features = fc_lib.SequenceFeatures([embedding_column_b])
embedding_lookup_a = dense_features(input_features)
embedding_lookup_b = sequence_features(input_features)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('aaa_bbb_shared_embedding:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var)
self.assertAllEqual(expected_lookups_a, embedding_lookup_a)
self.assertAllEqual(expected_lookups_b,
embedding_lookup_b[0].eval())
# The graph will still have SparseFillEmptyRows due to sequence being
# a Rank3 embedding lookup.
if use_safe_embedding_lookup:
self.assertEqual(2, [
x.type for x in ops.get_default_graph().get_operations()
].count('SparseFillEmptyRows'))
else:
self.assertEqual(1, [
x.type for x in ops.get_default_graph().get_operations()
].count('SparseFillEmptyRows'))
def test_deepcopy(self):
vocabulary_size = 3
categorical_column_a = fc_lib.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc_lib.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_dimension = 2
columns = tpu_fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
columns_copy = copy.deepcopy(columns)
self.assertEqual(
[column._shared_embedding_collection_name for column in columns],
[column._shared_embedding_collection_name for column in columns_copy])
class DeviceSpecificEmbeddingColumnTestV2(test.TestCase,
parameterized.TestCase):
@parameterized.named_parameters(
{
'testcase_name': 'invalid_shared',
'shared': True,
}, {
'testcase_name': 'invalid_not_shared',
'shared': False,
})
@test_util.deprecated_graph_mode_only
def test_invalid_cases(self, shared):
# Inputs.
input_sparse_tensor = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (1, 4)),
values=(2, 0, 1, 3),
dense_shape=(2, 5))
input_features = {'inp': input_sparse_tensor}
# Build columns.
categorical_column_input = fc_lib.categorical_column_with_identity(
key='inp', num_buckets=3)
# Training on TPU with cpu embedding lookups is not supported.
if shared:
embedding_column = tpu_fc.shared_embedding_columns_v2(
[categorical_column_input],
dimension=2,
embedding_lookup_device='cpu',
tensor_core_shape=[None, 3])
else:
embedding_column = tpu_fc.embedding_column_v2(
categorical_column_input,
dimension=2,
embedding_lookup_device='cpu',
tensor_core_shape=[None, 3])
dense_features = fc_lib.DenseFeatures(embedding_column)
with self.assertRaisesRegex(
ValueError,
r'.*embedding_lookup_device=\"cpu\" during training is not'):
dense_features(input_features)
    # Inference with the TPU embedding hardware is not supported.
if shared:
embedding_column = tpu_fc.shared_embedding_columns_v2(
[categorical_column_input],
dimension=2,
embedding_lookup_device='tpu_embedding_core',
tensor_core_shape=[None, 3])
else:
embedding_column = tpu_fc.embedding_column_v2(
categorical_column_input,
dimension=2,
embedding_lookup_device='tpu_embedding_core',
tensor_core_shape=[None, 3])
context = tpu._TPUInferenceContext('tpu_inference')
context.Enter()
dense_features = fc_lib.DenseFeatures(embedding_column)
with self.assertRaisesRegex(
ValueError,
r'Using embedding_lookup_device=tpu_embedding_core during inference is '
):
dense_features(input_features)
context.Exit()
@parameterized.named_parameters(
{
'testcase_name': 'combiner_mean_shared',
'shared': True,
'combiner': 'mean'
}, {
'testcase_name': 'combiner_sum_shared',
'shared': True,
'combiner': 'sum'
}, {
'testcase_name': 'combiner_sqrtn_shared',
'shared': True,
'combiner': 'sqrtn'
}, {
'testcase_name': 'combiner_mean_not_shared',
'shared': False,
'combiner': 'mean'
}, {
'testcase_name': 'combiner_sum_not_shared',
'shared': False,
'combiner': 'sum'
}, {
'testcase_name': 'combiner_sqrtn_not_shared',
'shared': False,
'combiner': 'sqrtn'
})
@test_util.deprecated_graph_mode_only
def test_dense_embedding_lookup(self, shared, combiner):
# Inputs.
vocabulary_size = 3
input_sparse_tensor = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1, 3]
indices=((0, 0), (1, 0), (1, 1), (1, 4)),
values=(2, 0, 1, 3),
dense_shape=(2, 5))
input_features = {'inp': input_sparse_tensor}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.), # id 2
(13., 17.) # id 3
)
def _initializer(shape, dtype, partition_info=None):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Build columns.
categorical_column_input = fc_lib.categorical_column_with_identity(
key='inp', num_buckets=vocabulary_size)
    # Set tensor_core_shape to [None, 3] so the sparse input is densified to a
    # fixed width of 3 while keeping the batch size dynamic.
if shared:
embedding_column = tpu_fc.shared_embedding_columns_v2(
[categorical_column_input],
dimension=embedding_dimension,
initializer=_initializer,
combiner=combiner,
embedding_lookup_device='tpu_tensor_core',
tensor_core_shape=[None, 3])
else:
embedding_column = tpu_fc.embedding_column_v2(
categorical_column_input,
dimension=embedding_dimension,
initializer=_initializer,
combiner=combiner,
embedding_lookup_device='tpu_tensor_core',
tensor_core_shape=[None, 3])
# Run in TPUContexts so that we hit the intended densification case.
context = tpu._TPUInferenceContext('tpu_inference')
context.Enter()
with tpu_function.tpu_shard_context(1):
dense_features = fc_lib.DenseFeatures(embedding_column)
# Sqrtn combiner not supported for now.
if combiner == 'sqrtn':
with self.assertRaisesRegex(
ValueError, 'Dense TPU Embedding does not support combiner'):
embedding_lookup = dense_features(input_features)
return
if combiner == 'mean':
expected_lookups = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) =
# [2, 3.5]
)
elif combiner == 'sum':
expected_lookups = (
# example 0:
(7., 11.), # ids [2], embedding = [7, 11]
# example 1:
(4., 7), # ids [0, 1], embedding = sum([1, 2] + [3, 5]) = [4, 7]
)
embedding_lookup = dense_features(input_features)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
if shared:
self.assertCountEqual(('inp_shared_embedding:0',),
tuple([v.name for v in global_vars]))
else:
self.assertCountEqual(
('dense_features/inp_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var)
eval_res = embedding_lookup.eval()
self.assertAllEqual(expected_lookups, eval_res)
context.Exit()
@test_util.deprecated_graph_mode_only
def test_empty_row(self):
# Inputs.
vocabulary_size = 3
input_sparse_tensor = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [0, 1, 3]
indices=((1, 0), (1, 1), (1, 4)),
values=(0, 1, 3),
dense_shape=(2, 5))
input_features = {'inp': input_sparse_tensor}
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.), # id 2
(13., 17.) # id 3
)
def _initializer(shape, dtype, partition_info=None):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Build columns.
categorical_column_input = fc_lib.categorical_column_with_identity(
key='inp', num_buckets=vocabulary_size)
    # Set tensor_core_shape to [None, 3] so the sparse input is densified to a
    # fixed width of 3 while keeping the batch size dynamic.
embedding_column = tpu_fc.embedding_column_v2(
categorical_column_input,
dimension=embedding_dimension,
initializer=_initializer,
combiner='mean',
embedding_lookup_device='tpu_tensor_core',
tensor_core_shape=[None, 3])
# Run in TPUContexts so that we hit the intended densification case.
context = tpu._TPUInferenceContext('tpu_inference')
context.Enter()
with tpu_function.tpu_shard_context(1):
dense_features = fc_lib.DenseFeatures(embedding_column)
expected_lookups = (
# example 0:
(0., 0.), # ids [], embedding = [0, 0]
# example 1:
(2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
)
embedding_lookup = dense_features(input_features)
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(
('dense_features/inp_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var)
eval_res = embedding_lookup.eval()
self.assertAllEqual(expected_lookups, eval_res)
context.Exit()
@test_util.deprecated_graph_mode_only
def test_error_dense_shape_invalid(self):
categorical_column_input = fc_lib.categorical_column_with_identity(
key='inp', num_buckets=5)
with self.assertRaisesRegex(ValueError, 'tensor_core_shape must be size 2'):
tpu_fc.shared_embedding_columns_v2([categorical_column_input],
dimension=20,
tensor_core_shape=[None, 20, 15])
if __name__ == '__main__':
test.main()
|
|
from nose.tools import eq_
import hashlib
import json
import nose
from js_helper import _do_real_test_raw as _js_test
from validator.testcases.markup.markuptester import MarkupParser
import validator.testcases.jetpack as jetpack
from validator.errorbundler import ErrorBundle
from validator.xpi import XPIManager
def _do_test(xpi_package, allow_old_sdk=True):
err = ErrorBundle()
jetpack.inspect_jetpack(err, xpi_package, allow_old_sdk=allow_old_sdk)
return err
class MockXPI(object):
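    """Minimal stand-in for an XPI package, backed by a dict of file contents."""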
def __init__(self, resources):
self.resources = resources
def read(self, name):
if isinstance(self.resources[name], bool):
return ''
return self.resources[name]
def __iter__(self):
for name in self.resources.keys():
yield name
def __contains__(self, name):
return name in self.resources
def test_not_jetpack():
"""Test that add-ons which do not match the Jetpack pattern are ignored."""
err = _do_test(MockXPI({'foo': True, 'bar': True}))
assert not err.errors
assert not err.warnings
assert not err.notices
eq_(err.metadata.get('is_jetpack', False), False)
def test_package_json_jetpack():
"""Test that add-ons with the new package.json are treated as jetpack."""
err = _do_test(MockXPI({'bootstrap.js': '', 'package.json': ''}))
assert not err.errors
assert not err.warnings
assert not err.notices
eq_(err.metadata.get('is_jetpack'), True)
def test_bad_harnessoptions():
"""Test that a malformed harness-options.json file is warned against."""
err = _do_test(MockXPI({'bootstrap.js': True,
'components/harness.js': True,
'harness-options.json': 'foo bar'}))
assert err.failed()
assert err.warnings
print err.warnings
assert err.warnings[0]['id'][-1] == 'bad_harness-options.json'
def test_pass_jetpack():
"""Test that a minimalistic Jetpack setup will pass."""
harnessoptions = {'sdkVersion': '1.8-dev',
'jetpackID': '',
'manifest': {}}
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'is_jetpack' in err.metadata and err.metadata['is_jetpack']
# Test that all files are marked as pretested.
pretested_files = err.get_resource('pretested_files')
assert pretested_files
assert 'bootstrap.js' in pretested_files
def test_package_json_pass_jetpack():
"""Test that a minimalistic package.json Jetpack setup will pass."""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'package.json': '{}'}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'is_jetpack' in err.metadata and err.metadata['is_jetpack']
# Test that all files are marked as pretested.
pretested_files = err.get_resource('pretested_files')
assert pretested_files
assert 'bootstrap.js' in pretested_files
def test_package_json_different_bootstrap():
"""Test that a minimalistic package.json Jetpack setup will pass."""
err = _do_test(MockXPI({'bootstrap.js': "var foo = 'bar';",
'package.json': '{}'}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'is_jetpack' in err.metadata and err.metadata['is_jetpack']
    # Test that no files are marked as pretested.
pretested_files = err.get_resource('pretested_files')
assert not pretested_files
assert 'bootstrap.js' not in pretested_files
def test_missing_elements():
"""Test that missing elements in harness-options will fail."""
harnessoptions = {'sdkVersion': '1.8-dev',
'jetpackID': ''}
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
assert err.failed()
def test_skip_safe_files():
"""Test that missing elements in harness-options will fail."""
harnessoptions = {'sdkVersion': '1.8-dev',
'jetpackID': '',
'manifest': {}}
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions),
'foo.png': True,
'bar.JpG': True,
'safe.GIF': True,
'icon.ico': True,
'foo/.DS_Store': True}))
assert not err.failed()
def test_pass_manifest_elements():
"""Test that proper elements in harness-options will pass."""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'jetpackID': 'foobar',
'sdkVersion': '1.8-dev',
'manifest': {
'bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'sectionName': 'lib',
'moduleName': 'drawing',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions),
'resources/bootstrap.js': bootstrap}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'jetpack_loaded_modules' in err.metadata
nose.tools.eq_(err.metadata['jetpack_loaded_modules'],
['addon-kit-lib/drawing.js'])
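    # The expected module name combines packageName, sectionName and moduleName:
    # 'addon-kit' + 'lib' + 'drawing' -> 'addon-kit-lib/drawing.js'.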
assert 'jetpack_identified_files' in err.metadata
assert 'identified_files' in err.metadata
assert 'bootstrap.js' in err.metadata['jetpack_identified_files']
assert 'bootstrap.js' in err.metadata['identified_files']
assert 'jetpack_unknown_files' in err.metadata
assert not err.metadata['jetpack_unknown_files']
def test_ok_resource():
"""Test that resource:// URIs aren't flagged."""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'jetpackID': 'foobar',
'sdkVersion': '1.8-dev',
'manifest': {
'resource://bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'sectionName': 'lib',
'moduleName': 'drawing',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'resources/bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert not err.failed()
def test_bad_resource():
"""Test for failure on non-resource:// modules."""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'sdkVersion': '1.8-dev',
'jetpackID': 'foobar',
'manifest':
{'http://foo.com/bar/bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'sectionName': 'lib',
'moduleName': 'drawing',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'resources/bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert err.failed()
def test_missing_manifest_elements():
"""Test that missing manifest elements in harness-options will fail."""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'sdkVersion': '1.8-dev',
'jetpackID': 'foobar',
'manifest':
{'resource://bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'moduleName': 'drawing',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'resources/bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert err.failed()
def test_mismatched_hash():
"""
Test that failure occurs when the actual file hash doesn't match the hash
    provided by harness-options.json.
"""
harnessoptions = {
'sdkVersion': '1.8-dev',
'jetpackID': 'foobar',
'manifest':
{'resource://bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'moduleName': 'drawing',
'jsSHA256': '',
'docsSHA256': ''}}}
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'resources/bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert err.failed()
def test_mismatched_db_hash():
"""
    Test that files whose hashes are not in the Jetpack known-file database
    are reported as unknown files rather than causing validation to fail.
"""
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
# Break the hash with this.
bootstrap = 'function() {}; %s' % bootstrap
bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()
harnessoptions = {
'sdkVersion': '1.8-dev',
'jetpackID': 'foobar',
'manifest':
{'resource://bootstrap.js':
{'requirements': {},
'packageName': 'addon-kit',
'moduleName': 'drawing',
'sectionName': 'lib',
'jsSHA256': bootstrap_hash,
'docsSHA256': bootstrap_hash}}}
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'resources/bootstrap.js': bootstrap,
'harness-options.json':
json.dumps(harnessoptions)}))
print err.print_summary(verbose=True)
assert not err.failed()
assert 'jetpack_loaded_modules' in err.metadata
assert not err.metadata['jetpack_loaded_modules']
assert 'jetpack_identified_files' in err.metadata
assert 'jetpack_unknown_files' in err.metadata
unknown_files = err.metadata['jetpack_unknown_files']
nose.tools.eq_(len(unknown_files), 2)
nose.tools.ok_('bootstrap.js' in unknown_files)
nose.tools.ok_('resources/bootstrap.js' in unknown_files)
def test_mismatched_module_version():
"""
    Tests that add-ons using modules from a version of the SDK other than
    the version they claim are flagged.
"""
xpi = XPIManager('tests/resources/jetpack/jetpack-1.8-pretending-1.8.1.xpi')
err = _do_test(xpi)
assert err.failed()
assert any(w['id'][2] == 'mismatched_version' for w in err.warnings)
def test_new_module_location_spec():
"""
Tests that we don't fail for missing modules in add-ons generated with
newer versions of the SDK.
"""
xpi = XPIManager('tests/resources/jetpack/jetpack-1.14.xpi')
err = _do_test(xpi)
assert not any(w['id'][2] == 'missing_jetpack_module'
for w in err.warnings)
def test_components_flagged():
"""Test that `Components` is flagged in Jetpack."""
js = """
var x = Components.services.foo.bar;
"""
assert not _js_test(js).failed()
assert _js_test(js, jetpack=True).failed()
def test_safe_require():
"""Test that requiring an innocuous module does not add the
requires_chrome flag."""
def base_case():
err = _js_test("""var foo = require("bar");""",
jetpack=True)
eq_(err.metadata['requires_chrome'], False)
yield base_case
def test_unsafe_safe_require():
"""Test that requiring low-level modules does add the requires_chrome
flag."""
interfaces = ['chrome', 'window-utils', 'observer-service']
def interface_cases(interface):
err = _js_test("""var {cc, ci} = require("%s")""" % interface,
jetpack=True)
print err.print_summary(verbose=True)
first_message = err.warnings[0]['message']
assert 'non-SDK interface' in first_message, ('unexpected: %s' %
first_message)
assert 'requires_chrome' in err.metadata, \
'unexpected: "requires_chrome" should be in metadata'
eq_(err.metadata['requires_chrome'], True)
for case in interfaces:
yield interface_cases, case
def test_absolute_uris_in_js():
"""
Test that a warning is thrown for absolute URIs within JS files.
"""
bad_js = 'alert("resource://foo-data/bar/zap.png");'
assert not _js_test(bad_js).failed()
    err = _js_test(bad_js, jetpack=True)
assert err.failed()
assert err.compat_summary['errors']
# Test that literals are inspected even if they're the result of an
# operation.
bad_js = 'alert("resou" + "rce://foo-" + "data/bar/zap.png");'
assert not _js_test(bad_js).failed()
    err = _js_test(bad_js, jetpack=True)
assert err.failed()
assert err.compat_summary['errors']
def test_observer_service_flagged():
assert _js_test("""
var {Ci} = require("chrome");
thing.QueryInterface(Ci.nsIObserverService);
""", jetpack=True).failed()
assert not _js_test("""
thing.QueryInterface(Ci.nsIObserverService);
""").failed()
def test_absolute_uris_in_markup():
"""
Test that a warning is thrown for absolute URIs within markup files.
"""
err = ErrorBundle()
bad_html = '<foo><bar src="resource://foo-data/bar/zap.png" /></foo>'
parser = MarkupParser(err)
parser.process('foo.html', bad_html, 'html')
assert not err.failed()
err.metadata['is_jetpack'] = True
parser = MarkupParser(err)
parser.process('foo.html', bad_html, 'html')
assert err.failed()
assert err.compat_summary['errors']
def test_bad_sdkversion():
"""Test that a redacted SDK version is not used."""
harnessoptions = {'sdkVersion': '1.4',
'jetpackID': '',
'manifest': {}}
with open('tests/resources/bootstrap.js') as bootstrap_file:
bootstrap = bootstrap_file.read()
with open('jetpack/addon-sdk/packages/test-harness/lib/'
'harness.js') as harness_file:
harness = harness_file.read()
err = _do_test(MockXPI({'bootstrap.js': bootstrap,
'components/harness.js': harness,
'harness-options.json':
json.dumps(harnessoptions)}))
assert err.failed() and err.errors
def test_outdated_sdkversion():
"""
Tests that add-ons using a version other than the latest release
are warned against, but module hashes are still recognized.
"""
xpi = XPIManager('tests/resources/jetpack/jetpack-1.8-outdated.xpi')
err = _do_test(xpi, allow_old_sdk=False)
assert err.failed()
# Make sure we don't have any version mismatch warnings
eq_(len(err.warnings), 1)
eq_(err.warnings[0]['id'][2], 'outdated_version')
def test_future_sdkversion():
"""
    Test that if the developer is using a version of the SDK that's newer than
the latest recognized version, we don't throw an error.
"""
xpi = XPIManager('tests/resources/jetpack/jetpack-1.8-future.xpi')
err = _do_test(xpi, allow_old_sdk=False)
print err.print_summary(verbose=True)
assert not err.failed()
|
|
import copy
import time
import pdb
import fnmatch  # needed for the fnmatch.fnmatch() call in classify() below
import ephem
import pandas as pd
import numpy as np
from astropy.io import ascii
from itertools import product
from .pdf import * # part of isoclassify package (to do make explicit import)
from .priors import * # part of isoclassify package (to do make explicit import)
from .plot import * # part of isoclassify package (to do make explicit import)
class obsdata():
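    """Container for the observational inputs; -99.0 marks a missing value."""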
def __init__(self):
self.plx = -99.0
self.plxe = -99.0
self.teff = -99.0
self.teffe = -99.0
self.logg = -99.0
self.logge = -99.0
self.feh = -99.0
self.fehe = -99.0
self.lum = -99.0
self.lume = -99.0
self.bmag = -99.0
self.bmage = -99.0
self.vmag = -99.0
self.vmage = -99.0
self.btmag = -99.0
self.btmage = -99.0
self.vtmag = -99.0
self.vtmage = -99.0
self.dmag = -99.0
self.dmage = -99.0
self.gmag = -99.0
self.gmage = -99.0
self.rmag = -99.0
self.rmage = -99.0
self.imag = -99.0
self.image = -99.0
self.zmag = -99.0
self.zmage = -99.0
self.jmag = -99.0
self.jmage = -99.0
self.hmag = -99.0
self.hmage = -99.0
self.kmag = -99.0
self.kmage = -99.0
self.gamag = -99.0
self.gamage = -99.0
self.bpmag = -99.0
self.bpmage = -99.0
self.rpmag = -99.0
self.rpmage = -99.0
self.numax = -99.0
self.numaxe = -99.0
self.dnu = -99.0
self.dnue = -99.0
def addspec(self,value,sigma):
self.teff = value[0]
self.teffe = sigma[0]
self.logg = value[1]
self.logge = sigma[1]
self.feh = value[2]
self.fehe = sigma[2]
def addlum(self,value,sigma):
self.lum = value[0]
self.lume = sigma[0]
def addbv(self,value,sigma):
self.bmag = value[0]
self.bmage = sigma[0]
self.vmag = value[1]
self.vmage = sigma[1]
def addbvt(self,value,sigma):
self.btmag = value[0]
self.btmage = sigma[0]
self.vtmag = value[1]
self.vtmage = sigma[1]
def addgriz(self,value,sigma):
self.gmag = value[0]
self.gmage = sigma[0]
self.rmag = value[1]
self.rmage = sigma[1]
self.imag = value[2]
self.image = sigma[2]
self.zmag = value[3]
self.zmage = sigma[3]
def addjhk(self,value,sigma):
self.jmag = value[0]
self.jmage = sigma[0]
self.hmag = value[1]
self.hmage = sigma[1]
self.kmag = value[2]
self.kmage = sigma[2]
def addgaia(self,value,sigma):
self.gamag = value[0]
self.gamage = sigma[0]
self.bpmag = value[1]
self.bpmage = sigma[1]
self.rpmag = value[2]
self.rpmage = sigma[2]
def addplx(self,value,sigma):
self.plx = value
self.plxe = sigma
def adddmag(self,value,sigma):
self.dmag = value
self.dmage = sigma
def addseismo(self,value,sigma):
self.numax = value[0]
self.numaxe = sigma[0]
self.dnu = value[1]
self.dnue = sigma[1]
def addcoords(self,value1,value2):
self.ra = value1
self.dec = value2
class resdata():
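    """Container for the derived parameters: central value, upper/lower errors
    (ep/em) and posterior PDF arrays (px/py) for each quantity."""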
def __init__(self):
self.teff = 0.0
self.teffep = 0.0
self.teffem = 0.0
self.teffpx = 0.0
self.teffpy = 0.0
self.logg = 0.0
self.loggep = 0.0
self.loggem = 0.0
self.loggpx = 0.0
self.loggpy = 0.0
self.feh = 0.0
self.fehep = 0.0
self.fehem = 0.0
self.fehpx = 0.0
self.fehpy = 0.0
self.rad = 0.0
self.radep = 0.0
self.radem = 0.0
self.radpx = 0.0
self.radpy = 0.0
self.mass = 0.0
self.massep = 0.0
self.massem = 0.0
self.masspx = 0.0
self.masspy = 0.0
self.rho = 0.0
self.rhoep = 0.0
self.rhoem = 0.0
self.rhopx = 0.0
self.rhopy = 0.0
self.lum = 0.0
self.lumep = 0.0
self.lumem = 0.0
self.lumpx = 0.0
self.lumpy = 0.0
self.age = 0.0
self.ageep = 0.0
self.ageem = 0.0
self.agepx = 0.0
self.agepy = 0.0
self.avs = 0.0
self.avsep = 0.0
self.avsem = 0.0
self.avspx = 0.0
self.avspy = 0.0
self.dis = 0.0
self.disep = 0.0
self.disem = 0.0
self.dispx = 0.0
self.dispy = 0.0
self.teffsec = 0.0
self.teffsecep = 0.0
self.teffsecem = 0.0
self.teffsecpx = 0.0
self.teffsecpy = 0.0
self.radsec = 0.0
self.radsecep = 0.0
self.radsecem = 0.0
self.radsecpx = 0.0
self.radsecpy = 0.0
self.loggsec = 0.0
self.loggsecep = 0.0
self.loggsecem = 0.0
self.loggsecpx = 0.0
self.loggsecpy = 0.0
self.rhosec = 0.0
self.rhosecep = 0.0
self.rhosecem = 0.0
self.rhosecpx = 0.0
self.rhosecpy = 0.0
self.masssec = 0.0
self.masssecep = 0.0
self.masssecem = 0.0
self.masssecpx = 0.0
self.masssecpy = 0.0
class extinction():
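    """Broadband extinction coefficients, expressed relative to A_V (av = 1)."""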
def __init__(self):
self.ab = 1.3454449
self.av = 1.00
self.abt = 1.3986523
self.avt = 1.0602271
self.ag = 1.2348743
self.ar = 0.88343449
self.ai = 0.68095687
self.az = 0.48308430
self.aj = 0.28814896
self.ah = 0.18152716
self.ak = 0.11505195
self.aga=1.2348743
def classify(input, model, dustmodel=0, plot=1, useav=-99.0, ext=-99.0, band=''):
"""
Run grid based classifier
Args:
input (object): input object
model (dict): dictionary of arrays
dustmodel (Optional[DataFrame]): extinction model
useav (float):
ext (float):
"""
## constants
gsun = 27420.010
numaxsun = 3090.0
dnusun = 135.1
teffsun = 5772.0
# bolometric correction error; kinda needs to be motivated better ...
bcerr = 0.03
## extinction coefficients
extfactors = ext
## class containing output results
result = resdata()
# calculate colors + errors:
bvcol = input.bmag - input.vmag
bvtcol = input.btmag - input.vtmag
grcol = input.gmag - input.rmag
ricol = input.rmag - input.imag
izcol = input.imag - input.zmag
gicol = input.gmag - input.imag
rzcol = input.rmag - input.zmag
gzcol = input.gmag - input.zmag
jhcol = input.jmag - input.hmag
hkcol = input.hmag - input.kmag
jkcol = input.jmag - input.kmag
bpgacol = input.bpmag - input.gamag
garpcol = input.gamag - input.rpmag
bprpcol = input.bpmag - input.rpmag
vjcol = input.vmag - input.jmag
vtjcol = input.vtmag - input.jmag
gjcol = input.gmag - input.jmag
rjcol = input.rmag - input.jmag
vkcol = input.vmag - input.kmag
vtkcol = input.vtmag - input.kmag
gkcol = input.gmag - input.kmag
rkcol = input.rmag - input.kmag
gajcol = input.gamag - input.jmag
gakcol = input.gamag - input.kmag
bvcole = np.sqrt(input.bmage**2 + input.vmage**2)
bvtcole = np.sqrt(input.btmage**2 + input.vtmage**2)
grcole = np.sqrt(input.gmage**2 + input.rmage**2)
ricole = np.sqrt(input.rmage**2 + input.image**2)
izcole = np.sqrt(input.image**2 + input.zmage**2)
gicole = np.sqrt(input.gmage**2 + input.image**2)
rzcole = np.sqrt(input.rmage**2 + input.zmage**2)
gzcole = np.sqrt(input.gmage**2 + input.zmage**2)
jhcole = np.sqrt(input.jmage**2 + input.hmage**2)
hkcole = np.sqrt(input.hmage**2 + input.kmage**2)
jkcole = np.sqrt(input.jmage**2 + input.kmage**2)
bpgacole = np.sqrt(input.bpmage**2 + input.gamage**2)
garpcole = np.sqrt(input.gamage**2 + input.rpmage**2)
bprpcole = np.sqrt(input.bpmage**2 + input.rpmage**2)
vjcole = np.sqrt(input.vmage**2 + input.jmage**2)
vtjcole = np.sqrt(input.vtmage**2 + input.jmage**2)
gjcole = np.sqrt(input.gmage**2 + input.jmage**2)
rjcole = np.sqrt(input.rmage**2 + input.jmage**2)
vkcole = np.sqrt(input.vmage**2 + input.kmage**2)
vtkcole = np.sqrt(input.vtmage**2 + input.kmage**2)
gkcole = np.sqrt(input.gmage**2 + input.kmage**2)
rkcole = np.sqrt(input.rmage**2 + input.kmage**2)
gajcole = np.sqrt(input.gamage**2 + input.jmage**2)
gakcole = np.sqrt(input.gamage**2 + input.kmage**2)
    # Compute an extra g-K color error term to account for underestimated
    # stellar Teff errors (nominal 2% error floor):
if ((input.gmag > -99.0) & (input.kmag > -99.0)):
gkexcole = compute_extra_gk_color_error(gkcol)
# Determine which gK error term is greater and use that one:
print("g - K error from photometry: ",gkcole)
print("g - K error from best-fit polynomial: ",gkexcole)
gkcole = max(gkcole,gkexcole)
print("Using g - K error: ",gkcole)
# apparent mag to use for distance estimation. set by "band" input
redmap = -99.0
if (getattr(input,band) > -99.):
redmap = getattr(input,band)
redmape = getattr(input,band+'e')
model_mabs = model[band]
# correct for companion
if (input.dmag != -99.):
dx=-0.4*input.dmag
dxe=-0.4*input.dmage
cor=2.5*np.log10(1.+10**dx)
redmap = redmap+cor
redmape = np.sqrt( redmape**2 + (dxe*2.5*10**dx/(1.+10**dx))**2)
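        # Deblending: a companion fainter by dmag contributes a flux ratio of
        # 10**(-0.4*dmag), so the primary alone is fainter than the blend by
        # 2.5*log10(1 + 10**(-0.4*dmag)); that correction is added to the
        # measured magnitude and the dmag uncertainty propagated through it.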
# absolute magnitude
if (input.plx > -99.0):
mabs = -5.0 * np.log10(1.0 / input.plx) + redmap + 5.0
mabse = np.sqrt(
(-5.0 / (input.plx * np.log(10)))**2 * input.plxe**2
+ redmape**2 + bcerr**2)
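        # Absolute magnitude from the parallax: M = m + 5*log10(plx) + 5 (plx in
        # arcsec, d = 1/plx in pc); the parallax term of the error is
        # |dM/dplx|*plxe = 5/(plx*ln10)*plxe, added in quadrature with the
        # apparent-magnitude error and the bolometric-correction floor bcerr.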
# Also compute extra error term for M-dwarfs with K band mags only:
if (mabs > 4.0) and (input.kmag > -99.0):
print("M-dwarf with K band magnitude detected!")
mabseex = compute_extra_MK_error(mabs)
print("M_K from photometry: ",mabse)
print("M_K error from best-fit polynomial: ",mabseex)
mabse = np.sqrt(mabse**2 + mabseex**2)
print("After adding in quadrature, using M_K error: ",mabse)
else:
mabs = -99.0
mabse = -99.0
# pre-select model grid; first only using reddening-independent quantities
sig = 4.0
um = np.arange(0,len(model['teff']),1)
if (input.teff > -99.0):
ut=np.where((model['teff'] > input.teff-sig*input.teffe) & \
(model['teff'] < input.teff+sig*input.teffe))[0]
um=np.intersect1d(um,ut)
print('teff',len(um))
if (input.lum > -99.0):
ut=np.where((model['lum'] > input.lum-sig*input.lume) & \
(model['lum'] < input.lum+sig*input.lume))[0]
um=np.intersect1d(um,ut)
print('lum',len(um))
if (input.dnu > 0.0):
model_dnu = dnusun*model['fdnu']*np.sqrt(10**model['rho'])
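        # Asteroseismic scaling relation: dnu/dnu_sun = fdnu * sqrt(rho/rho_sun),
        # with 10**model['rho'] being the mean density in solar units.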
ut = np.where(
(model_dnu > input.dnu - sig*input.dnue)
& (model_dnu < input.dnu + sig*input.dnue)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('dnu', len(um))
if (input.numax > 0.0):
model_numax = (numaxsun
* (10**model['logg']/gsun)
* (model['teff']/teffsun)**(-0.5))
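        # Asteroseismic scaling relation:
        # numax/numax_sun = (g/g_sun) * (Teff/Teff_sun)**(-1/2).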
ut = np.where(
(model_numax > input.numax - sig*input.numaxe)
& (model_numax < input.numax + sig*input.numaxe)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('numax', len(um))
if (input.logg > -99.0):
ut = np.where(
(model['logg'] > input.logg - sig*input.logge)
& (model['logg'] < input.logg + sig*input.logge)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if (input.feh > -99.0):
ut = np.where(
(model['feh'] > input.feh - sig*input.fehe)
& (model['feh'] < input.feh + sig*input.fehe)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('feh', len(um))
print('number of models used within non-phot obsconstraints:', len(um))
# bail if there are not enough good models
if (len(um) < 10):
return result
# add reddening
if (redmap > -99.0):
# if no reddening map is provided, add Av as a new variable
# and fit for it
if (isinstance(dustmodel,pd.DataFrame) == False):
avs = np.arange(-0.3,1.0,0.01)
# user-specified reddening
#if (useav > -99.0):
# avs = np.zeros(1) + useav
mod = reddening(model, um, avs, extfactors)
# otherwise, just redden each model according to the provided map
else:
mod = reddening_map(
model, model_mabs, redmap, dustmodel, um, input, extfactors, band
)
# photometry to use for distance
mod_mabs = mod[band]
um = np.arange(0,len(mod['teff']),1)
mod['dis'] = 10**((redmap - mod_mabs + 5.0)/5.0)
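        # Inverted distance modulus: d[pc] = 10**((m - M + 5)/5).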
print('number of models incl reddening:',len(um))
else:
mod = model
# next, another model down-select based on reddening-dependent quantities
# only do this if no spec constraints are available
if (mabs > -99.0):
ut = np.where(
(mod_mabs > mabs - sig*mabse)
& (mod_mabs < mabs + sig*mabse)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if (input.teff == -99.0):
if ((input.bmag > -99.0) & (input.vmag > -99.0)):
ut=np.where(
(mod['bmag'] - mod['vmag'] > bvcol - sig*bvcole)
& (mod['bmag'] - mod['vmag'] < bvcol + sig*bvcole))
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.btmag > -99.0) & (input.vtmag > -99.0)):
ut=np.where(
(mod['btmag'] - mod['vtmag'] > bvtcol - sig*bvtcole)
& (mod['btmag'] - mod['vtmag'] < bvtcol + sig*bvtcole))
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.gmag > -99.0) & (input.rmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['rmag'] > grcol-sig*grcole)
& (mod['gmag'] - mod['rmag'] < grcol+sig*grcole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.imag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['imag'] > ricol - sig*ricole)
& (mod['rmag'] - mod['imag'] < ricol + sig*ricole)
)
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.imag > -99.0) & (input.zmag > -99.0)):
ut = np.where(
(mod['imag'] - mod['zmag'] > izcol - sig*izcole)
& (mod['imag'] - mod['zmag'] < izcol + sig*izcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.imag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['imag'] > gicol-sig*gicole)
& (mod['gmag'] - mod['imag'] < gicol+sig*gicole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.zmag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['zmag'] > rzcol-sig*rzcole)
& (mod['rmag'] - mod['zmag'] < rzcol+sig*rzcole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.zmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['zmag'] > gzcol-sig*gzcole)
& (mod['gmag'] - mod['zmag'] < gzcol+sig*gzcole))
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.jmag > -99.0) & (input.hmag > -99.0)):
ut = np.where(
(mod['jmag'] - mod['hmag'] > jhcol - sig*jhcole)
& (mod['jmag'] - mod['hmag'] < jhcol + sig*jhcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.hmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['hmag'] - mod['kmag'] > hkcol - sig*hkcole)
& (mod['hmag'] - mod['kmag'] < hkcol + sig*hkcole))
ut = ut[0]
um = np.intersect1d(um,ut)
if ((input.jmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['jmag'] - mod['kmag'] > jkcol - sig*jkcole)
& (mod['jmag'] - mod['kmag'] < jkcol + sig*jkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.bpmag > -99.0) & (input.gamag > -99.0)):
ut = np.where(
(mod['bpmag'] - mod['gamag'] > bpgacol - sig*bpgacole)
& (mod['bpmag'] - mod['gamag'] < bpgacol + sig*bpgacole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gamag > -99.0) & (input.rpmag > -99.0)):
ut = np.where(
(mod['gamag'] - mod['rpmag'] > garpcol - sig*garpcole)
& (mod['gamag'] - mod['rpmag'] < garpcol + sig*garpcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.bpmag > -99.0) & (input.rpmag > -99.0)):
ut = np.where(
(mod['bpmag'] - mod['rpmag'] > bprpcol - sig*bprpcole)
& (mod['bpmag'] - mod['rpmag'] < bprpcol + sig*bprpcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['vmag'] - mod['jmag'] > vjcol - sig*vjcole)
& (mod['vmag'] - mod['jmag'] < vjcol + sig*vjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vtmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['vtmag'] - mod['jmag'] > vtjcol - sig*vtjcole)
& (mod['vtmag'] - mod['jmag'] < vtjcol + sig*vtjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['jmag'] > gjcol - sig*gjcole)
& (mod['gmag'] - mod['jmag'] < gjcol + sig*gjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['jmag'] > rjcol - sig*rjcole)
& (mod['rmag'] - mod['jmag'] < rjcol + sig*rjcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['vmag'] - mod['kmag'] > vkcol - sig*vkcole)
& (mod['vmag'] - mod['kmag'] < vkcol + sig*vkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.vtmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['vtmag'] - mod['kmag'] > vtkcol - sig*vtkcole)
& (mod['vtmag'] - mod['kmag'] < vtkcol + sig*vtkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['gmag'] - mod['kmag'] > gkcol - sig*gkcole)
& (mod['gmag'] - mod['kmag'] < gkcol + sig*gkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.rmag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['rmag'] - mod['kmag'] > rkcol - sig*rkcole)
& (mod['rmag'] - mod['kmag'] < rkcol + sig*rkcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gamag > -99.0) & (input.jmag > -99.0)):
ut = np.where(
(mod['gamag'] - mod['jmag'] > gajcol - sig*gajcole)
& (mod['gamag'] - mod['jmag'] < gajcol + sig*gajcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
if ((input.gamag > -99.0) & (input.kmag > -99.0)):
ut = np.where(
(mod['gamag'] - mod['kmag'] > gakcol - sig*gakcole)
& (mod['gamag'] - mod['kmag'] < gakcol + sig*gakcole)
)
ut = ut[0]
um = np.intersect1d(um, ut)
print('number of models after phot constraints:',len(um))
print('----')
# bail if there are not enough good models
if (len(um) < 10):
return result
def gaussian(x, mu, sig):
return np.exp(-(x-mu)**2./(2.*sig**2.))
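    # Unnormalized Gaussian likelihood; the normalization cancels when the
    # posterior is renormalized below.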
# likelihoods
if ((input.gmag > -99.0) & (input.rmag > -99.0)):
lh_gr = gaussian(grcol, mod['gmag'][um]-mod['rmag'][um], grcole)
else:
lh_gr = np.ones(len(um))
if ((input.rmag > -99.0) & (input.imag > -99.0)):
lh_ri = gaussian(ricol, mod['rmag'][um]-mod['imag'][um], ricole)
else:
lh_ri = np.ones(len(um))
if ((input.imag > -99.0) & (input.zmag > -99.0)):
lh_iz = gaussian(izcol, mod['imag'][um]-mod['zmag'][um], izcole)
else:
lh_iz = np.ones(len(um))
if ((input.gmag > -99.0) & (input.imag > -99.0)):
lh_gi = gaussian(gicol, mod['gmag'][um]-mod['imag'][um], gicole)
else:
lh_gi = np.ones(len(um))
if ((input.rmag > -99.0) & (input.zmag > -99.0)):
lh_rz = gaussian(rzcol, mod['rmag'][um]-mod['zmag'][um], rzcole)
else:
lh_rz = np.ones(len(um))
if ((input.gmag > -99.0) & (input.zmag > -99.0)):
lh_gz = gaussian(gzcol, mod['gmag'][um]-mod['zmag'][um], gzcole)
else:
lh_gz = np.ones(len(um))
if ((input.jmag > -99.0) & (input.hmag > -99.0)):
lh_jh = gaussian(jhcol, mod['jmag'][um]-mod['hmag'][um], jhcole)
else:
lh_jh = np.ones(len(um))
if ((input.hmag > -99.0) & (input.kmag > -99.0)):
lh_hk = gaussian(hkcol, mod['hmag'][um]-mod['kmag'][um], hkcole)
else:
lh_hk = np.ones(len(um))
if ((input.jmag > -99.0) & (input.kmag > -99.0)):
lh_jk = gaussian(jkcol, mod['jmag'][um]-mod['kmag'][um], jkcole)
else:
lh_jk = np.ones(len(um))
if ((input.bpmag > -99.0) & (input.gamag > -99.0)):
lh_bpga = gaussian(bpgacol, mod['bpmag'][um]-mod['gamag'][um], bpgacole)
else:
lh_bpga = np.ones(len(um))
if ((input.gamag > -99.0) & (input.rpmag > -99.0)):
lh_garp = gaussian(garpcol, mod['gamag'][um]-mod['rpmag'][um], garpcole)
else:
lh_garp = np.ones(len(um))
if ((input.bpmag > -99.0) & (input.rpmag > -99.0)):
lh_bprp = gaussian(bprpcol, mod['bpmag'][um]-mod['rpmag'][um], bprpcole)
else:
lh_bprp = np.ones(len(um))
if ((input.bmag > -99.0) & (input.vmag > -99.0)):
lh_bv = gaussian(bvcol, mod['bmag'][um]-mod['vmag'][um], bvcole)
else:
lh_bv = np.ones(len(um))
if ((input.btmag > -99.0) & (input.vtmag > -99.0)):
lh_bvt = gaussian(bvtcol, mod['btmag'][um]-mod['vtmag'][um], bvtcole)
else:
lh_bvt = np.ones(len(um))
if ((input.vmag > -99.0) & (input.jmag > -99.0)):
lh_vj = gaussian(vjcol, mod['vmag'][um]-mod['jmag'][um], vjcole)
else:
lh_vj = np.ones(len(um))
if ((input.vtmag > -99.0) & (input.jmag > -99.0)):
lh_vtj = gaussian(vtjcol, mod['vtmag'][um]-mod['jmag'][um], vtjcole)
else:
lh_vtj = np.ones(len(um))
if ((input.gmag > -99.0) & (input.jmag > -99.0)):
lh_gj = gaussian(gjcol, mod['gmag'][um]-mod['jmag'][um], gjcole)
else:
lh_gj = np.ones(len(um))
if ((input.rmag > -99.0) & (input.jmag > -99.0)):
lh_rj = gaussian(rjcol, mod['rmag'][um]-mod['jmag'][um], rjcole)
else:
lh_rj = np.ones(len(um))
if ((input.vmag > -99.0) & (input.kmag > -99.0)):
lh_vk = gaussian(vkcol, mod['vmag'][um]-mod['kmag'][um], vkcole)
else:
lh_vk = np.ones(len(um))
if ((input.vtmag > -99.0) & (input.kmag > -99.0)):
lh_vtk = gaussian(vtkcol, mod['vtmag'][um]-mod['kmag'][um], vtkcole)
else:
lh_vtk = np.ones(len(um))
if ((input.gmag > -99.0) & (input.kmag > -99.0)):
lh_gk = gaussian(gkcol, mod['gmag'][um]-mod['kmag'][um], gkcole)
else:
lh_gk = np.ones(len(um))
if ((input.rmag > -99.0) & (input.kmag > -99.0)):
lh_rk = gaussian(rkcol, mod['rmag'][um]-mod['kmag'][um], rkcole)
else:
lh_rk = np.ones(len(um))
if ((input.gamag > -99.0) & (input.jmag > -99.0)):
lh_gaj = gaussian(gajcol, mod['gamag'][um]-mod['jmag'][um], gajcole)
else:
lh_gaj = np.ones(len(um))
if ((input.gamag > -99.0) & (input.kmag > -99.0)):
lh_gak = gaussian(gakcol, mod['gamag'][um]-mod['kmag'][um], gakcole)
else:
lh_gak = np.ones(len(um))
if (input.teff > -99):
lh_teff = gaussian(input.teff, mod['teff'][um], input.teffe)
else:
lh_teff = np.ones(len(um))
if (input.lum > -99):
lh_lum = gaussian(input.lum, mod['lum'][um], input.lume)
else:
lh_lum = np.ones(len(um))
if (input.logg > -99.0):
lh_logg = gaussian(input.logg, mod['logg'][um], input.logge)
else:
lh_logg = np.ones(len(um))
if (input.feh > -99.0):
lh_feh = gaussian(input.feh, mod['feh'][um], input.fehe)
else:
lh_feh = np.ones(len(um))
if (input.plx > -99.0):
# Compute most likely value of absolute magnitude:
mabsIndex = np.argmax(np.exp( (-1./(2.*input.plxe**2))*(input.plx-1./mod['dis'][um])**2))
# Only use downselected models based on input parameters:
downSelMagArr = mod_mabs[um]
# Compute the likelihood of the maximum magnitude given computed errors:
lh_mabs = gaussian(downSelMagArr[mabsIndex],mod_mabs[um],mabse)
else:
lh_mabs = np.ones(len(um))
if (input.dnu > 0.):
mod_dnu = dnusun*mod['fdnu']*np.sqrt(10**mod['rho'])
lh_dnu = np.exp( -(input.dnu-mod_dnu[um])**2.0 / (2.0*input.dnue**2.0))
else:
lh_dnu = np.ones(len(um))
if (input.numax > 0.):
mod_numax = (numaxsun
* (10**mod['logg']/gsun)
* (mod['teff']/teffsun)**(-0.5))
lh_numax = gaussian(input.numax,mod_numax[um],input.numaxe)
else:
lh_numax = np.ones(len(um))
tlh = (lh_gr*lh_ri*lh_iz*lh_gi*lh_rz*lh_gz*lh_jh*lh_hk*lh_jk*lh_bv*lh_bvt*lh_bpga*lh_garp*lh_bprp*
lh_vj*lh_vtj*lh_gj*lh_rj*lh_vk*lh_vtk*lh_gk*lh_rk*lh_gaj*lh_gak*
lh_teff*lh_logg*lh_feh*lh_mabs*lh_dnu*lh_numax*lh_lum)
# metallicity prior (only if no FeH input is given)
if (input.feh > -99.0):
fprior = np.ones(len(um))
else:
fprior = fehprior(mod['feh'][um])
# distance prior
if (input.plx > -99.0):
lscale = 1350.
dprior = ((mod['dis'][um]**2/(2.0*lscale**3.))
*np.exp(-mod['dis'][um]/lscale))
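        # Exponentially decreasing space-density prior, p(d) ~ d**2 * exp(-d/L),
        # with length scale L = 1350 pc.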
else:
dprior = np.ones(len(um))
# isochrone prior (weights)
tprior = mod['dage'][um]*mod['dmass'][um]*mod['dfeh'][um]
# posterior
prob = fprior*dprior*tprior*tlh
prob = prob/np.sum(prob)
if (isinstance(dustmodel,pd.DataFrame) == False):
names = ['teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age']
steps = [0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
        fixes = [0, 1, 1, 0, 0, 1, 1, 0]
if (redmap > -99.0):
names = [
'teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age',
'avs'
]
steps = [0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
fixes=[0, 1, 1, 0, 0, 1, 1, 0, 1]
if ((input.plx == -99.0) & (redmap > -99)):
names=[
'teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age',
'avs', 'dis'
]
steps=[0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
fixes=[0, 1, 1, 0, 0, 1, 1, 0, 1, 0]
#if ((input.plx == -99.0) & (map > -99) & (useav > -99.0)):
# names=['teff','logg','feh','rad','mass','rho','lum','age','dis']
# steps=[0.001,0.01,0.01,0.01,0.01,0.01,0.01,0.01,0.01]
# fixes=[0,1,1,0,0,1,1,0,0]
else:
#names=['teff','logg','feh','rad','mass','rho','lum','age']
#steps=[0.001,0.01,0.01,0.01,0.01,0.01,0.01,0.01]
#fixes=[0,1,1,0,0,1,1,0,1]
#if (input.plx == -99.0):
avstep=((np.max(mod['avs'][um])-np.min(mod['avs'][um]))/10.)
#pdb.set_trace()
names = [
'teff', 'logg', 'feh', 'rad', 'mass', 'rho', 'lum', 'age', 'avs',
'dis'
]
steps=[0.001, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, avstep, 0.01]
fixes=[0, 1, 1, 0, 0, 1, 1, 0, 1, 0]
# Provision figure
if plot:
plotinit()
ix = 1
iy = 2
npar = len(names)
for j in range(0,npar):
if fnmatch.fnmatch(names[j],'*lum*'):
lum=np.log10((mod['rad'][um]**2. * (mod['teff'][um]/5772.)**4.))
x, y, res, err1, err2 = getpdf(
lum, prob, name=names[j], step=steps[j], fixed=fixes[j],
dustmodel=dustmodel)
else:
if (len(np.unique(mod[names[j]][um])) > 1):
x, y, res, err1, err2 = getpdf(
mod[names[j]][um], prob, name=names[j], step=steps[j],
fixed=fixes[j],dustmodel=dustmodel
)
elif ((len(np.unique(mod[names[j]][um])) == 1) and (names[j] == 'avs')):
res = mod[names[j]][um[0]]
err1 = 0.0
err2 = 0.0
x = res
y = 1.0
else:
# no PDF computed for this parameter; set defaults so x/y are not stale
# values carried over from a previous iteration
res = 0.0
err1 = 0.0
err2 = 0.0
x = 0.0
y = 0.0
print(names[j], res, err1, err2)
setattr(result, names[j], res)
setattr(result, names[j]+'ep', err1)
setattr(result, names[j]+'em', err2)
setattr(result, names[j]+'px', x)
setattr(result, names[j]+'py', y)
# Plot individual posteriors
if plot:
plotposterior(x, y, res, err1, err2, names, j, ix, iy)
ix += 2
iy += 2
# calculate posteriors for a secondary with a given delta_mag, assuming it has the same
# distance, age, and metallicity. to do this we'll interpolate the physical properties
# of the secondary given a delta_mag, and assign it the same posterior probabilities
# same procedure as used in Kraus+ 16
if (input.dmag > -99.):
print(' ')
print('calculating properties for secondary ...')
delta_k=input.dmag
delta_k_err=input.dmage
print('using dmag=',delta_k,'+/-',delta_k_err,' in ',band)
# interpolate across constant age and metallicity
feh_un=np.unique(mod['feh_init'][um])
age_un=np.unique(mod['age'][um])
#adding in the contrast error without sampling is tricky, because that uncertainty
# is not present in the primary posterior; instead, calculate the secondary
# posteriors 3 times for +/- contrast errors, and then add those in quadrature
# *explicitly assumes that the contrast errors are gaussian*
mds=[delta_k+delta_k_err,delta_k,delta_k-delta_k_err]
# the new model quantities for the secondary
mod_sec=np.zeros((5,3,len(prob)))
# Now reduce model to only those that match metallicity, age, and mass (must be less than max primary mass) conditions:
ufeh = np.in1d(model['feh_init'],feh_un) # Must match all potential primary initial metallicities
uage = np.in1d(model['age'],age_un) # Must match all potential primary ages
umass = np.where(model['mass'] < np.max(mod['mass'][um]))[0] # Must be less than max primary mass
ufa = np.where(ufeh & uage)[0] # Find intersection of age and feh
ufam = np.intersect1d(umass,ufa) # Find intersection of mass and ufa
modelMin = dict((k, model[k][ufam]) for k in model.keys()) # Define minimal model grid
# insanely inefficient triple loop follows
for s in range(0,len(mds)):
for r in range(0,len(feh_un)):
for k in range (0,len(age_un)):
# NB the next lines use modelMin (derived from the full model grid) rather
# than mod, since the interpolation needs the full grid and not the
# pre-selected models returned by the reddening routine (which excludes
# secondary solutions). This may screw things up when trying to constrain
# reddening (i.e. dust="none")
ux=np.where((modelMin['feh_init'] == feh_un[r]) & (modelMin['age'] == age_un[k]))[0]
ux2=np.where((mod['feh_init'][um] == feh_un[r]) & (mod['age'][um] == age_un[k]))[0]
sr=np.argsort(modelMin[band][ux])
if ((len(ux) == 0) | (len(ux2) == 0)):
continue
mod_sec[0,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['teff'][ux[sr]])
mod_sec[1,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['logg'][ux[sr]])
mod_sec[2,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['rad'][ux[sr]])
mod_sec[3,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['mass'][ux[sr]])
mod_sec[4,s,ux2]=np.interp(mod[band][um[ux2]]+mds[s],modelMin[band][ux[sr]],modelMin['rho'][ux[sr]])
# now get PDFs across all delta mags, add errors in quadrature
names = ['teff', 'logg', 'rad', 'mass', 'rho']
steps=[0.001, 0.01, 0.01, 0.01, 0.01]
fixes=[0, 1, 0, 0, 1]
ix = 1
iy = 2
npar = len(names)
for j in range(0,5):
x, y, res_1, err1_1, err2_1 = getpdf(mod_sec[j,0,:], prob, name=names[j], step=steps[j], fixed=fixes[j],dustmodel=dustmodel)
xo, yo, res_2, err1_2, err2_2 = getpdf(mod_sec[j,1,:], prob, name=names[j], step=steps[j], fixed=fixes[j],dustmodel=dustmodel)
x, y, res_3, err1_3, err2_3 = getpdf(mod_sec[j,2,:], prob, name=names[j], step=steps[j], fixed=fixes[j],dustmodel=dustmodel)
finerr1=np.sqrt(err1_2**2 + (np.abs(res_2-res_1))**2)
finerr2=np.sqrt(err2_2**2 + (np.abs(res_2-res_3))**2)
print(names[j], res_2, finerr1, finerr2)
setattr(result, names[j]+'sec', res_2)
setattr(result, names[j]+'sec'+'ep', finerr1)
setattr(result, names[j]+'sec'+'em', finerr2)
setattr(result, names[j]+'sec'+'px', x)
setattr(result, names[j]+'sec'+'py', y)
# Plot individual posteriors
if plot:
plotposterior_sec(xo,yo, res_2, finerr1, finerr2, names, j, ix, iy)
ix += 2
iy += 2
# Plot HR diagrams
if plot:
plothrd(model,mod,um,input,mabs,mabse,ix,iy)
return result
# add extinction as a model parameter
def reddening(model,um,avs,extfactors):
model2=dict((k, model[k][um]) for k in model.keys())
nmodels=len(model2['teff'])*len(avs)
keys = [
'dage', 'dmass', 'dfeh', 'teff', 'logg', 'feh', 'rad', 'mass',
'rho', 'age', 'gmag', 'rmag', 'imag', 'zmag', 'jmag', 'hmag',
'bmag', 'vmag', 'btmag','vtmag', 'bpmag', 'gamag', 'rpmag',
'dis', 'kmag', 'avs', 'fdnu', 'feh_init'
]
dtype = [(key, float) for key in keys]
model3 = np.zeros(nmodels,dtype=dtype)
start=0
end=len(um)
#print start,end
for i in range(0,len(avs)):
ix = np.arange(start,end,1)
# NB: in reality, the model mags should also be Av-dependent;
# hopefully a small effect!
for c in 'b v g r i z j h k bt vt bp ga rp'.split():
cmag = c + 'mag'
ac = 'a' + c
av = extfactors['av']
model3[cmag][ix] = model2[cmag] + avs[i]*extfactors[ac]/av
keys = 'teff logg feh rad mass rho age feh_init dfeh dmass dage fdnu'.split()
for key in keys:
model3[key][ix]=model2[key]
model3['avs'][ix] = avs[i]
start = start + len(um)
end = end + len(um)
print(i)
return model3
# redden model given a reddening map
def reddening_map(model, model_mabs, redmap, dustmodel, um, input, extfactors,
band):
if (len(band) == 4):
bd = band[0:1]
else:
bd = band[0:2]
equ = ephem.Equatorial(
input.ra*np.pi/180.0, input.dec*np.pi/180.0, epoch=ephem.J2000
)
gal = ephem.Galactic(equ)
lon_deg = gal.lon*180./np.pi
lat_deg = gal.lat*180./np.pi
# zero-reddening distance
dis = 10**((redmap-model_mabs[um]+5)/5.)
# iterate distance and reddening map (currently a single pass)
for i in range(0,1):
xp = np.concatenate(
([0.0],np.array(dustmodel.columns[2:].str[3:],dtype='float'))
)
fp = np.concatenate(([0.0],np.array(dustmodel.iloc[0][2:])))
ebvs = np.interp(x=dis, xp=xp, fp = fp)
ext_band = extfactors['a'+bd]*ebvs
dis=10**((redmap-ext_band-model_mabs[um]+5)/5.)
# if no models have been pre-selected (i.e. input is
# photometry+parallax only), redden all models
if (len(um) == len(model['teff'])):
model3 = copy.deepcopy(model)
for c in 'b v g r i z j h k bt vt bp ga rp'.split():
cmag = c + 'mag'
ac = 'a' + c
av = extfactors['av']
model3[cmag] = model[cmag] + extfactors[ac] * ebvs
model3['dis'] = dis
model3['avs'] = extfactors['av']*ebvs
#pdb.set_trace()
# if models have been pre-selected, extract and only redden those
else:
model2 = dict((k, model[k][um]) for k in model.keys())
nmodels = len(model2['teff'])
keys = [
'dage', 'dmass', 'dfeh', 'teff', 'logg', 'feh', 'rad', 'mass',
'rho', 'age', 'gmag', 'rmag', 'imag', 'zmag', 'jmag', 'hmag',
'bmag', 'vmag', 'btmag','vtmag', 'bpmag', 'gamag', 'rpmag',
'dis', 'kmag', 'avs', 'fdnu', 'feh_init'
]
dtype = [(key, float) for key in keys]
model3 = np.zeros(nmodels,dtype=dtype)
for c in 'b v g r i z j h k bt vt bp ga rp'.split():
cmag = c + 'mag'
ac = 'a' + c
av = extfactors['av']
model3[cmag] = model2[cmag] + extfactors[ac] * ebvs
model3['dis']=dis
model3['avs']=extfactors['av']*ebvs
keys = 'teff logg feh rad mass rho age feh_init dfeh dmass dage fdnu'.split()
for key in keys:
model3[key] = model2[key]
return model3
########################### M-dwarf error computation and gK to 2% teff uncertainty computation:
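# Both helpers below propagate a fixed fractional uncertainty through an
# empirical polynomial fit y = P(x) via its derivative: an error of f*y in y
# maps onto an error of |f*P(x)/P'(x)| in x. compute_extra_MK_error appears to
# convert a ~2% (0.021) fractional mass uncertainty into an extra absolute
# K-magnitude error, and compute_extra_gk_color_error converts a 2% Teff
# uncertainty into a g-K colour error; the polynomial coefficients themselves
# are taken as given.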
def compute_extra_MK_error(abskmag):
massPoly = np.array([-1.218087354981032275e-04,3.202749540513295540e-03,
-2.649332720970200630e-02,5.491458806424324990e-02,6.102330369026183476e-02,
6.122397810371335014e-01])
massPolyDeriv = np.array([-6.090436774905161376e-04,1.281099816205318216e-02,
-7.947998162910602238e-02,1.098291761284864998e-01,6.102330369026183476e-02])
kmagExtraErr = abs(0.021*np.polyval(massPoly,abskmag)/np.polyval(massPolyDeriv,abskmag))
return kmagExtraErr
def compute_extra_gk_color_error(gk):
teffPoly = np.array([5.838899127633915245e-06,-4.579640759410575821e-04,
1.591988911769273360e-02,-3.229622768514631148e-01,4.234782988549875782e+00,
-3.752421323678526477e+01,2.279521336429464498e+02,-9.419602441779162518e+02,
2.570487048729761227e+03,-4.396474893847861495e+03,4.553858427460818348e+03,
-4.123317864249115701e+03,9.028586421378711748e+03])
teffPolyDeriv = np.array([7.006678953160697955e-05,-5.037604835351633566e-03,
1.591988911769273429e-01,-2.906660491663167978e+00,3.387826390839900625e+01,
-2.626694926574968463e+02,1.367712801857678642e+03,-4.709801220889581600e+03,
1.028194819491904491e+04,-1.318942468154358357e+04,9.107716854921636696e+03,
-4.123317864249115701e+03])
gkExtraColorErr = abs(0.02*np.polyval(teffPoly,gk)/np.polyval(teffPolyDeriv,gk))
return gkExtraColorErr
######################################### misc stuff
# calculate parallax for each model
def redden(redmap, mabs, gl, gb, dust):
logd = (redmap-mabs+5.)/5.
newd = logd
for i in range(0,1):
cur = 10**newd
ebv = dust(gl,gb,cur/1000.)
av = ebv*3.1
aj = av*1.2348743
newd = (redmap-mabs-aj+5.)/5.
s_newd = np.sqrt( (0.2*0.01)**2 + (0.2*0.03)**2 + (0.2*0.02)**2 )
plx=1./(10**newd)
s_plx=10**(-newd)*np.log(10)*s_newd
#pdb.set_trace()
return 1./(10**newd)
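# readinput() below expects a plain-text 'input.txt' readable by ascii.read
# with two columns (col1 = value, col2 = error) and rows in the fixed order
# ra/dec, B, V, g, r, i, z, J, H, K, parallax, Teff, logg, [Fe/H]; it returns
# the values as one flat tuple (the `input` argument is ignored and the file
# is re-read from disk).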
def readinput(input):
input = ascii.read('input.txt')
ra = input['col1'][0]
dec = input['col2'][0]
bmag = input['col1'][1]
bmage = input['col2'][1]
vmag = input['col1'][2]
vmage = input['col2'][2]
gmag = input['col1'][3]
gmage = input['col2'][3]
rmag = input['col1'][4]
rmage = input['col2'][4]
imag = input['col1'][5]
image = input['col2'][5]
zmag = input['col1'][6]
zmage = input['col2'][6]
jmag = input['col1'][7]
jmage = input['col2'][7]
hmag = input['col1'][8]
hmage = input['col2'][8]
kmag = input['col1'][9]
kmage = input['col2'][9]
plx = input['col1'][10]
plxe = input['col2'][10]
teff = input['col1'][11]
teffe = input['col2'][11]
logg = input['col1'][12]
logge = input['col2'][12]
feh = input['col1'][13]
fehe = input['col2'][13]
out = (
ra, dec, bmag, bmage, vmag, vmage, gmag, gmage, rmag, rmage,
imag, image, zmag, zmage, jmag, jmage, hmag, hmage, kmag, kmage,
plx, plxe, teff, teffe, logg, logge, feh, fehe
)
return out
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v1 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def inception_v1_base(inputs,
final_endpoint='Mixed_5c',
scope='InceptionV1'):
"""Defines the Inception V1 base architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
scope: Optional variable_scope.
Returns:
A dictionary from components of the network to the corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=trunc_normal(0.01)):
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
stride=1, padding='SAME'):
end_point = 'Conv2d_1a_7x7'
net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_2a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Conv2d_2b_1x1'
net = slim.conv2d(net, 64, [1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Conv2d_2c_3x3'
net = slim.conv2d(net, 192, [3, 3], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_4a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4f'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_5a_2x2'
net = slim.max_pool2d(net, [2, 2], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v1(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1'):
"""Defines the Inception V1 architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
The default image size used to train this network is 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
"""
# Final pooling and prediction
with tf.variable_scope(scope, 'InceptionV1', [inputs, num_classes],
reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v1_base(inputs, scope=scope)
with tf.variable_scope('Logits'):
net = slim.avg_pool2d(net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
net = slim.dropout(net,
dropout_keep_prob, scope='Dropout_0b')
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_0c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v1.default_image_size = 224
def inception_v1_arg_scope(weight_decay=0.00004,
use_batch_norm=True):
"""Defines the default InceptionV1 arg scope.
Note: Although the original paper didn't use batch_norm we found it useful.
Args:
weight_decay: The weight decay to use for regularizing the model.
use_batch_norm: If `True`, batch_norm is applied after each convolution.
Returns:
An `arg_scope` to use for the inception v1 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': tf.GraphKeys.UPDATE_OPS,
}
if use_batch_norm:
normalizer_fn = slim.batch_norm
normalizer_params = batch_norm_params
else:
normalizer_fn = None
normalizer_params = {}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params) as sc:
return sc
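# Illustrative usage sketch (not part of the original slim module): build the
# default graph for a batch of 224x224 RGB images under the recommended
# arg_scope and fetch the logits. The placeholder shape and num_classes are
# assumptions for demonstration only, and the snippet targets the same
# pre-1.0 TensorFlow API (tf.contrib.slim, tf.concat(axis, values)) used by
# the code above.
if __name__ == '__main__':
  demo_images = tf.placeholder(tf.float32, [None, 224, 224, 3])
  with slim.arg_scope(inception_v1_arg_scope()):
    demo_logits, demo_end_points = inception_v1(
        demo_images, num_classes=1000, is_training=False)
  # Logits come out as [batch_size, 1000] after the spatial squeeze.
  print(demo_logits.get_shape())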
|
|
# $Id: roles.py 6451 2010-10-25 08:02:43Z milde $
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Copyright: This module has been placed in the public domain.
"""
This module defines standard interpreted text role functions, a registry for
interpreted text roles, and an API for adding to and retrieving from the
registry.
The interface for interpreted role functions is as follows::
def role_fn(name, rawtext, text, lineno, inliner,
options={}, content=[]):
code...
# Set function attributes for customization:
role_fn.options = ...
role_fn.content = ...
Parameters:
- ``name`` is the local name of the interpreted text role, the role name
actually used in the document.
- ``rawtext`` is a string containing the entire interpreted text construct.
Return it as a ``problematic`` node linked to a system message if there is a
problem.
- ``text`` is the interpreted text content, with backslash escapes converted
to nulls (``\x00``).
- ``lineno`` is the line number where the interpreted text begins.
- ``inliner`` is the Inliner object that called the role function.
It defines the following useful attributes: ``reporter``,
``problematic``, ``memo``, ``parent``, ``document``.
- ``options``: A dictionary of directive options for customization, to be
interpreted by the role function. Used for additional attributes for the
generated elements and other functionality.
- ``content``: A list of strings, the directive content for customization
("role" directive). To be interpreted by the role function.
Function attributes for customization, interpreted by the "role" directive:
- ``options``: A dictionary, mapping known option names to conversion
functions such as `int` or `float`. ``None`` or an empty dict implies no
options to parse. Several directive option conversion functions are defined
in the `directives` module.
All role functions implicitly support the "class" option, unless disabled
with an explicit ``{'class': None}``.
- ``content``: A boolean; true if content is allowed. Client code must handle
the case where content is required but not supplied (an empty content list
will be supplied).
Note that unlike directives, the "arguments" function attribute is not
supported for role customization. Directive arguments are handled by the
"role" directive itself.
Interpreted role functions return a tuple of two values:
- A list of nodes which will be inserted into the document tree at the
point where the interpreted role was encountered (can be an empty
list).
- A list of system messages, which will be inserted into the document tree
immediately after the end of the current inline block (can also be empty).
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, utils
from docutils.parsers.rst import directives
from docutils.parsers.rst.languages import en as _fallback_language_module
DEFAULT_INTERPRETED_ROLE = 'title-reference'
"""
The canonical name of the default interpreted role. This role is used
when no role is specified for a piece of interpreted text.
"""
_role_registry = {}
"""Mapping of canonical role names to role functions. Language-dependent role
names are defined in the ``language`` subpackage."""
_roles = {}
"""Mapping of local or language-dependent interpreted text role names to role
functions."""
def role(role_name, language_module, lineno, reporter):
"""
Locate and return a role function from its language-dependent name, along
with a list of system messages. If the role is not found in the current
language, check English. Return a 2-tuple: role function (``None`` if the
named role cannot be found) and a list of system messages.
"""
normname = role_name.lower()
messages = []
msg_text = []
if normname in _roles:
return _roles[normname], messages
if role_name:
canonicalname = None
try:
canonicalname = language_module.roles[normname]
except AttributeError as error:
msg_text.append('Problem retrieving role entry from language '
'module %r: %s.' % (language_module, error))
except KeyError:
msg_text.append('No role entry for "%s" in module "%s".'
% (role_name, language_module.__name__))
else:
canonicalname = DEFAULT_INTERPRETED_ROLE
# If we didn't find it, try English as a fallback.
if not canonicalname:
try:
canonicalname = _fallback_language_module.roles[normname]
msg_text.append('Using English fallback for role "%s".'
% role_name)
except KeyError:
msg_text.append('Trying "%s" as canonical role name.'
% role_name)
# The canonical name should be an English name, but just in case:
canonicalname = normname
# Collect any messages that we generated.
if msg_text:
message = reporter.info('\n'.join(msg_text), line=lineno)
messages.append(message)
# Look the role up in the registry, and return it.
if canonicalname in _role_registry:
role_fn = _role_registry[canonicalname]
register_local_role(normname, role_fn)
return role_fn, messages
else:
return None, messages # Error message will be generated by caller.
def register_canonical_role(name, role_fn):
"""
Register an interpreted text role by its canonical name.
:Parameters:
- `name`: The canonical name of the interpreted role.
- `role_fn`: The role function. See the module docstring.
"""
set_implicit_options(role_fn)
_role_registry[name] = role_fn
def register_local_role(name, role_fn):
"""
Register an interpreted text role by its local or language-dependent name.
:Parameters:
- `name`: The local or language-dependent name of the interpreted role.
- `role_fn`: The role function. See the module docstring.
"""
set_implicit_options(role_fn)
_roles[name] = role_fn
def set_implicit_options(role_fn):
"""
Add customization options to role functions, unless explicitly set or
disabled.
"""
if not hasattr(role_fn, 'options') or role_fn.options is None:
role_fn.options = {'class': directives.class_option}
elif 'class' not in role_fn.options:
role_fn.options['class'] = directives.class_option
def register_generic_role(canonical_name, node_class):
"""For roles which simply wrap a given `node_class` around the text."""
role = GenericRole(canonical_name, node_class)
register_canonical_role(canonical_name, role)
class GenericRole:
"""
Generic interpreted text role, where the interpreted text is simply
wrapped with the provided node class.
"""
def __init__(self, role_name, node_class):
self.name = role_name
self.node_class = node_class
def __call__(self, role, rawtext, text, lineno, inliner,
options={}, content=[]):
set_classes(options)
return [self.node_class(rawtext, utils.unescape(text), **options)], []
class CustomRole:
"""
Wrapper for custom interpreted text roles.
"""
def __init__(self, role_name, base_role, options={}, content=[]):
self.name = role_name
self.base_role = base_role
self.options = None
if hasattr(base_role, 'options'):
self.options = base_role.options
self.content = None
if hasattr(base_role, 'content'):
self.content = base_role.content
self.supplied_options = options
self.supplied_content = content
def __call__(self, role, rawtext, text, lineno, inliner,
options={}, content=[]):
opts = self.supplied_options.copy()
opts.update(options)
cont = list(self.supplied_content)
if cont and content:
cont += '\n'
cont.extend(content)
return self.base_role(role, rawtext, text, lineno, inliner,
options=opts, content=cont)
def generic_custom_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
""""""
# Once nested inline markup is implemented, this and other methods should
# recursively call inliner.nested_parse().
set_classes(options)
return [nodes.inline(rawtext, utils.unescape(text), **options)], []
generic_custom_role.options = {'class': directives.class_option}
######################################################################
# Define and register the standard roles:
######################################################################
register_generic_role('abbreviation', nodes.abbreviation)
register_generic_role('acronym', nodes.acronym)
register_generic_role('emphasis', nodes.emphasis)
register_generic_role('literal', nodes.literal)
register_generic_role('strong', nodes.strong)
register_generic_role('subscript', nodes.subscript)
register_generic_role('superscript', nodes.superscript)
register_generic_role('title-reference', nodes.title_reference)
def pep_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
try:
pepnum = int(text)
if pepnum < 0 or pepnum > 9999:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'PEP number must be a number from 0 to 9999; "%s" is invalid.'
% text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
# Base URL mainly used by inliner.pep_reference; so this is correct:
ref = (inliner.document.settings.pep_base_url
+ inliner.document.settings.pep_file_url_template % pepnum)
set_classes(options)
return [nodes.reference(rawtext, 'PEP ' + utils.unescape(text), refuri=ref,
**options)], []
register_canonical_role('pep-reference', pep_reference_role)
def rfc_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
try:
rfcnum = int(text)
if rfcnum <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'RFC number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
# Base URL mainly used by inliner.rfc_reference, so this is correct:
ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
set_classes(options)
node = nodes.reference(rawtext, 'RFC ' + utils.unescape(text), refuri=ref,
**options)
return [node], []
register_canonical_role('rfc-reference', rfc_reference_role)
def raw_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
if not inliner.document.settings.raw_enabled:
msg = inliner.reporter.warning('raw (and derived) roles disabled')
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
if 'format' not in options:
msg = inliner.reporter.error(
'No format (Writer name) is associated with this role: "%s".\n'
'The "raw" role cannot be used directly.\n'
'Instead, use the "role" directive to create a new role with '
'an associated format.' % role, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
set_classes(options)
node = nodes.raw(rawtext, utils.unescape(text, 1), **options)
return [node], []
raw_role.options = {'format': directives.unchanged}
register_canonical_role('raw', raw_role)
def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
i = rawtext.find('`')
text = rawtext.split('`')[1]
node = nodes.math(rawtext, text)
return [node], []
register_canonical_role('math', math_role)
######################################################################
# Register roles that are currently unimplemented.
######################################################################
def unimplemented_role(role, rawtext, text, lineno, inliner, attributes={}):
msg = inliner.reporter.error(
'Interpreted text role "%s" not implemented.' % role, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
register_canonical_role('index', unimplemented_role)
register_canonical_role('named-reference', unimplemented_role)
register_canonical_role('anonymous-reference', unimplemented_role)
register_canonical_role('uri-reference', unimplemented_role)
register_canonical_role('footnote-reference', unimplemented_role)
register_canonical_role('citation-reference', unimplemented_role)
register_canonical_role('substitution-reference', unimplemented_role)
register_canonical_role('target', unimplemented_role)
# This should remain unimplemented, for testing purposes:
register_canonical_role('restructuredtext-unimplemented-role',
unimplemented_role)
def set_classes(options):
"""
Auxiliary function to set options['classes'] and delete
options['class'].
"""
if 'class' in options:
assert 'classes' not in options
options['classes'] = options['class']
del options['class']
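# Illustrative example (not part of docutils): register a generic role under a
# hypothetical name and confirm it ends up in the canonical registry. This
# only runs when the module is executed directly.
if __name__ == '__main__':
    register_generic_role('example-strong', nodes.strong)
    print('example-strong' in _role_registry)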
|
|
from datetime import datetime
from decimal import Decimal
from libmt94x.currency_codes import CurrencyCodes
from libmt94x.info_acct_owner_subfields import BeneficiaryParty
from libmt94x.info_acct_owner_subfields import BusinessPurpose
from libmt94x.info_acct_owner_subfields import Charges
from libmt94x.info_acct_owner_subfields import ClientReference
from libmt94x.info_acct_owner_subfields import CounterPartyID
from libmt94x.info_acct_owner_subfields import CounterPartyIdentification
from libmt94x.info_acct_owner_subfields import CreditorID
from libmt94x.info_acct_owner_subfields import EndToEndReference
from libmt94x.info_acct_owner_subfields import ExchangeRate
from libmt94x.info_acct_owner_subfields import InfoToAcccountOwnerSubField
from libmt94x.info_acct_owner_subfields import InstructionID
from libmt94x.info_acct_owner_subfields import MandateReference
from libmt94x.info_acct_owner_subfields import OrderingParty
from libmt94x.info_acct_owner_subfields import PaymentInformationID
from libmt94x.info_acct_owner_subfields import PurposeCode
from libmt94x.info_acct_owner_subfields import RemittanceInformation
from libmt94x.info_acct_owner_subfields import ReturnReason
from libmt94x.info_acct_owner_subfields import UltimateBeneficiary
from libmt94x.info_acct_owner_subfields import UltimateCreditor
from libmt94x.info_acct_owner_subfields import UltimateDebtor
from libmt94x.remittance_info import DutchStructuredRemittanceInfo
from libmt94x.remittance_info import IsoStructuredRemittanceInfo
from libmt94x.remittance_info import UnstructuredRemittanceInfo
from libmt94x.statement_line_subfields import OriginalAmountOfTransaction
from libmt94x.transaction_codes import IngTransactionCodes
from libmt94x.transaction_codes import SwiftTransactionCodes
# NOTE: Module level binding since we want to use the name "type" in method
# signatures
builtin_type = type
class Field(object):
'''Abstract base class for all fields'''
pass
class AbstractBalance(Field):
tag = None
TYPE_CREDIT = 1
TYPE_DEBIT = 2
def __init__(self, type, date, currency, amount):
if type not in (self.TYPE_CREDIT, self.TYPE_DEBIT):
raise ValueError(
"The `type` value must be TYPE_CREDIT or TYPE_DEBIT")
if not builtin_type(date) is datetime:
raise ValueError("The `date` value must be a datetime")
currency_codes = CurrencyCodes.get_instance()
if not currency_codes.code_is_valid(currency):
raise ValueError("Value `currency` is invalid: %s" % currency)
if not builtin_type(amount) is Decimal:
raise ValueError("The `amount` value must be a Decimal")
self.type = type
self.date = date
self.currency = currency
self.amount = amount
class AccountIdentification(Field):
tag = '25'
def __init__(self, iban, iso_currency_code=None):
currency_codes = CurrencyCodes.get_instance()
if (iso_currency_code is not None and
not currency_codes.code_is_valid(iso_currency_code)):
raise ValueError(
"Value `iso_currency_code` is invalid: %s" % iso_currency_code)
self.iban = iban
self.iso_currency_code = iso_currency_code
class ClosingAvailableBalance(AbstractBalance):
tag = '64'
class ClosingBalance(AbstractBalance):
tag = '62F'
class ExportInformation(Field):
'''This is part of the IBP header'''
def __init__(self, export_address, export_number, export_time=None, export_day=None):
self.export_address = export_address
self.export_number = export_number
self.export_time = export_time
self.export_day = export_day
class ForwardAvailableBalance(AbstractBalance):
tag = '65'
class ImportInformation(Field):
'''This is part of the IBP header'''
def __init__(self, import_address, import_number, import_time=None, import_day=None):
self.import_address = import_address
self.import_number = import_number
self.import_time = import_time
self.import_day = import_day
class InformationToAccountOwner(Field):
tag = '86'
def __init__(self, code_words=None, free_form_text=None):
'''The parameters `code_words` and `free_form_text` are exclusive,
meaning the content of this field is either structured (code_words) or
unstructured. The unstructured form is commonly used in the IBP
dialect.'''
if all((code_words, free_form_text)):
raise ValueError("Only one of `code_words` or `free_form_text` may be provided")
code_words = code_words or []
for code_word in code_words:
if not isinstance(code_word, InfoToAcccountOwnerSubField):
raise ValueError(
"All values for `code_words` must be "
"instances of InfoToAcccountOwnerSubField")
self.code_words = code_words
self.free_form_text = free_form_text
# Build dictionary mapping the class -> code_word
by_class = {}
for code_word in code_words:
by_class[code_word.__class__] = code_word
self.by_class = by_class
def flatten(self):
'''Transform code_words to free_form_text of values delimited by a
space (from IBP structured to IBP unstructured). Note that this is a
destructive update.'''
def maybe_add(elems, value):
if value:
elems.append(value)
elems = []
for code_word in self.code_words:
if isinstance(code_word, BeneficiaryParty):
maybe_add(elems, code_word.account_number)
maybe_add(elems, code_word.bic)
maybe_add(elems, code_word.name)
maybe_add(elems, code_word.city)
elif isinstance(code_word, BusinessPurpose):
maybe_add(elems, code_word.id_code)
maybe_add(elems, code_word.sepa_transaction_type)
elif isinstance(code_word, Charges):
maybe_add(elems, code_word.charges)
elif isinstance(code_word, ClientReference):
maybe_add(elems, code_word.client_reference)
elif isinstance(code_word, CounterPartyID):
maybe_add(elems, code_word.account_number)
maybe_add(elems, code_word.bic)
maybe_add(elems, code_word.name)
maybe_add(elems, code_word.city)
elif isinstance(code_word, CounterPartyIdentification):
maybe_add(elems, code_word.id_code)
elif isinstance(code_word, CreditorID):
maybe_add(elems, code_word.creditor_id)
elif isinstance(code_word, EndToEndReference):
maybe_add(elems, code_word.end_to_end_reference)
elif isinstance(code_word, ExchangeRate):
maybe_add(elems, code_word.exchange_rate)
elif isinstance(code_word, InstructionID):
maybe_add(elems, code_word.instruction_id)
elif isinstance(code_word, MandateReference):
maybe_add(elems, code_word.mandate_reference)
elif isinstance(code_word, OrderingParty):
maybe_add(elems, code_word.account_number)
maybe_add(elems, code_word.bic)
maybe_add(elems, code_word.name)
maybe_add(elems, code_word.city)
elif isinstance(code_word, PaymentInformationID):
maybe_add(elems, code_word.payment_information_id)
elif isinstance(code_word, PurposeCode):
maybe_add(elems, code_word.purpose_of_collection)
elif isinstance(code_word, RemittanceInformation):
if isinstance(code_word.remittance_info, UnstructuredRemittanceInfo):
maybe_add(elems, code_word.remittance_info.remittance_info)
elif isinstance(code_word.remittance_info, DutchStructuredRemittanceInfo):
maybe_add(elems, code_word.remittance_info.payment_reference)
elif isinstance(code_word.remittance_info, IsoStructuredRemittanceInfo):
maybe_add(elems, code_word.remittance_info.iso_reference)
elif isinstance(code_word, ReturnReason):
maybe_add(elems, code_word.reason_code)
elif isinstance(code_word, UltimateBeneficiary):
maybe_add(elems, code_word.name)
elif isinstance(code_word, UltimateCreditor):
maybe_add(elems, code_word.name)
maybe_add(elems, code_word.id)
elif isinstance(code_word, UltimateDebtor):
maybe_add(elems, code_word.name)
maybe_add(elems, code_word.id)
line = ' '.join(elems)
self.free_form_text = line
self.code_words = []
def get_code_word_by_cls(self, cls_obj):
return self.by_class.get(cls_obj)
class InformationToAccountOwnerTotals(Field):
tag = '86'
def __init__(self, num_debit, num_credit, amount_debit, amount_credit):
if not builtin_type(num_debit) is int:
raise ValueError("The `num_debit` value must be an int")
if not builtin_type(num_credit) is int:
raise ValueError("The `num_credit` value must be an int")
if not builtin_type(amount_debit) is Decimal:
raise ValueError("The `amount_debit` value must be a Decimal")
if not builtin_type(amount_credit) is Decimal:
raise ValueError("The `amount_credit` value must be a Decimal")
self.num_debit = num_debit
self.num_credit = num_credit
self.amount_debit = amount_debit
self.amount_credit = amount_credit
class OpeningBalance(AbstractBalance):
tag = '60F'
class StatementLine(Field):
tag = '61'
TYPE_CREDIT = 1
TYPE_DEBIT = 2
def __init__(self,
value_date,
type,
amount,
transaction_code,
reference_for_account_owner,
supplementary_details=None,
book_date=None,
ing_transaction_code=None,
transaction_reference=None,
account_servicing_institutions_reference=None,
original_amount_of_transaction=None):
'''
EN/NL terms from specs:
- value_date - Valutadatum
- book_date - Boekdatum
- type - Credit/debet
- amount - Bedrag
- transaction_code - Transactietype
- reference_for_account_owner - Betalingskenmerk
- ing_transaction_code - ING transactiecode
- transaction_reference - Transactiereferentie
- supplementary_details - Aanvullende gegevens
Only MING:
- book_date
- transaction_reference
Only IBP:
- account_servicing_institutions_reference
- original_amount_of_transaction
'''
if not builtin_type(value_date) is datetime:
raise ValueError("The `value_date` value must be a datetime")
if book_date is not None and not builtin_type(book_date) is datetime:
raise ValueError("The `book_date` value must be a datetime")
if type not in (self.TYPE_CREDIT, self.TYPE_DEBIT):
raise ValueError("The `type` value must be TYPE_CREDIT or TYPE_DEBIT")
if not builtin_type(amount) is Decimal:
raise ValueError("The `amount` value must be a Decimal")
swift_transaction_codes = SwiftTransactionCodes.get_instance()
if not swift_transaction_codes.code_is_valid(transaction_code):
raise ValueError(
"Value `transaction_code` is invalid: %s" % transaction_code)
if ing_transaction_code is not None:
ing_transaction_codes = IngTransactionCodes.get_instance()
if not ing_transaction_codes.code_is_valid(ing_transaction_code):
raise ValueError(
"Value `ing_transaction_code` is invalid: %s" % ing_transaction_code)
if (original_amount_of_transaction is not None and
not builtin_type(original_amount_of_transaction) is OriginalAmountOfTransaction):
raise ValueError("The `original_amount_of_transaction` value must "
"be an instance of OriginalAmountOfTransaction")
self.value_date = value_date
self.type = type
self.amount = amount
self.transaction_code = transaction_code
self.reference_for_account_owner = reference_for_account_owner
self.supplementary_details = supplementary_details # not actually used
self.book_date = book_date
self.ing_transaction_code = ing_transaction_code
self.transaction_reference = transaction_reference
self.account_servicing_institutions_reference = account_servicing_institutions_reference
self.original_amount_of_transaction = original_amount_of_transaction
class StatementNumber(Field):
tag = '28C'
def __init__(self, statement_number):
self.statement_number = statement_number
class TransactionReferenceNumber(Field):
tag = '20'
def __init__(self, transaction_reference_number=None):
self.transaction_reference_number = transaction_reference_number
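# Illustrative example (not part of the library): construct a couple of fields
# and exercise the type checks above. The IBAN below is a made-up placeholder,
# and currency validation is skipped because iso_currency_code is omitted.
if __name__ == '__main__':
    demo_totals = InformationToAccountOwnerTotals(
        num_debit=2, num_credit=3,
        amount_debit=Decimal('10.50'), amount_credit=Decimal('99.95'))
    demo_account = AccountIdentification('NL00INGB0000000000')
    print(demo_totals.tag, demo_account.tag)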
|
|
"""Testing for K-means"""
import re
import sys
import numpy as np
from scipy import sparse as sp
from threadpoolctl import threadpool_limits
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils.fixes import _astype_copy_false
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics import pairwise_distances
from sklearn.metrics import pairwise_distances_argmin
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means, kmeans_plusplus
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster._kmeans import _labels_inertia
from sklearn.cluster._kmeans import _mini_batch_step
from sklearn.cluster._k_means_fast import _relocate_empty_clusters_dense
from sklearn.cluster._k_means_fast import _relocate_empty_clusters_sparse
from sklearn.cluster._k_means_fast import _euclidean_dense_dense_wrapper
from sklearn.cluster._k_means_fast import _euclidean_sparse_dense_wrapper
from sklearn.cluster._k_means_fast import _inertia_dense
from sklearn.cluster._k_means_fast import _inertia_sparse
from sklearn.datasets import make_blobs
from io import StringIO
# non centered, sparse centers to check the KMeans implementations on
# non-trivial data
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("algo", ["full", "elkan"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_kmeans_results(array_constr, algo, dtype):
# Checks that KMeans works as intended on toy dataset by comparing with
# expected results computed by hand.
X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=dtype)
sample_weight = [3, 1, 1, 3]
init_centers = np.array([[0, 0], [1, 1]], dtype=dtype)
expected_labels = [0, 0, 1, 1]
expected_inertia = 0.375
expected_centers = np.array([[0.125, 0], [0.875, 1]], dtype=dtype)
expected_n_iter = 2
kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)
kmeans.fit(X, sample_weight=sample_weight)
assert_array_equal(kmeans.labels_, expected_labels)
assert_allclose(kmeans.inertia_, expected_inertia)
assert_allclose(kmeans.cluster_centers_, expected_centers)
assert kmeans.n_iter_ == expected_n_iter
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=['dense', 'sparse'])
@pytest.mark.parametrize("algo", ['full', 'elkan'])
def test_kmeans_relocated_clusters(array_constr, algo):
# check that empty clusters are relocated as expected
X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]])
# second center too far from others points will be empty at first iter
init_centers = np.array([[0.5, 0.5], [3, 3]])
expected_labels = [0, 0, 1, 1]
expected_inertia = 0.25
expected_centers = [[0.25, 0], [0.75, 1]]
expected_n_iter = 3
kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)
kmeans.fit(X)
assert_array_equal(kmeans.labels_, expected_labels)
assert_allclose(kmeans.inertia_, expected_inertia)
assert_allclose(kmeans.cluster_centers_, expected_centers)
assert kmeans.n_iter_ == expected_n_iter
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
def test_relocate_empty_clusters(array_constr):
# test for the _relocate_empty_clusters_(dense/sparse) helpers
# Synthetic dataset with 3 obvious clusters of different sizes
X = np.array(
[-10., -9.5, -9, -8.5, -8, -1, 1, 9, 9.5, 10]).reshape(-1, 1)
X = array_constr(X)
sample_weight = np.ones(10)
# centers all initialized to the first point of X
centers_old = np.array([-10., -10, -10]).reshape(-1, 1)
# With this initialization, all points will be assigned to the first center
# At this point a center in centers_new is the weighted sum of the points
# it contains if it's not empty, otherwise it is the same as before.
centers_new = np.array([-16.5, -10, -10]).reshape(-1, 1)
weight_in_clusters = np.array([10., 0, 0])
labels = np.zeros(10, dtype=np.int32)
if array_constr is np.array:
_relocate_empty_clusters_dense(X, sample_weight, centers_old,
centers_new, weight_in_clusters, labels)
else:
_relocate_empty_clusters_sparse(X.data, X.indices, X.indptr,
sample_weight, centers_old,
centers_new, weight_in_clusters,
labels)
# The relocation scheme will take the 2 points farthest from the center and
# assign them to the 2 empty clusters, i.e. points at 10 and at 9.5. The
# first center will be updated to contain the other 8 points.
assert_array_equal(weight_in_clusters, [8, 1, 1])
assert_allclose(centers_new, [[-36], [10], [9.5]])
@pytest.mark.parametrize("distribution", ["normal", "blobs"])
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("tol", [1e-2, 1e-8, 1e-100, 0])
def test_kmeans_elkan_results(distribution, array_constr, tol):
# Check that results are identical between lloyd and elkan algorithms
rnd = np.random.RandomState(0)
if distribution == "normal":
X = rnd.normal(size=(5000, 10))
else:
X, _ = make_blobs(random_state=rnd)
X[X < 0] = 0
X = array_constr(X)
km_full = KMeans(algorithm="full", n_clusters=5,
random_state=0, n_init=1, tol=tol)
km_elkan = KMeans(algorithm="elkan", n_clusters=5,
random_state=0, n_init=1, tol=tol)
km_full.fit(X)
km_elkan.fit(X)
assert_allclose(km_elkan.cluster_centers_, km_full.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_full.labels_)
assert km_elkan.n_iter_ == km_full.n_iter_
assert km_elkan.inertia_ == pytest.approx(km_full.inertia_, rel=1e-6)
@pytest.mark.parametrize("algorithm", ["full", "elkan"])
def test_kmeans_convergence(algorithm):
# Check that KMeans stops when convergence is reached when tol=0. (#16075)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(5000, 10))
max_iter = 300
km = KMeans(algorithm=algorithm, n_clusters=5, random_state=0,
n_init=1, tol=0, max_iter=max_iter).fit(X)
assert km.n_iter_ < max_iter
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
weight_sums = np.zeros(new_centers.shape[0], dtype=np.double)
weight_sums_csr = np.zeros(new_centers.shape[0], dtype=np.double)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
sample_weight_mb = np.ones(X_mb.shape[0], dtype=np.double)
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, sample_weight_mb, x_mb_squared_norms, new_centers, weight_sums,
buffer, 1, None, random_reassign=False)
assert old_inertia > 0.0
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, sample_weight_mb, x_mb_squared_norms, new_centers)
assert new_inertia > 0.0
assert new_inertia < old_inertia
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, sample_weight_mb, x_mb_squared_norms_csr, new_centers_csr,
weight_sums_csr, buffer_csr, 1, None, random_reassign=False)
assert old_inertia_csr > 0.0
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, sample_weight_mb, x_mb_squared_norms_csr, new_centers_csr)
assert new_inertia_csr > 0.0
assert new_inertia_csr < old_inertia_csr
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert centers.shape == (n_clusters, n_features)
labels = km.labels_
assert np.unique(labels).shape[0] == n_clusters
# check that the labels assignment are perfect (up to a permutation)
assert v_measure_score(true_labels, labels) == 1.0
assert km.inertia_ > 0.0
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize("init", ["random", "k-means++", centers,
lambda X, k, random_state: centers],
ids=["random", "k-means++", "ndarray", "callable"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_all_init(Estimator, data, init):
# Check KMeans and MiniBatchKMeans with all possible init.
n_init = 10 if isinstance(init, str) else 1
km = Estimator(init=init, n_clusters=n_clusters, random_state=42,
n_init=n_init).fit(data)
_check_fitted_model(km)
@pytest.mark.parametrize("init", ["random", "k-means++", centers,
lambda X, k, random_state: centers],
ids=["random", "k-means++", "ndarray", "callable"])
def test_minibatch_kmeans_partial_fit_init(init):
# Check MiniBatchKMeans init with partial_fit
n_init = 10 if isinstance(init, str) else 1
km = MiniBatchKMeans(init=init, n_clusters=n_clusters, random_state=0,
n_init=n_init)
for i in range(100):
# "random" init requires many batches to recover the true labels.
km.partial_fit(X)
_check_fitted_model(km)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_fortran_aligned_data(Estimator):
# Check that KMeans works with fortran-aligned data.
X_fortran = np.asfortranarray(X)
centers_fortran = np.asfortranarray(centers)
km_c = Estimator(n_clusters=n_clusters, init=centers, n_init=1,
random_state=42).fit(X)
km_f = Estimator(n_clusters=n_clusters, init=centers_fortran, n_init=1,
random_state=42).fit(X_fortran)
assert_allclose(km_c.cluster_centers_, km_f.cluster_centers_)
assert_array_equal(km_c.labels_, km_f.labels_)
@pytest.mark.parametrize('algo', ['full', 'elkan'])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('constructor', [np.asarray, sp.csr_matrix])
@pytest.mark.parametrize('seed, max_iter, tol', [
(0, 2, 1e-7), # strict non-convergence
(1, 2, 1e-1), # loose non-convergence
(3, 300, 1e-7), # strict convergence
(4, 300, 1e-1), # loose convergence
])
def test_k_means_fit_predict(algo, dtype, constructor, seed, max_iter, tol):
    # check that fit(X).predict(X) gives the same result as fit_predict(X)
    # There's a very small chance of failure with elkan on an unstructured
    # dataset because the predict method uses a fast euclidean distances
    # computation which may cause small numerical instabilities.
    # NB: This test is largely redundant with respect to test_predict and
    # test_predict_equal_labels. This test has the added effect of
    # testing idempotence of the fitting procedure, which appears to
    # be where it fails on some MacOS setups.
if sys.platform == "darwin":
pytest.xfail(
"Known failures on MacOS, See "
"https://github.com/scikit-learn/scikit-learn/issues/12644")
rng = np.random.RandomState(seed)
X = make_blobs(n_samples=1000, n_features=10, centers=10,
random_state=rng)[0].astype(dtype, copy=False)
X = constructor(X)
kmeans = KMeans(algorithm=algo, n_clusters=10, random_state=seed,
tol=tol, max_iter=max_iter)
labels_1 = kmeans.fit(X).predict(X)
labels_2 = kmeans.fit_predict(X)
# Due to randomness in the order in which chunks of data are processed when
# using more than one thread, the absolute values of the labels can be
# different between the 2 strategies but they should correspond to the same
# clustering.
assert v_measure_score(labels_1, labels_2) == pytest.approx(1, abs=1e-15)
def test_minibatch_kmeans_verbose():
# Check verbose mode of MiniBatchKMeans for better coverage.
km = MiniBatchKMeans(n_clusters=n_clusters, random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
km.fit(X)
finally:
sys.stdout = old_stdout
@pytest.mark.parametrize("algorithm", ["full", "elkan"])
@pytest.mark.parametrize("tol", [1e-2, 0])
def test_kmeans_verbose(algorithm, tol, capsys):
# Check verbose mode of KMeans for better coverage.
X = np.random.RandomState(0).normal(size=(5000, 10))
KMeans(algorithm=algorithm, n_clusters=n_clusters, random_state=42,
init="random", n_init=1, tol=tol, verbose=1).fit(X)
captured = capsys.readouterr()
assert re.search(r"Initialization complete", captured.out)
assert re.search(r"Iteration [0-9]+, inertia", captured.out)
if tol == 0:
assert re.search(r"strict convergence", captured.out)
else:
assert re.search(r"center shift .* within tolerance", captured.out)
def test_minibatch_kmeans_warning_init_size():
# Check that a warning is raised when init_size is smaller than n_clusters
with pytest.warns(RuntimeWarning,
match=r"init_size.* should be larger than n_clusters"):
MiniBatchKMeans(init_size=10, n_clusters=20).fit(X)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_warning_n_init_precomputed_centers(Estimator):
# Check that a warning is raised when n_init > 1 and an array is passed for
# the init parameter.
with pytest.warns(RuntimeWarning,
match="Explicit initial center position passed: "
"performing only one init"):
Estimator(init=centers, n_clusters=n_clusters, n_init=10).fit(X)
def test_minibatch_sensible_reassign():
# check that identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
random_state=42)
zeroed_X[::2, :] = 0
km = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random").fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert km.cluster_centers_.any(axis=1).sum() > 10
# do the same with batch-size > X.shape[0] (regression test)
km = MiniBatchKMeans(n_clusters=20, batch_size=200, random_state=42,
init="random").fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert km.cluster_centers_.any(axis=1).sum() > 10
# do the same with partial_fit API
km = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
km.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert km.cluster_centers_.any(axis=1).sum() > 10
def test_minibatch_reassign():
    # Give a perfect initialization, but a large reassignment_ratio; as a
    # result all the centers should be reassigned and the model should no
    # longer be good
sample_weight = np.ones(X.shape[0], dtype=X.dtype)
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, sample_weight, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means._counts,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert score_before > mb_k_means.score(this_X)
    # Give a perfect initialization, with a small reassignment_ratio; no
    # center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, sample_weight, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means._counts,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
    # Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign is
    # always bigger than the batch_size.
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_minibatch_kmeans_init_size():
# Check the internal _init_size attribute of MiniBatchKMeans
# default init size should be 3 * batch_size
km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1).fit(X)
assert km._init_size == 15
# if 3 * batch size < n_clusters, it should then be 3 * n_clusters
km = MiniBatchKMeans(n_clusters=10, batch_size=1, n_init=1).fit(X)
assert km._init_size == 30
# it should not be larger than n_samples
km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1,
init_size=n_samples + 1).fit(X)
assert km._init_size == n_samples
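# The assertions above exercise the init_size heuristic of MiniBatchKMeans.
# Below is a minimal sketch of that heuristic (illustrative only, not the
# estimator's actual private code; it also ignores the warning path taken when
# an explicit init_size is smaller than n_clusters).
def _expected_init_size(n_clusters, batch_size, n_samples, init_size=None):
    if init_size is None:
        init_size = 3 * batch_size
        if init_size < n_clusters:
            init_size = 3 * n_clusters
    return min(init_size, n_samples)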
def test_kmeans_copyx():
    # Check that with copy_x=False the data is de-centered in place but
    # restored (nearly equal to the original X) after fit.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check that my_X is de-centered
assert_allclose(my_X, X)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_score_max_iter(Estimator):
    # Check that fitting KMeans or MiniBatchKMeans with more iterations gives
    # a better score
X = np.random.RandomState(0).randn(100, 10)
km1 = Estimator(n_init=1, random_state=42, max_iter=1)
s1 = km1.fit(X).score(X)
km2 = Estimator(n_init=1, random_state=42, max_iter=10)
s2 = km2.fit(X).score(X)
assert s2 > s1
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("init", ["random", "k-means++"])
@pytest.mark.parametrize("Estimator, algorithm", [
(KMeans, "full"),
(KMeans, "elkan"),
(MiniBatchKMeans, None)
])
def test_predict(Estimator, algorithm, init, dtype, array_constr):
# Check the predict method and the equivalence between fit.predict and
# fit_predict.
    # There's a very small chance of failure with elkan on an unstructured
    # dataset because the predict method uses a fast euclidean distances
    # computation which may cause small numerical instabilities.
if sys.platform == "darwin":
pytest.xfail(
"Known failures on MacOS, See "
"https://github.com/scikit-learn/scikit-learn/issues/12644")
X, _ = make_blobs(n_samples=500, n_features=10, centers=10, random_state=0)
X = array_constr(X)
# With n_init = 1
km = Estimator(n_clusters=10, init=init, n_init=1, random_state=0)
if algorithm is not None:
km.set_params(algorithm=algorithm)
km.fit(X)
labels = km.labels_
# re-predict labels for training set using predict
pred = km.predict(X)
assert_array_equal(pred, labels)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, labels)
# predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(10))
# With n_init > 1
# Due to randomness in the order in which chunks of data are processed when
# using more than one thread, there might be different rounding errors for
# the computation of the inertia between 2 runs. This might result in a
# different ranking of 2 inits, hence a different labeling, even if they
# give the same clustering. We only check the labels up to a permutation.
km = Estimator(n_clusters=10, init=init, n_init=10, random_state=0)
if algorithm is not None:
km.set_params(algorithm=algorithm)
km.fit(X)
labels = km.labels_
# re-predict labels for training set using predict
pred = km.predict(X)
assert_allclose(v_measure_score(pred, labels), 1)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_allclose(v_measure_score(pred, labels), 1)
# predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_allclose(v_measure_score(pred, np.arange(10)), 1)
@pytest.mark.parametrize("init", ["random", "k-means++", centers],
ids=["random", "k-means++", "ndarray"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_predict_dense_sparse(Estimator, init):
    # check that models trained on sparse input also work for dense input at
    # predict time, and vice versa.
n_init = 10 if isinstance(init, str) else 1
km = Estimator(n_clusters=n_clusters, init=init, n_init=n_init,
random_state=0)
km.fit(X_csr)
assert_array_equal(km.predict(X), km.labels_)
km.fit(X)
assert_array_equal(km.predict(X_csr), km.labels_)
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("init", ["k-means++", "ndarray"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_integer_input(Estimator, array_constr, dtype, init):
# Check that KMeans and MiniBatchKMeans work with integer input.
X_dense = np.array([[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]])
X = array_constr(X_dense, dtype=dtype)
n_init = 1 if init == "ndarray" else 10
init = X_dense[:2] if init == "ndarray" else init
km = Estimator(n_clusters=2, init=init, n_init=n_init, random_state=0)
if Estimator is MiniBatchKMeans:
km.set_params(batch_size=2)
km.fit(X)
# Internally integer input should be converted to float64
assert km.cluster_centers_.dtype == np.float64
expected_labels = [0, 1, 1, 0, 0, 1]
assert_allclose(v_measure_score(km.labels_, expected_labels), 1)
# Same with partial_fit (#14314)
if Estimator is MiniBatchKMeans:
km = clone(km).partial_fit(X)
assert km.cluster_centers_.dtype == np.float64
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_transform(Estimator):
# Check the transform method
km = Estimator(n_clusters=n_clusters).fit(X)
    # Transforming cluster_centers_ should return the pairwise distances
    # between centers
Xt = km.transform(km.cluster_centers_)
assert_allclose(Xt, pairwise_distances(km.cluster_centers_))
# In particular, diagonal must be 0
assert_array_equal(Xt.diagonal(), np.zeros(n_clusters))
    # Transforming X should return the pairwise distances between X and the
    # centers
Xt = km.transform(X)
assert_allclose(Xt, pairwise_distances(X, km.cluster_centers_))
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_fit_transform(Estimator):
# Check equivalence between fit.transform and fit_transform
X1 = Estimator(random_state=0, n_init=1).fit(X).transform(X)
X2 = Estimator(random_state=0, n_init=1).fit_transform(X)
assert_allclose(X1, X2)
def test_n_init():
# Check that increasing the number of init increases the quality
previous_inertia = np.inf
for n_init in [1, 5, 10]:
# set max_iter=1 to avoid finding the global minimum and get the same
# inertia each time
        km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
                    random_state=0, max_iter=1).fit(X)
        assert km.inertia_ <= previous_inertia
        previous_inertia = km.inertia_
def test_k_means_function():
# test calling the k_means function directly
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
sample_weight=None)
assert cluster_centers.shape == (n_clusters, n_features)
assert np.unique(labels).shape[0] == n_clusters
    # check that the labels assignment is perfect (up to a permutation)
assert_allclose(v_measure_score(true_labels, labels), 1.0)
assert inertia > 0.0
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_float_precision(Estimator, data):
# Check that the results are the same for single and double precision.
km = Estimator(n_init=1, random_state=0)
inertia = {}
Xt = {}
centers = {}
labels = {}
for dtype in [np.float64, np.float32]:
X = data.astype(dtype, **_astype_copy_false(data))
km.fit(X)
inertia[dtype] = km.inertia_
Xt[dtype] = km.transform(X)
centers[dtype] = km.cluster_centers_
labels[dtype] = km.labels_
# dtype of cluster centers has to be the dtype of the input data
assert km.cluster_centers_.dtype == dtype
# same with partial_fit
if Estimator is MiniBatchKMeans:
km.partial_fit(X[0:3])
assert km.cluster_centers_.dtype == dtype
# compare arrays with low precision since the difference between 32 and
# 64 bit comes from an accumulation of rounding errors.
assert_allclose(inertia[np.float32], inertia[np.float64], rtol=1e-5)
assert_allclose(Xt[np.float32], Xt[np.float64], rtol=1e-5)
assert_allclose(centers[np.float32], centers[np.float64], rtol=1e-5)
assert_array_equal(labels[np.float32], labels[np.float64])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_centers_not_mutated(Estimator, dtype):
# Check that KMeans and MiniBatchKMeans won't mutate the user provided
# init centers silently even if input data and init centers have the same
# type.
X_new_type = X.astype(dtype, copy=False)
centers_new_type = centers.astype(dtype, copy=False)
km = Estimator(init=centers_new_type, n_clusters=n_clusters, n_init=1)
km.fit(X_new_type)
assert not np.may_share_memory(km.cluster_centers_, centers_new_type)
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
def test_kmeans_init_fitted_centers(data):
# Check that starting fitting from a local optimum shouldn't change the
# solution
km1 = KMeans(n_clusters=n_clusters).fit(data)
km2 = KMeans(n_clusters=n_clusters, init=km1.cluster_centers_,
n_init=1).fit(data)
assert_allclose(km1.cluster_centers_, km2.cluster_centers_)
def test_kmeans_warns_less_centers_than_unique_points():
# Check KMeans when the number of found clusters is smaller than expected
X = np.asarray([[0, 0],
[0, 1],
[1, 0],
[1, 0]]) # last point is duplicated
km = KMeans(n_clusters=4)
# KMeans should warn that fewer labels than cluster centers have been used
msg = (r"Number of distinct clusters \(3\) found smaller than "
r"n_clusters \(4\). Possibly due to duplicate points in X.")
with pytest.warns(ConvergenceWarning, match=msg):
km.fit(X)
# only three distinct points, so only three clusters
# can have points assigned to them
assert set(km.labels_) == set(range(3))
def _sort_centers(centers):
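    # Sort each feature column independently; the tests below use this to
    # compare sets of centers without depending on the cluster ordering.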
return np.sort(centers, axis=0)
def test_weighted_vs_repeated():
    # Check that a sample weight of N yields the same result as an N-fold
    # repetition of the sample. Valid only if init is precomputed, otherwise
    # rng produces different results. Not valid for MiniBatchKMeans because of
    # the rng used to extract minibatches.
sample_weight = np.random.RandomState(0).randint(1, 5, size=n_samples)
X_repeat = np.repeat(X, sample_weight, axis=0)
km = KMeans(init=centers, n_init=1, n_clusters=n_clusters, random_state=0)
km_weighted = clone(km).fit(X, sample_weight=sample_weight)
repeated_labels = np.repeat(km_weighted.labels_, sample_weight)
km_repeated = clone(km).fit(X_repeat)
assert_array_equal(km_repeated.labels_, repeated_labels)
assert_allclose(km_weighted.inertia_, km_repeated.inertia_)
assert_allclose(_sort_centers(km_weighted.cluster_centers_),
_sort_centers(km_repeated.cluster_centers_))
@pytest.mark.parametrize("data", [X, X_csr], ids=["dense", "sparse"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_unit_weights_vs_no_weights(Estimator, data):
# Check that not passing sample weights should be equivalent to passing
# sample weights all equal to one.
sample_weight = np.ones(n_samples)
km = Estimator(n_clusters=n_clusters, random_state=42, n_init=1)
km_none = clone(km).fit(data, sample_weight=None)
km_ones = clone(km).fit(data, sample_weight=sample_weight)
assert_array_equal(km_none.labels_, km_ones.labels_)
assert_allclose(km_none.cluster_centers_, km_ones.cluster_centers_)
def test_scaled_weights():
# scaling all sample weights by a common factor
# shouldn't change the result
sample_weight = np.ones(n_samples)
for estimator in [KMeans(n_clusters=n_clusters, random_state=42),
MiniBatchKMeans(n_clusters=n_clusters, random_state=42)]:
est_1 = clone(estimator).fit(X)
est_2 = clone(estimator).fit(X, sample_weight=0.5*sample_weight)
assert_almost_equal(v_measure_score(est_1.labels_, est_2.labels_), 1.0)
assert_almost_equal(_sort_centers(est_1.cluster_centers_),
_sort_centers(est_2.cluster_centers_))
def test_kmeans_elkan_iter_attribute():
    # Regression test on bad n_iter_ value. Previously n_iter_ was off by one
    # from its correct value (#11340).
km = KMeans(algorithm="elkan", max_iter=1).fit(X)
assert km.n_iter_ == 1
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
def test_kmeans_empty_cluster_relocated(array_constr):
# check that empty clusters are correctly relocated when using sample
# weights (#13486)
X = array_constr([[-1], [1]])
sample_weight = [1.9, 0.1]
init = np.array([[-1], [10]])
km = KMeans(n_clusters=2, init=init, n_init=1)
km.fit(X, sample_weight=sample_weight)
assert len(set(km.labels_)) == 2
assert_allclose(km.cluster_centers_, [[-1], [1]])
def test_result_of_kmeans_equal_in_diff_n_threads():
    # Check that KMeans gives the same results in parallel mode as in
    # sequential mode.
rnd = np.random.RandomState(0)
X = rnd.normal(size=(50, 10))
with threadpool_limits(limits=1, user_api="openmp"):
result_1 = KMeans(
n_clusters=3, random_state=0).fit(X).labels_
with threadpool_limits(limits=2, user_api="openmp"):
result_2 = KMeans(
n_clusters=3, random_state=0).fit(X).labels_
assert_array_equal(result_1, result_2)
@pytest.mark.parametrize("precompute_distances", ["auto", False, True])
def test_precompute_distance_deprecated(precompute_distances):
# FIXME: remove in 0.25
depr_msg = ("'precompute_distances' was deprecated in version 0.23 and "
"will be removed in 0.25.")
X, _ = make_blobs(n_samples=10, n_features=2, centers=2, random_state=0)
kmeans = KMeans(n_clusters=2, n_init=1, init='random', random_state=0,
precompute_distances=precompute_distances)
with pytest.warns(FutureWarning, match=depr_msg):
kmeans.fit(X)
@pytest.mark.parametrize("n_jobs", [None, 1])
def test_n_jobs_deprecated(n_jobs):
# FIXME: remove in 0.25
depr_msg = ("'n_jobs' was deprecated in version 0.23 and will be removed "
"in 0.25.")
X, _ = make_blobs(n_samples=10, n_features=2, centers=2, random_state=0)
kmeans = KMeans(n_clusters=2, n_init=1, init='random', random_state=0,
n_jobs=n_jobs)
with pytest.warns(FutureWarning, match=depr_msg):
kmeans.fit(X)
@pytest.mark.parametrize("attr", ["counts_", "init_size_", "random_state_"])
def test_minibatch_kmeans_deprecated_attributes(attr):
    # check that a deprecation warning is raised when accessing the deprecated
    # attributes
# FIXME: remove in 0.26
depr_msg = (f"The attribute '{attr}' is deprecated in 0.24 and will be "
f"removed in 0.26.")
km = MiniBatchKMeans(n_clusters=2, n_init=1, init='random', random_state=0)
km.fit(X)
with pytest.warns(FutureWarning, match=depr_msg):
getattr(km, attr)
def test_warning_elkan_1_cluster():
# Check warning messages specific to KMeans
with pytest.warns(RuntimeWarning,
match="algorithm='elkan' doesn't make sense for a single"
" cluster"):
KMeans(n_clusters=1, algorithm="elkan").fit(X)
@pytest.mark.parametrize("array_constr", [np.array, sp.csr_matrix],
ids=["dense", "sparse"])
@pytest.mark.parametrize("algo", ["full", "elkan"])
def test_k_means_1_iteration(array_constr, algo):
# check the results after a single iteration (E-step M-step E-step) by
# comparing against a pure python implementation.
X = np.random.RandomState(0).uniform(size=(100, 5))
init_centers = X[:5]
X = array_constr(X)
def py_kmeans(X, init):
new_centers = init.copy()
labels = pairwise_distances_argmin(X, init)
for label in range(init.shape[0]):
new_centers[label] = X[labels == label].mean(axis=0)
labels = pairwise_distances_argmin(X, new_centers)
return labels, new_centers
py_labels, py_centers = py_kmeans(X, init_centers)
cy_kmeans = KMeans(n_clusters=5, n_init=1, init=init_centers,
algorithm=algo, max_iter=1).fit(X)
cy_labels = cy_kmeans.labels_
cy_centers = cy_kmeans.cluster_centers_
assert_array_equal(py_labels, cy_labels)
assert_allclose(py_centers, cy_centers)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("squared", [True, False])
def test_euclidean_distance(dtype, squared):
# Check that the _euclidean_(dense/sparse)_dense helpers produce correct
# results
rng = np.random.RandomState(0)
a_sparse = sp.random(1, 100, density=0.5, format="csr", random_state=rng,
dtype=dtype)
a_dense = a_sparse.toarray().reshape(-1)
b = rng.randn(100).astype(dtype, copy=False)
b_squared_norm = (b**2).sum()
expected = ((a_dense - b)**2).sum()
expected = expected if squared else np.sqrt(expected)
distance_dense_dense = _euclidean_dense_dense_wrapper(a_dense, b, squared)
distance_sparse_dense = _euclidean_sparse_dense_wrapper(
a_sparse.data, a_sparse.indices, b, b_squared_norm, squared)
assert_allclose(distance_dense_dense, distance_sparse_dense, rtol=1e-6)
assert_allclose(distance_dense_dense, expected, rtol=1e-6)
assert_allclose(distance_sparse_dense, expected, rtol=1e-6)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_inertia(dtype):
rng = np.random.RandomState(0)
X_sparse = sp.random(100, 10, density=0.5, format="csr", random_state=rng,
dtype=dtype)
X_dense = X_sparse.toarray()
sample_weight = rng.randn(100).astype(dtype, copy=False)
centers = rng.randn(5, 10).astype(dtype, copy=False)
labels = rng.randint(5, size=100, dtype=np.int32)
distances = ((X_dense - centers[labels])**2).sum(axis=1)
expected = np.sum(distances * sample_weight)
inertia_dense = _inertia_dense(X_dense, sample_weight, centers, labels)
inertia_sparse = _inertia_sparse(X_sparse, sample_weight, centers, labels)
assert_allclose(inertia_dense, inertia_sparse, rtol=1e-6)
assert_allclose(inertia_dense, expected, rtol=1e-6)
assert_allclose(inertia_sparse, expected, rtol=1e-6)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_sample_weight_unchanged(Estimator):
# Check that sample_weight is not modified in place by KMeans (#17204)
X = np.array([[1], [2], [4]])
sample_weight = np.array([0.5, 0.2, 0.3])
Estimator(n_clusters=2, random_state=0).fit(X, sample_weight=sample_weight)
assert_array_equal(sample_weight, np.array([0.5, 0.2, 0.3]))
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
@pytest.mark.parametrize("param, match", [
({"n_init": 0}, r"n_init should be > 0"),
({"max_iter": 0}, r"max_iter should be > 0"),
({"n_clusters": n_samples + 1}, r"n_samples.* should be >= n_clusters"),
({"init": X[:2]},
r"The shape of the initial centers .* does not match "
r"the number of clusters"),
({"init": lambda X_, k, random_state: X_[:2]},
r"The shape of the initial centers .* does not match "
r"the number of clusters"),
({"init": X[:8, :2]},
r"The shape of the initial centers .* does not match "
r"the number of features of the data"),
({"init": lambda X_, k, random_state: X_[:8, :2]},
r"The shape of the initial centers .* does not match "
r"the number of features of the data"),
({"init": "wrong"},
r"init should be either 'k-means\+\+', 'random', "
r"a ndarray or a callable")]
)
def test_wrong_params(Estimator, param, match):
    # Check that an error is raised with a clear error message when wrong
    # values are passed for the parameters
# Set n_init=1 by default to avoid warning with precomputed init
km = Estimator(n_init=1)
with pytest.raises(ValueError, match=match):
km.set_params(**param).fit(X)
@pytest.mark.parametrize("param, match", [
({"algorithm": "wrong"}, r"Algorithm must be 'auto', 'full' or 'elkan'")]
)
def test_kmeans_wrong_params(param, match):
    # Check that an error is raised with a clear error message when wrong
    # values are passed for the KMeans specific parameters
with pytest.raises(ValueError, match=match):
KMeans(**param).fit(X)
@pytest.mark.parametrize("param, match", [
({"max_no_improvement": -1}, r"max_no_improvement should be >= 0"),
({"batch_size": -1}, r"batch_size should be > 0"),
({"init_size": -1}, r"init_size should be > 0"),
({"reassignment_ratio": -1}, r"reassignment_ratio should be >= 0")]
)
def test_minibatch_kmeans_wrong_params(param, match):
    # Check that an error is raised with a clear error message when wrong
    # values are passed for the MiniBatchKMeans specific parameters
with pytest.raises(ValueError, match=match):
MiniBatchKMeans(**param).fit(X)
@pytest.mark.parametrize("param, match", [
({"n_local_trials": 0},
r"n_local_trials is set to 0 but should be an "
r"integer value greater than zero"),
({"x_squared_norms": X[:2]},
r"The length of x_squared_norms .* should "
r"be equal to the length of n_samples")]
)
def test_kmeans_plusplus_wrong_params(param, match):
with pytest.raises(ValueError, match=match):
kmeans_plusplus(X, n_clusters, **param)
@pytest.mark.parametrize("data", [X, X_csr])
@pytest.mark.parametrize("dtype", [np.float64, np.float32])
def test_kmeans_plusplus_output(data, dtype):
# Check for the correct number of seeds and all positive values
data = data.astype(dtype)
centers, indices = kmeans_plusplus(data, n_clusters)
# Check there are the correct number of indices and that all indices are
# positive and within the number of samples
assert indices.shape[0] == n_clusters
assert (indices >= 0).all()
assert (indices <= data.shape[0]).all()
# Check for the correct number of seeds and that they are bound by the data
assert centers.shape[0] == n_clusters
assert (centers.max(axis=0) <= data.max(axis=0)).all()
assert (centers.min(axis=0) >= data.min(axis=0)).all()
# Check that indices correspond to reported centers
# Use X for comparison rather than data, test still works against centers
# calculated with sparse data.
assert_allclose(X[indices].astype(dtype), centers)
@pytest.mark.parametrize("x_squared_norms", [row_norms(X, squared=True), None])
def test_kmeans_plusplus_norms(x_squared_norms):
# Check that defining x_squared_norms returns the same as default=None.
centers, indices = kmeans_plusplus(X, n_clusters,
x_squared_norms=x_squared_norms)
assert_allclose(X[indices], centers)
def test_kmeans_plusplus_dataorder():
    # Check that the memory layout does not affect the result
centers_c, _ = kmeans_plusplus(X, n_clusters, random_state=0)
X_fortran = np.asfortranarray(X)
centers_fortran, _ = kmeans_plusplus(X_fortran, n_clusters, random_state=0)
assert_allclose(centers_c, centers_fortran)
|
|
from __future__ import unicode_literals, division, absolute_import
import logging
import math
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.plugin import PluginError
from flexget.utils.cached_input import cached
from flexget.utils.requests import Session
log = logging.getLogger('whatcd')
class InputWhatCD(object):
"""A plugin that searches what.cd
== Usage:
All parameters except `username` and `password` are optional.
whatcd:
username:
password:
user_agent: (A custom user-agent for the client to report.
It is NOT A GOOD IDEA to spoof a browser with
this. You are responsible for your account.)
search: (general search filter)
artist: (artist name)
album: (album name)
year: (album year)
encoding: (encoding specifics - 192, 320, lossless, etc.)
format: (MP3, FLAC, AAC, etc.)
media: (CD, DVD, vinyl, Blu-ray, etc.)
release_type: (album, soundtrack, EP, etc.)
log: (log specification - true, false, '100%', or '<100%')
hascue: (has a cue file - true or false)
scene: (is a scene release - true or false)
vanityhouse: (is a vanity house release - true or false)
leech_type: ('freeleech', 'neutral', 'either', or 'normal')
tags: (a list of tags to match - drum.and.bass, new.age, blues, etc.)
tag_type: (match 'any' or 'all' of the items in `tags`)
"""
# Aliases for config -> api params
ALIASES = {
"artist": "artistname",
"album": "groupname",
"leech_type": "freetorrent",
"release_type": "releaseType",
"tags": "taglist",
"tag_type": "tags_type",
"search": "searchstr",
"log": "haslog",
}
# API parameters
# None means a raw value entry (no validation)
# A dict means a choice with a mapping for the API
# A list is just a choice with no mapping
PARAMS = {
"searchstr": None,
"taglist": None,
"artistname": None,
"groupname": None,
"year": None,
"tags_type": {
"any": 0,
"all": 1,
},
"encoding": [
"192", "APS (VBR)", "V2 (VBR)", "V1 (VBR)", "256", "APX (VBR)",
"V0 (VBR)", "320", "lossless", "24bit lossless", "V8 (VBR)"
],
"format": [
"MP3", "FLAC", "AAC", "AC3", "DTS"
],
"media": [
"CD", "DVD", "vinyl", "soundboard", "SACD", "DAT", "cassette",
"WEB", "Blu-ray"
],
"releaseType": {
"album": 1,
"soundtrack": 3,
"EP": 5,
"anthology": 6,
"compilation": 7,
"DJ mix": 8,
"single": 9,
"live album": 11,
"remix": 13,
"bootleg": 14,
"interview": 15,
"mixtape": 16,
"unknown": 21,
"concert recording": 22,
"demo": 23
},
"haslog": {
"False": 0,
"True": 1,
"100%": 100,
"<100%": -1
},
"freetorrent": {
"freeleech": 1,
"neutral": 2,
"either": 3,
"normal": 0,
},
"hascue": {
"False": 0,
"True": 1,
},
"scene": {
"False": 0,
"True": 1,
},
"vanityhouse": {
"False": 0,
"True": 1,
}
}
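    # For example (illustrative): _getval('release_type', 'EP') returns 5 via
    # the releaseType mapping, _getval('encoding', '320') passes '320' through
    # unchanged, and _getval('tags', ['blues', 'jazz']) joins the list into
    # 'blues,jazz'.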
def _key(self, key):
"""Gets the API key name from the entered key"""
if key in self.ALIASES:
return self.ALIASES[key]
return key
def _opts(self, key):
"""Gets the options for the specified key"""
return self.PARAMS[self._key(key)]
def _getval(self, key, val):
"""Gets the value for the specified key based on a config option"""
opts = self._opts(key)
if isinstance(opts, dict):
# Translate the input value to the What.CD API value
# The str cast converts bools to 'True'/'False' for use as keys
# This allows for options that have True/False/Other values
return opts[str(val)]
        elif isinstance(val, list):
            # Join list values (e.g. tags) into the comma-separated string
            # that the API expects
            return ",".join(val)
return val
def __init__(self):
"""Set up the schema"""
self.schema = {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'},
'user_agent': {'type': 'string'},
'search': {'type': 'string'},
'artist': {'type': 'string'},
'album': {'type': 'string'},
'year': {'type': ['string', 'integer']},
'tags': one_or_more({'type': 'string'}),
'tag_type': {'type': 'string', 'enum': self._opts('tag_type').keys()},
'encoding': {'type': 'string', 'enum': self._opts('encoding')},
'format': {'type': 'string', 'enum': self._opts('format')},
'media': {'type': 'string', 'enum': self._opts('media')},
'release_type': {'type': 'string', 'enum': self._opts('release_type').keys()},
'log': {'oneOf': [{'type': 'string', 'enum': self._opts('log').keys()}, {'type': 'boolean'}]},
'leech_type': {'type': 'string', 'enum': self._opts('leech_type').keys()},
'hascue': {'type': 'boolean'},
'scene': {'type': 'boolean'},
'vanityhouse': {'type': 'boolean'},
},
'required': ['username', 'password'],
'additionalProperties': False
}
def _login(self, user, passwd):
"""
Log in and store auth data from the server
Adapted from https://github.com/isaaczafuta/whatapi
"""
data = {
'username': user,
'password': passwd,
'keeplogged': 1,
}
r = self.session.post("https://ssl.what.cd/login.php", data=data,
allow_redirects=False)
if r.status_code != 302 or r.headers.get('location') != "index.php":
raise PluginError("Failed to log in to What.cd")
accountinfo = self._request('index')
self.authkey = accountinfo['authkey']
self.passkey = accountinfo['passkey']
log.info("Logged in to What.cd")
def _request(self, action, page=None, **kwargs):
"""
Make an AJAX request to a given action page
Adapted from https://github.com/isaaczafuta/whatapi
"""
ajaxpage = "https://ssl.what.cd/ajax.php"
params = {}
# Filter params and map config values -> api values
for k, v in kwargs.items():
params[self._key(k)] = self._getval(k, v)
# Params other than the searching ones
params['action'] = action
if page:
params['page'] = page
r = self.session.get(ajaxpage, params=params, allow_redirects=False)
if r.status_code != 200:
raise PluginError("What.cd returned a non-200 status code")
try:
json_response = r.json()
if json_response['status'] != "success":
# Try to deal with errors returned by the API
error = json_response.get('error', json_response.get('status'))
if not error or error == "failure":
error = json_response.get('response', str(json_response))
raise PluginError("What.cd gave a failure response: "
"'{}'".format(error))
return json_response['response']
        except (ValueError, TypeError, KeyError):
raise PluginError("What.cd returned an invalid response")
def _search_results(self, config):
"""Generator that yields search results"""
page = 1
pages = None
while True:
if pages and page >= pages:
break
log.debug("Attempting to get page {} of search results".format(page))
result = self._request('browse', page=page, **config)
if not result['results']:
break
for x in result['results']:
yield x
pages = result.get('pages', pages)
page += 1
def _get_entries(self, search_results):
"""Genertor that yields Entry objects from search results"""
for result in search_results:
# Get basic information on the release
info = dict((k, result[k]) for k in ('artist', 'groupName', 'groupYear'))
# Releases can have multiple download options
for tor in result['torrents']:
temp = info.copy()
temp.update(dict((k, tor[k]) for k in ('media', 'encoding', 'format', 'torrentId')))
yield Entry(
title="{artist} - {groupName} - {groupYear} "
"({media} - {format} - {encoding})-{torrentId}.torrent".format(**temp),
url="https://what.cd/torrents.php?action=download&"
"id={}&authkey={}&torrent_pass={}".format(temp['torrentId'], self.authkey, self.passkey),
torrent_seeds=tor['seeders'],
torrent_leeches=tor['leechers'],
# Size is returned in bytes, convert to MB for compat with the content_size plugin
content_size=math.floor(tor['size'] / (1024**2))
)
@cached('whatcd')
@plugin.internet(log)
def on_task_input(self, task, config):
"""Search on What.cd"""
self.session = Session()
# From the API docs: "Refrain from making more than five (5) requests every ten (10) seconds"
self.session.set_domain_delay('ssl.what.cd', '2 seconds')
# Custom user agent
user_agent = config.pop('user_agent', None)
if user_agent:
self.session.headers.update({"User-Agent": user_agent})
# Login
self._login(config.pop('username'), config.pop('password'))
# Logged in successfully, it's ok if nothing matches
task.no_entries_ok = True
# NOTE: Any values still in config at this point MUST be valid search parameters
# Perform the search and parse the needed information out of the response
results = self._search_results(config)
return list(self._get_entries(results))
@event('plugin.register')
def register_plugin():
plugin.register(InputWhatCD, 'whatcd', groups=['search'], api_ver=2)
|
|
#!/usr/bin/env python
"""Implementations of various collections."""
import cStringIO
import itertools
import struct
import logging
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
from grr.lib import utils
from grr.lib.rdfvalues import anomaly as rdf_anomaly
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import jobs_pb2
class AFF4CollectionView(rdf_protodict.RDFValueArray):
"""A view specifies how an AFF4Collection is seen."""
class RDFValueCollectionView(rdf_protodict.RDFValueArray):
"""A view specifies how an RDFValueCollection is seen."""
class RDFValueCollection(aff4.AFF4Object):
"""This is a collection of RDFValues."""
# If this is set to an RDFValue class implementation, all the contained
# objects must be instances of this class.
_rdf_type = None
_behaviours = set()
size = 0
# The file object for the underlying AFF4Image stream.
fd = None
class SchemaCls(aff4.AFF4Object.SchemaCls):
SIZE = aff4.AFF4Stream.SchemaCls.SIZE
DESCRIPTION = aff4.Attribute("aff4:description", rdfvalue.RDFString,
"This collection's description", "description")
VIEW = aff4.Attribute("aff4:rdfview", RDFValueCollectionView,
"The list of attributes which will show up in "
"the table.", default="")
def Initialize(self):
"""Initialize the internal storage stream."""
self.stream_dirty = False
try:
self.fd = aff4.FACTORY.Open(self.urn.Add("UnversionedStream"),
aff4_type="AFF4UnversionedImage",
mode=self.mode,
token=self.token)
self.size = int(self.Get(self.Schema.SIZE))
return
except IOError:
pass
# We still have many collections which were created with a versioned stream,
# which wastes space. Check if this is such a collection and revert to the
# old behavior if necessary.
try:
self.fd = aff4.FACTORY.Open(self.urn.Add("Stream"),
aff4_type="AFF4Image", mode=self.mode,
token=self.token)
self.size = int(self.Get(self.Schema.SIZE))
return
except IOError:
pass
# If we get here, the stream does not already exist - we create a new
# stream.
self.fd = aff4.FACTORY.Create(self.urn.Add("UnversionedStream"),
"AFF4UnversionedImage",
mode=self.mode, token=self.token)
self.fd.seek(0, 2)
self.size = 0
def SetChunksize(self, chunk_size):
if self.fd.size != 0:
raise ValueError("Cannot set chunk size on an existing collection.")
self.fd.SetChunksize(chunk_size)
def Flush(self, sync=False):
if self.stream_dirty:
self.Set(self.Schema.SIZE(self.size))
self.fd.Flush(sync=sync)
super(RDFValueCollection, self).Flush(sync=sync)
def Close(self, sync=False):
if self.locked:
sync = True
self.Flush(sync=sync)
super(RDFValueCollection, self).Close(sync=sync)
def Add(self, rdf_value=None, **kwargs):
"""Add the rdf value to the collection."""
if rdf_value is None:
if self._rdf_type:
rdf_value = self._rdf_type(**kwargs) # pylint: disable=not-callable
else:
raise ValueError("RDFValueCollection doesn't accept None values.")
if self._rdf_type and not isinstance(rdf_value, self._rdf_type):
raise ValueError("This collection only accepts values of type %s" %
self._rdf_type.__name__)
if not rdf_value.age:
rdf_value.age.Now()
data = rdf_protodict.EmbeddedRDFValue(payload=rdf_value).SerializeToString()
self.fd.Seek(0, 2)
self.fd.Write(struct.pack("<i", len(data)))
self.fd.Write(data)
self.stream_dirty = True
self.size += 1
def AddAll(self, rdf_values, callback=None):
"""Adds a list of rdfvalues to the collection."""
for rdf_value in rdf_values:
if rdf_value is None:
raise ValueError("Can't add None to the collection via AddAll.")
if self._rdf_type and not isinstance(rdf_value, self._rdf_type):
raise ValueError("This collection only accepts values of type %s" %
self._rdf_type.__name__)
if not rdf_value.age:
rdf_value.age.Now()
buf = cStringIO.StringIO()
for index, rdf_value in enumerate(rdf_values):
data = rdf_protodict.EmbeddedRDFValue(
payload=rdf_value).SerializeToString()
buf.write(struct.pack("<i", len(data)))
buf.write(data)
self.size += 1
if callback:
callback(index, rdf_value)
self.fd.Seek(0, 2)
self.fd.Write(buf.getvalue())
self.stream_dirty = True
def __len__(self):
return self.size
def __nonzero__(self):
return self.size != 0
def __iter__(self):
"""Iterate over all contained RDFValues.
Returns:
Generator of RDFValues stored in the collection.
Raises:
RuntimeError: if we are in write mode.
"""
return self.GenerateItems()
@property
def deprecated_current_offset(self):
return self.fd.Tell()
def _GenerateItems(self, byte_offset=0):
"""Generates items starting from a given byte offset."""
if not self.fd:
return
if self.mode == "w":
raise RuntimeError("Can not read when in write mode.")
self.fd.seek(byte_offset)
count = 0
while True:
offset = self.fd.Tell()
try:
length = struct.unpack("<i", self.fd.Read(4))[0]
serialized_event = self.fd.Read(length)
except struct.error:
break
result = rdf_protodict.EmbeddedRDFValue(serialized_event)
payload = result.payload
if payload is not None:
# Mark the RDFValue with important information relating to the
# collection it is from.
payload.id = count
payload.collection_offset = offset
yield payload
else:
logging.warning("payload=None was encountered in a collection %s "
"(index %d), this may mean a logical bug or corrupt "
"data. Ignoring...", self.urn, count)
count += 1
def GenerateItems(self, offset=0):
"""Iterate over all contained RDFValues.
Args:
offset: The offset in the stream to start reading from.
Returns:
Generator for values stored in the collection.
Raises:
RuntimeError: if we are in write mode.
"""
return itertools.islice(self._GenerateItems(), offset, self.size)
def GetItem(self, offset=0):
for item in self.GenerateItems(offset=offset):
return item
def __getitem__(self, index):
if index >= 0:
for item in self.GenerateItems(offset=index):
return item
else:
raise RuntimeError("Index must be >= 0")
class AFF4Collection(aff4.AFF4Volume, RDFValueCollection):
"""A collection of AFF4 objects.
The AFF4 objects themselves are opened on demand from the data store. The
collection simply stores the RDFURNs of all aff4 objects in the collection.
"""
_rdf_type = rdf_client.AFF4ObjectSummary
_behaviours = frozenset(["Collection"])
class SchemaCls(aff4.AFF4Volume.SchemaCls, RDFValueCollection.SchemaCls):
VIEW = aff4.Attribute("aff4:view", AFF4CollectionView,
"The list of attributes which will show up in "
"the table.", default="")
def CreateView(self, attributes):
"""Given a list of attributes, update our view.
Args:
attributes: is a list of attribute names.
"""
self.Set(self.Schema.VIEW(attributes))
def Query(self, filter_string="", subjects=None, limit=100):
"""Filter the objects contained within this collection."""
if subjects is None:
subjects = set()
for obj in self:
if len(subjects) < limit:
subjects.add(obj.urn)
else:
break
else:
subjects = set(subjects[:limit])
if filter_string:
# Parse the query string
ast = aff4.AFF4QueryParser(filter_string).Parse()
# Query our own data store
filter_obj = ast.Compile(aff4.AFF4Filter)
# We expect RDFURN objects to be stored in this collection.
for subject in aff4.FACTORY.MultiOpen(subjects, token=self.token):
if filter_string and not filter_obj.FilterOne(subject):
continue
yield subject
def ListChildren(self, **_):
for aff4object_summary in self:
yield aff4object_summary.urn
class GRRSignedBlobCollection(RDFValueCollection):
_rdf_type = rdf_crypto.SignedBlob
class GRRSignedBlob(aff4.AFF4MemoryStream):
"""A container for storing a signed binary blob such as a driver."""
def Initialize(self):
self.collection = aff4.FACTORY.Create(
self.urn.Add("collection"), "GRRSignedBlobCollection", mode=self.mode,
token=self.token)
self.fd = cStringIO.StringIO()
if "r" in self.mode:
for x in self.collection:
self.fd.write(x.data)
self.size = self.fd.tell()
self.fd.seek(0)
# How many chunks we have?
self.chunks = len(self.collection)
def Add(self, item):
self.collection.Add(item)
def __iter__(self):
return iter(self.collection)
def Close(self):
super(GRRSignedBlob, self).Close()
self.collection.Close()
class GRRMemoryDriver(GRRSignedBlob):
"""A driver for acquiring memory."""
class SchemaCls(GRRSignedBlob.SchemaCls):
INSTALLATION = aff4.Attribute(
"aff4:driver/installation", rdf_client.DriverInstallTemplate,
"The driver installation control protobuf.", "installation",
default=rdf_client.DriverInstallTemplate(
driver_name="pmem", device_path=r"\\.\pmem"))
class GrepResultsCollection(RDFValueCollection):
"""A collection of grep results."""
_rdf_type = rdf_client.BufferReference
class ClientAnomalyCollection(RDFValueCollection):
"""A collection of anomalies related to a client.
This class is a normal collection, but with additional methods for making
viewing and working with anomalies easier.
"""
_rdf_type = rdf_anomaly.Anomaly
class SeekIndexPair(rdf_structs.RDFProtoStruct):
"""Index offset <-> byte offset pair used in seek index."""
protobuf = jobs_pb2.SeekIndexPair
class SeekIndex(rdf_structs.RDFProtoStruct):
"""Seek index (collection of SeekIndexPairs, essentially)."""
protobuf = jobs_pb2.SeekIndex
class PackedVersionedCollection(RDFValueCollection):
"""A collection which uses the data store's version properties.
  This collection is very efficient to write to - we can insert new values by
  blindly writing them into the data store, using the data store's
  timestamping features.
Unfortunately reading from versioned data store attributes is slow. Therefore
this object implements a compaction strategy, where writes are versioned,
until they can be compacted into a regular RDFValueCollection by the
VersionedCollectionCompactor cron job.
"""
notification_queue = "aff4:/cron/versioned_collection_compactor"
index_prefix = "index:changed/"
index_format = index_prefix + "%s"
@classmethod
def ScheduleNotification(cls, urn, sync=False, token=None):
"""Schedule notification for a given urn."""
data_store.DB.Set(cls.notification_queue, cls.index_format % urn,
urn, replace=True, token=token, sync=sync)
@classmethod
def QueryNotifications(cls, timestamp=None, token=None):
"""Query all the notifications for the given type of collections."""
if token is None:
raise ValueError("token can't be None")
if timestamp is None:
timestamp = rdfvalue.RDFDatetime().Now()
for _, urn, urn_timestamp in data_store.DB.ResolvePrefix(
cls.notification_queue, cls.index_prefix,
timestamp=(0, timestamp), token=token):
yield rdfvalue.RDFURN(urn, age=urn_timestamp)
@classmethod
def DeleteNotifications(cls, urns, end=None, token=None):
"""Delete notifications for given urns."""
if token is None:
raise ValueError("token can't be None")
predicates = [cls.index_format % urn for urn in urns]
data_store.DB.DeleteAttributes(cls.notification_queue, predicates,
end=end, token=token, sync=True)
@classmethod
def AddToCollection(cls, collection_urn, rdf_values, sync=True,
token=None):
"""Adds RDFValues to the collection with a given urn."""
if token is None:
raise ValueError("Token can't be None.")
data_attrs = []
for rdf_value in rdf_values:
if rdf_value is None:
raise ValueError("Can't add None to the collection.")
if cls._rdf_type and not isinstance(rdf_value, cls._rdf_type):
raise ValueError("This collection only accepts values of type %s" %
cls._rdf_type.__name__)
if not rdf_value.age:
rdf_value.age.Now()
data_attrs.append(cls.SchemaCls.DATA(
rdf_protodict.EmbeddedRDFValue(payload=rdf_value)))
attrs_to_set = {cls.SchemaCls.DATA: data_attrs}
if cls.IsJournalingEnabled():
journal_entry = cls.SchemaCls.ADDITION_JOURNAL(len(rdf_values))
attrs_to_set[cls.SchemaCls.ADDITION_JOURNAL] = [journal_entry]
aff4.FACTORY.SetAttributes(collection_urn, attrs_to_set, set(),
add_child_index=False, sync=sync,
token=token)
cls.ScheduleNotification(collection_urn, token=token)
# Update system-wide stats.
stats.STATS.IncrementCounter("packed_collection_added",
delta=len(rdf_values))
class SchemaCls(RDFValueCollection.SchemaCls):
"""Schema for PackedVersionedCollection."""
DATA = aff4.Attribute("aff4:data", rdf_protodict.EmbeddedRDFValue,
"The embedded semantic value.", versioned=True)
SEEK_INDEX = aff4.Attribute("aff4:seek_index", SeekIndex,
"Index for seek operations.", versioned=False)
ADDITION_JOURNAL = aff4.Attribute("aff4:addition_journal",
rdfvalue.RDFInteger,
"Journal of Add(), AddAll(), and "
"AddToCollection() operations. Every "
"element in the journal is the number of "
"items added to collection when Add*() "
"was called.", versioned=True)
COMPACTION_JOURNAL = aff4.Attribute("aff4:compaction_journal",
rdfvalue.RDFInteger,
"Journal of compactions. Every item in "
"the journal is number of elements "
"that were compacted during particular "
"compaction.")
INDEX_INTERVAL = 10000
COMPACTION_BATCH_SIZE = 10000
MAX_REVERSED_RESULTS = 10000
@staticmethod
def IsJournalingEnabled():
return config_lib.CONFIG[
"Worker.enable_packed_versioned_collection_journaling"]
def Flush(self, sync=True):
send_notification = self._dirty and self.Schema.DATA in self.new_attributes
super(PackedVersionedCollection, self).Flush(sync=sync)
if send_notification:
self.ScheduleNotification(self.urn, token=self.token)
def Close(self, sync=True):
send_notification = self._dirty and self.Schema.DATA in self.new_attributes
super(PackedVersionedCollection, self).Close(sync=sync)
if send_notification:
self.ScheduleNotification(self.urn, token=self.token)
def Add(self, rdf_value=None, **kwargs):
"""Add the rdf value to the collection."""
if rdf_value is None and self._rdf_type:
rdf_value = self._rdf_type(**kwargs) # pylint: disable=not-callable
if not rdf_value.age:
rdf_value.age.Now()
self.Set(self.Schema.DATA(payload=rdf_value))
if self.IsJournalingEnabled():
self.Set(self.Schema.ADDITION_JOURNAL(1))
# Update system-wide stats.
stats.STATS.IncrementCounter("packed_collection_added")
def AddAll(self, rdf_values, callback=None):
"""Adds a list of rdfvalues to the collection."""
for rdf_value in rdf_values:
if rdf_value is None:
raise ValueError("Can't add None to the collection via AddAll.")
if self._rdf_type and not isinstance(rdf_value, self._rdf_type):
raise ValueError("This collection only accepts values of type %s" %
self._rdf_type.__name__)
if not rdf_value.age:
rdf_value.age.Now()
for index, rdf_value in enumerate(rdf_values):
self.Set(self.Schema.DATA(payload=rdf_value))
if callback:
callback(index, rdf_value)
if self.IsJournalingEnabled():
self.Set(self.Schema.ADDITION_JOURNAL(len(rdf_values)))
# Update system-wide stats.
stats.STATS.IncrementCounter("packed_collection_added",
delta=len(rdf_values))
def GenerateUncompactedItems(self, max_reversed_results=0,
timestamp=None):
if self.IsAttributeSet(self.Schema.DATA):
freeze_timestamp = timestamp or rdfvalue.RDFDatetime().Now()
results = []
for _, value, _ in data_store.DB.ResolvePrefix(
self.urn, self.Schema.DATA.predicate, token=self.token,
timestamp=(0, freeze_timestamp)):
if results is not None:
results.append(self.Schema.DATA(value).payload)
if max_reversed_results and len(results) > max_reversed_results:
for result in results:
yield result
results = None
else:
yield self.Schema.DATA(value).payload
if results is not None:
for result in reversed(results):
yield result
def GenerateItems(self, offset=0):
"""First iterate over the versions, and then iterate over the stream."""
freeze_timestamp = rdfvalue.RDFDatetime().Now()
index = 0
byte_offset = 0
if offset >= self.INDEX_INTERVAL and self.IsAttributeSet(
self.Schema.SEEK_INDEX):
seek_index = self.Get(self.Schema.SEEK_INDEX)
for value in reversed(seek_index.checkpoints):
if value.index_offset <= offset:
index = value.index_offset
byte_offset = value.byte_offset
break
for x in self._GenerateItems(byte_offset=byte_offset):
if index >= offset:
yield x
index += 1
for x in self.GenerateUncompactedItems(
max_reversed_results=self.MAX_REVERSED_RESULTS,
timestamp=freeze_timestamp):
if index >= offset:
yield x
index += 1
def GetIndex(self):
"""Return the seek index (in the reversed chronological order)."""
if not self.IsAttributeSet(self.Schema.SEEK_INDEX):
return []
else:
seek_index = self.Get(self.Schema.SEEK_INDEX)
return [(v.index_offset, v.byte_offset)
for v in reversed(seek_index.checkpoints)]
@utils.Synchronized
def Compact(self, callback=None, timestamp=None):
"""Compacts versioned attributes into the collection stream.
    Versioned attributes come from the data store sorted by timestamp in
    decreasing order, which is the opposite of what we want in the collection
    (items in the collection should be in chronological order).
    Compact's implementation can handle very large collections that can't be
    reversed in memory. It reads them in batches, reverses every batch
    individually, and then reads the batches back in reverse order and writes
    their contents to the collection stream.
Args:
callback: An optional function without arguments that gets called
periodically while processing is done. Useful in flows
that have to heartbeat.
timestamp: Only items added before this timestamp will be compacted.
Raises:
RuntimeError: if problems are encountered when reading back temporary
saved data.
Returns:
Number of compacted results.
"""
if not self.locked:
raise aff4.LockError("Collection must be locked before compaction.")
compacted_count = 0
batches_urns = []
current_batch = []
# This timestamp will be used to delete attributes. We don't want
# to delete anything that was added after we started the compaction.
freeze_timestamp = timestamp or rdfvalue.RDFDatetime().Now()
def UpdateIndex():
seek_index = self.Get(self.Schema.SEEK_INDEX, SeekIndex())
prev_index_pair = seek_index.checkpoints and seek_index.checkpoints[-1]
if (not prev_index_pair or
self.size - prev_index_pair.index_offset >= self.INDEX_INTERVAL):
new_index_pair = SeekIndexPair(index_offset=self.size,
byte_offset=self.fd.Tell())
seek_index.checkpoints.Append(new_index_pair)
self.Set(self.Schema.SEEK_INDEX, seek_index)
def DeleteVersionedDataAndFlush():
"""Removes versioned attributes and flushes the stream."""
data_store.DB.DeleteAttributes(self.urn, [self.Schema.DATA.predicate],
end=freeze_timestamp,
token=self.token, sync=True)
if self.IsJournalingEnabled():
journal_entry = self.Schema.COMPACTION_JOURNAL(compacted_count,
age=freeze_timestamp)
attrs_to_set = {self.Schema.COMPACTION_JOURNAL: [journal_entry]}
aff4.FACTORY.SetAttributes(self.urn, attrs_to_set, set(),
add_child_index=False, sync=True,
token=self.token)
if self.Schema.DATA in self.synced_attributes:
del self.synced_attributes[self.Schema.DATA]
self.Flush(sync=True)
def HeartBeat():
"""Update the lock lease if needed and call the callback."""
lease_time = config_lib.CONFIG["Worker.compaction_lease_time"]
if self.CheckLease() < lease_time / 2:
logging.info("%s: Extending compaction lease.", self.urn)
self.UpdateLease(lease_time)
stats.STATS.IncrementCounter("packed_collection_lease_extended")
if callback:
callback()
HeartBeat()
# We iterate over all versioned attributes. If we get more than
# self.COMPACTION_BATCH_SIZE of them, we write the data to a temporary
# stream in reversed order.
for _, value, _ in data_store.DB.ResolvePrefix(
self.urn, self.Schema.DATA.predicate, token=self.token,
timestamp=(0, freeze_timestamp)):
HeartBeat()
current_batch.append(value)
compacted_count += 1
if len(current_batch) >= self.COMPACTION_BATCH_SIZE:
batch_urn = rdfvalue.RDFURN("aff4:/tmp").Add(
"%X" % utils.PRNG.GetULong())
batches_urns.append(batch_urn)
buf = cStringIO.StringIO()
for data in reversed(current_batch):
buf.write(struct.pack("<i", len(data)))
buf.write(data)
# We use AFF4Image to avoid serializing/deserializing data stored
# in versioned attributes.
with aff4.FACTORY.Create(batch_urn, "AFF4Image", mode="w",
token=self.token) as batch_stream:
batch_stream.Write(buf.getvalue())
current_batch = []
# If there are no versioned attributes, we have nothing to do.
if not current_batch and not batches_urns:
return 0
# The last batch of results can be written to our collection's stream
# immediately: versioned attributes arrive newest-first, so this final
# batch holds the oldest items of this run and is written before the
# full batches are replayed.
if current_batch:
buf = cStringIO.StringIO()
for data in reversed(current_batch):
buf.write(struct.pack("<i", len(data)))
buf.write(data)
self.fd.Seek(0, 2)
self.fd.Write(buf.getvalue())
self.stream_dirty = True
self.size += len(current_batch)
UpdateIndex()
# If current_batch was the only available batch, just write everything
# and return.
if not batches_urns:
DeleteVersionedDataAndFlush()
return compacted_count
batches = {}
for batch in aff4.FACTORY.MultiOpen(batches_urns, aff4_type="AFF4Image",
token=self.token):
batches[batch.urn] = batch
if len(batches_urns) != len(batches):
raise RuntimeError("Internal inconsistency can't read back all the "
"temporary batches.")
# We read all the temporary batches in reverse order (the batches
# themselves were reversed when they were written).
self.fd.Seek(0, 2)
for batch_urn in reversed(batches_urns):
batch = batches[batch_urn]
HeartBeat()
data = batch.Read(len(batch))
self.fd.Write(data)
self.stream_dirty = True
self.size += self.COMPACTION_BATCH_SIZE
UpdateIndex()
aff4.FACTORY.Delete(batch_urn, token=self.token)
DeleteVersionedDataAndFlush()
# Update system-wide stats.
stats.STATS.IncrementCounter("packed_collection_compacted",
delta=compacted_count)
return compacted_count
def CalculateLength(self):
length = super(PackedVersionedCollection, self).__len__()
if self.IsAttributeSet(self.Schema.DATA):
if self.age_policy == aff4.ALL_TIMES:
length += len(list(self.GetValuesForAttribute(self.Schema.DATA)))
else:
length += len(list(data_store.DB.ResolveMulti(
self.urn, [self.Schema.DATA.predicate], token=self.token,
timestamp=data_store.DB.ALL_TIMESTAMPS)))
return length
def __len__(self):
return self.CalculateLength()
def __nonzero__(self):
if "r" not in self.mode:
raise AttributeError(
"Cannot determine collection length in write only mode.")
# This checks if there is data in the stream.
if super(PackedVersionedCollection, self).__nonzero__():
return True
# if there is not, we might have some uncompacted data.
return self.IsAttributeSet(self.Schema.DATA)
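# A minimal, self-contained sketch (illustrative only, not used by the classes
# in this module) of the batch-reversal pattern Compact() implements above:
# items arrive newest-first, full batches are reversed and spilled aside, the
# final partial batch (the oldest items) is written out first, and the spilled
# batches are then replayed in reverse spill order so the output ends up in
# chronological order. Names and the batch size here are illustrative.
def _reverse_in_batches_sketch(items_newest_first, batch_size=3):
  spilled_batches = []  # Stands in for the temporary AFF4 batch streams.
  current_batch = []
  output = []
  for item in items_newest_first:
    current_batch.append(item)
    if len(current_batch) >= batch_size:
      spilled_batches.append(list(reversed(current_batch)))
      current_batch = []
  # The leftover batch holds the oldest items and goes straight to the output.
  output.extend(reversed(current_batch))
  # Spilled batches are replayed in reverse spill order, oldest batch first.
  for batch in reversed(spilled_batches):
    output.extend(batch)
  return output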
class ResultsOutputCollection(PackedVersionedCollection):
"""Collection for hunt results storage.
This collection is essentially a PackedVersionedCollection with a
separate notification queue. Therefore, all new results are written
as versioned attributes. ProcessHuntResultsCronFlow reads notifications,
processes new results, and then writes them to the main collection stream.
"""
notification_queue = "aff4:/_notifications/results_output"
class SchemaCls(PackedVersionedCollection.SchemaCls):
RESULTS_SOURCE = aff4.Attribute("aff4:results_source", rdfvalue.RDFURN,
"URN of a hunt where results came from.")
def Initialize(self):
super(ResultsOutputCollection, self).Initialize()
if "w" in self.mode and self.fd.size == 0:
# We want bigger chunks as we usually expect a large number of results.
self.fd.SetChunksize(1024 * 1024)
class CollectionsInitHook(registry.InitHook):
pre = ["StatsInit"]
def RunOnce(self):
"""Register collections-related metrics."""
stats.STATS.RegisterCounterMetric("packed_collection_added")
stats.STATS.RegisterCounterMetric("packed_collection_compacted")
stats.STATS.RegisterCounterMetric("packed_collection_lease_extended")
|
|
""" DarkWorldsMetricMountianOsteric.py
Custom evaluation metric for the 'Observing Dark Worlds' competition.
[Description of metric, or reference to documentation.]
Update: Made for the training set only so users can check their results from the training c
@Author: David Harvey
Created: 22 August 2012
"""
import numpy as np
import math as mt
import itertools as it
import csv as c
import getopt as gt
import sys as sys
import argparse as ap
import string as st
import random as rd
def calc_delta_r(x_predicted,y_predicted,x_true,y_true):
""" Compute the scalar distance between predicted halo centers
and the true halo centers. Predictions are matched to the closest
halo center.
Notes: It takes in the predicted and true positions, loops over every possible matching configuration and keeps the optimal one.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Returns:
radial_distance: vector containing the scalar distances between the predicted halo centres and the true halo centres (1 to 3 elements)
true_halo_indexes: vector containing indexes of the input true halos which match the predicted halo indexes (1 to 3 elements)
measured_halo_indexes: vector containing indexes of the predicted halo positions with reference to the true halo positions.
e.g. if true_halo_indexes=[0,1] and measured_halo_indexes=[1,0] then the first x,y coordinates of the true halo position match the second input of the predicted x,y coordinates.
"""
num_halos=len(x_true) #Only works for number of halos > 1
num_configurations=mt.factorial(num_halos) #The number of possible different combinations
configurations=np.zeros([num_halos,num_configurations],int) #The array of combinations
#I will pass back
distances = np.zeros([num_configurations],float) #The array of the distances
#for all possible combinations
radial_distance=[] #The vector of distances
#I will pass back
#Pick a combination of true and predicted
a=['01','012'] #Input for the permutations: '01' for two halos, '012' for three
count=0 #For the index of the distances array
true_halo_indexes=[] #The tuples which will show the order of halos picked
predicted_halo_indexes=[]
distances_perm=np.zeros([num_configurations,num_halos],float) #The distance between each
#true and predicted
#halo for every comb
true_halo_indexes_perm=[] #log of all the permutations of true halos used
predicted_halo_indexes_perm=[] #log of all the predicted permutations
for perm in it.permutations(a[num_halos-2],num_halos):
which_true_halos=[]
which_predicted_halos=[]
for j in xrange(num_halos): #loop through all the true halos for this permutation
distances_perm[count,j]=np.sqrt((x_true[j]-x_predicted[int(perm[j])])**2\
+(y_true[j]-y_predicted[int(perm[j])])**2)
#This array logs the distance between true and
#predicted halo for ALL configurations
which_true_halos.append(j) #log the order in which I try each true halo
which_predicted_halos.append(int(perm[j])) #log the order in which I try
#each predicted halo
true_halo_indexes_perm.append(which_true_halos) #this is a list of all the
#different configurations of
#true halo indexes
predicted_halo_indexes_perm.append(which_predicted_halos)
distances[count]=sum(distances_perm[count,0::]) #Find what the total distances
#are for each configuration
count=count+1
config = np.where(distances == min(distances))[0][0] #The configuration used is the one
#which has the smallest distance
radial_distance.append(distances_perm[config,0::]) #Find the tuple of distances that
#correspond to this smallest distance
true_halo_indexes=true_halo_indexes_perm[config] #Find the tuple of the index which refers
#to the smallest distance
predicted_halo_indexes=predicted_halo_indexes_perm[config]
return radial_distance,true_halo_indexes,predicted_halo_indexes
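# A small usage sketch (hypothetical values, not part of the metric itself)
# showing how calc_delta_r above matches two predicted halos to two true
# halos: every permutation is tried and the one with the smallest total
# distance wins.
def _calc_delta_r_example():
    x_predicted = np.array([110.0, 10.0])
    y_predicted = np.array([210.0, 20.0])
    x_true = np.array([0.0, 100.0])
    y_true = np.array([0.0, 200.0])
    dists, true_idx, pred_idx = calc_delta_r(x_predicted, y_predicted,
                                             x_true, y_true)
    # Expected matching: true halo 0 -> predicted halo 1 (distance sqrt(500))
    # and true halo 1 -> predicted halo 0 (distance sqrt(200)), i.e.
    # true_idx == [0, 1] and pred_idx == [1, 0].
    return dists, true_idx, pred_idx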
def calc_theta(x_predicted, y_predicted, x_true, y_true, x_ref, y_ref):
""" Calculate the angle the predicted position and the true position, where the zero degree corresponds to the line joing the true halo position and the reference point given.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Note that the input of these are matched up so that the first elements of each
vector are associated with one another
x_ref, y_ref: scalars of the x,y coordinate of reference point
Returns:
Theta: A vector containing the angles of the predicted halo w.r.t the true halo
with the vector joining the reference point and the halo as the zero line.
"""
num_halos=len(x_predicted)
theta=np.zeros([num_halos+1],float) #Set up the array which will pass back the values
phi = np.zeros([num_halos],float)
psi = np.arctan( (y_true-y_ref)/(x_true-x_ref) )
# Angle at which the halo is at
#with respect to the reference point
#Mask out points where x_predicted == x_true to avoid dividing by zero;
#the same mask is used on both sides of the assignment.
phi[x_true != x_predicted] = np.arctan((y_predicted[x_true != x_predicted]-\
y_true[x_true != x_predicted])\
/(x_predicted[x_true != x_predicted]-\
x_true[x_true != x_predicted])) # Angle of the estimate
#wrt true halo centre
#Before finding the angle with the zero line as the line joining the halo and the reference
#point I need to convert the angle produced by Python to an angle between 0 and 2pi
phi =convert_to_360(phi, x_predicted-x_true,\
y_predicted-y_true)
psi = convert_to_360(psi, x_true-x_ref,\
y_true-y_ref)
theta = phi-psi #The angle with the baseline as the line joining the ref and the halo
theta[theta< 0.0]=theta[theta< 0.0]+2.0*mt.pi #If the resulting angle is negative,
#add 2pi to bring it back into
#the range [0, 2pi)
return theta
def convert_to_360(angle, x_in, y_in):
""" Convert the given angle to the true angle in the range 0:2pi
Arguments:
angle: the raw angle(s) from arctan, to be mapped into the range 0:2pi
x_in, y_in: the x and y coordinates used to determine the quadrant
the point lies in, so that pi or 2pi can be added as needed
Returns:
theta: the angle in the range 0:2pi
"""
n = len(x_in)
for i in xrange(n):
if x_in[i] < 0 and y_in[i] > 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] < 0 and y_in[i] < 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] > 0 and y_in[i] < 0:
angle[i] = angle[i]+2.0*mt.pi
elif x_in[i] == 0 and y_in[i] == 0:
angle[i] = 0
elif x_in[i] == 0 and y_in[i] > 0:
angle[i] = mt.pi/2.
elif x_in[i] < 0 and y_in[i] == 0:
angle[i] = mt.pi
elif x_in[i] == 0 and y_in[i] < 0:
angle[i] = 3.*mt.pi/2.
return angle
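# A quick illustrative check of the quadrant correction above: arctan alone
# cannot tell a vector in the third quadrant from one in the first, so
# convert_to_360 uses the signs of x and y to map the raw angle into [0, 2pi).
def _convert_to_360_example():
    raw = np.array([np.arctan(-1.0 / -1.0)])    # pi/4, ambiguous on its own
    return convert_to_360(raw, [-1.0], [-1.0])  # -> array([5*pi/4])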
def get_ref(x_halo,y_halo,weight):
""" Gets the reference point of the system of halos by weighted averaging the x and y
coordinates.
Arguments:
x_halo, y_halo: vectors of length num_halos giving the coordinates of the halos
weight: the weight assigned to the position of each halo
(num_halos, the number of halos in the system, is implied by the vector length)
Returns:
x_ref, y_ref: The coordinates of the reference point for the metric
"""
#Find the weighted average of the x and y coordinates
x_ref = np.sum([x_halo*weight])/np.sum([weight])
y_ref = np.sum([y_halo*weight])/np.sum([weight])
return x_ref,y_ref
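# Quick worked example of the weighted average above (made-up values):
# get_ref(np.array([0., 10.]), np.array([0., 20.]), np.array([1., 3.]))
# returns (7.5, 15.0), the mass-weighted centroid of the two halos.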
def main_score( nhalo_all, x_true_all, y_true_all, x_ref_all, y_ref_all, sky_prediction):
"""abstracts the score from the old command-line interface.
sky_prediction is a dx2 array of predicted x,y positions
-camdp"""
r=np.array([],dtype=float) # The array which I will log all the calculated radial distances
angle=np.array([],dtype=float) #The array which I will log all the calculated angles
#Load in the sky_ids from the true
num_halos_total=0 #Keep track of how many halos are input into the metric
for selectskyinsolutions, sky in enumerate(sky_prediction): #Loop through each line in result.csv and analyse each one
nhalo=int(nhalo_all[selectskyinsolutions])#How many halos in the
#selected sky?
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
x_predicted=np.append(x_predicted,float(sky[0])) #get the predicted values
y_predicted=np.append(y_predicted,float(sky[1]))
#The solution file for the test data provides masses
#to calculate the centre of mass, whereas Training_halos.csv
#directly provides x_ref, y_ref. So in the case of test data
#we need to calculate the ref point from the masses using
#get_ref()
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
#Single halo case, this needs to be separately calculated since
#x_ref = x_true
if nhalo == 1:
#What is the radial distance between the true and predicted position
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
#What is the angle between the predicted position and true halo position
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
#r_index_index contains the radial distances of the predicted to
#true positions. These are found by matching up the true halos to
#the predicted halos such that the average of all the radial distances
#is optimal. It also contains the indexes of the halos used, which
#show which halo has been matched to which.
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1] #The true halos indexes matched with the
predicted_index=r_index_index[2] #predicted halo index
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref)) # Find the angles of the predicted
#position wrt to the halo and
# add to the vector angle
# Find the average distance the estimates are from the halo positions
av_r=sum(r)/len(r)
#In order to quantify the orientation invariance we will express each angle
# as a vector and find the average vector
#R_bar^2 = (1/N * Sum_N cos(theta))^2 + (1/N * Sum_N sin(theta))^2
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
W1=1./1000. #Weight av_r so that < 1 is a good score and > 1 is not so good.
W2=1.
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
print 'Your average distance in pixels from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
return metric
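# A minimal sketch (toy numbers, not competition data) of how the score above
# is assembled: the mean radial error scaled by W1 = 1/1000, plus the length
# of the mean angular unit vector, which is near 1 when the angular errors all
# point the same way and near 0 when they are spread uniformly.
def _metric_example():
    r = np.array([500.0, 1500.0])   # radial errors in pixels
    angle = np.array([0.1, 0.2])    # angular errors in radians
    av_r = sum(r) / len(r)          # 1000.0
    n = float(len(angle))
    angle_vec = np.sqrt((sum(np.cos(angle)) / n) ** 2 +
                        (sum(np.sin(angle)) / n) ** 2)
    return 1. / 1000. * av_r + 1. * angle_vec   # ~= 1.0 + 0.999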
def main(user_fname, fname):
""" Script to compute the evaluation metric for the Observing Dark Worlds competition. You can run it on your training data to understand how well you have done with the training data.
"""
r=np.array([],dtype=float) # The array which I will log all the calculated radial distances
angle=np.array([],dtype=float) #The array which I will log all the calculated angles
#Load in the sky_ids from the true
true_sky_id=[]
sky_loader = c.reader(open(fname, 'rb')) #Load in the sky_ids from the solution file
for row in sky_loader:
true_sky_id.append(row[0])
#Load in the true values from the solution file
nhalo_all=np.loadtxt(fname,usecols=(1,),delimiter=',',skiprows=1)
x_true_all=np.loadtxt(fname,usecols=(4,6,8),delimiter=',',skiprows=1)
y_true_all=np.loadtxt(fname,usecols=(5,7,9),delimiter=',',skiprows=1)
x_ref_all=np.loadtxt(fname,usecols=(2,),delimiter=',',skiprows=1)
y_ref_all=np.loadtxt(fname,usecols=(3,),delimiter=',',skiprows=1)
for row in sky_loader:
true_sky_id.append(row[1])
num_halos_total=0 #Keep track of how many halos are input into the metric
sky_prediction = c.reader(open(user_fname, 'rb')) #Open the result.csv
try: #See if the input file from user has a header on it
#with open('JoyceTest/trivialUnitTest_Pred.txt', 'r') as f:
with open(user_fname, 'r') as f:
header = float((f.readline()).split(',')[1]) #try to cast what would be
#the first data value to a
#float; if this succeeds the
#file has no header
print 'THE INPUT FILE DOES NOT APPEAR TO HAVE A HEADER'
except :
print 'THE INPUT FILE APPEARS TO HAVE A HEADER, SKIPPING THE FIRST LINE'
skip_header = sky_prediction.next()
for sky in sky_prediction: #Loop through each line in result.csv and analyse each one
sky_id = str(sky[0]) #Get the sky_id of the input
does_it_exist=true_sky_id.count(sky_id) #Is the input sky_id
#from user a real one?
if does_it_exist > 0: #If it does then find the matching solutions to the sky_id
selectskyinsolutions=true_sky_id.index(sky_id)-1
else: #Otherwise exit
print 'Sky_id does not exist, formatting problem: ',sky_id
sys.exit(2)
nhalo=int(nhalo_all[selectskyinsolutions])#How many halos in the
#selected sky?
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
x_predicted=np.append(x_predicted,float(sky[2*i+1])) #get the predicted values
y_predicted=np.append(y_predicted,float(sky[2*i+2]))
#The solution file for the test data provides masses
#to calculate the centre of mass, whereas Training_halos.csv
#directly provides x_ref, y_ref. So in the case of test data
#we need to calculate the ref point from the masses using
#get_ref()
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
#Single halo case, this needs to be separately calculated since
#x_ref = x_true
if nhalo == 1:
#What is the radial distance between the true and predicted position
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
#What is the angle between the predicted position and true halo position
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
#r_index_index contains the radial distances of the predicted to
#true positions. These are found by matching up the true halos to
#the predicted halos such that the average of all the radial distances
#is optimal. It also contains the indexes of the halos used, which
#show which halo has been matched to which.
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1] #The true halos indexes matched with the
predicted_index=r_index_index[2] #predicted halo index
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref)) # Find the angles of the predicted
#position wrt to the halo and
# add to the vector angle
# Find the average distance the estimates are from the halo positions
av_r=sum(r)/len(r)
#In order to quantify the orientation invariance we will express each angle
# as a vector and find the average vector
#R_bar^2 = (1/N * Sum_N cos(theta))^2 + (1/N * Sum_N sin(theta))^2
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
W1=1./1000. #Weight av_r so that < 1 is a good score and > 1 is not so good.
W2=1.
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
print 'Your average distance in pixels from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
if __name__ == "__main__":
#For help just type 'python DarkWorldsMetric.py -h'
parser = ap.ArgumentParser(description='Work out the Metric for your input file')
parser.add_argument('inputfile',type=str,nargs=1,help='Input file of halo positions. Needs to be in the format SkyId,halo_x1,halo_y1,halo_x2,halo_y2,halo_x3,halo_y3 ')
parser.add_argument('reffile',type=str,nargs=1,help='This should point to Training_halos.csv')
args = parser.parse_args()
user_fname=args.inputfile[0]
filename = (args.reffile[0]).count('Training_halos.csv')
if filename == 0:
fname=args.reffile[0]+str('Training_halos.csv')
else:
fname=args.reffile[0]
main(user_fname, fname)
|
|
from .base import TestBase
from nbgrader.api import Gradebook
from nose.tools import assert_equal
import os
import shutil
import datetime
class TestNbgraderAutograde(TestBase):
def _setup_db(self):
dbpath = self._init_db()
gb = Gradebook(dbpath)
gb.add_assignment("ps1", duedate="2015-02-02 14:58:23.948203 PST")
gb.add_student("foo")
gb.add_student("bar")
return dbpath
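# The tests below all rely on the same on-disk layout (paths relative to the
# temporary working directory used by each test):
#   source/<assignment>/<notebook>.ipynb   - instructor version, consumed by
#                                            `nbgrader assign`
#   submitted/<student>/<assignment>/...   - student submissions, consumed by
#                                            `nbgrader autograde`
#   autograded/<student>/<assignment>/...  - output written by autograde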
def test_help(self):
"""Does the help display without error?"""
with self._temp_cwd():
self._run_command("nbgrader autograde --help-all")
def test_missing_student(self):
"""Is an error thrown when the student is missing?"""
with self._temp_cwd(["files/submitted-changed.ipynb"]):
dbpath = self._setup_db()
os.makedirs('source/ps1')
shutil.copy('submitted-changed.ipynb', 'source/ps1/p1.ipynb')
self._run_command('nbgrader assign ps1 --db="{}" '.format(dbpath))
os.makedirs('submitted/baz/ps1')
shutil.move('submitted-changed.ipynb', 'submitted/baz/ps1/p1.ipynb')
self._run_command('nbgrader autograde ps1 --db="{}" '.format(dbpath), retcode=1)
def test_add_missing_student(self):
"""Can a missing student be added?"""
with self._temp_cwd(["files/submitted-changed.ipynb"]):
dbpath = self._setup_db()
os.makedirs('source/ps1')
shutil.copy('submitted-changed.ipynb', 'source/ps1/p1.ipynb')
self._run_command('nbgrader assign ps1 --db="{}" '.format(dbpath))
os.makedirs('submitted/baz/ps1')
shutil.move('submitted-changed.ipynb', 'submitted/baz/ps1/p1.ipynb')
self._run_command('nbgrader autograde ps1 --db="{}" --create'.format(dbpath))
assert os.path.isfile("autograded/baz/ps1/p1.ipynb")
def test_missing_assignment(self):
"""Is an error thrown when the assignment is missing?"""
with self._temp_cwd(["files/submitted-changed.ipynb"]):
dbpath = self._setup_db()
os.makedirs('source/ps1')
shutil.copy('submitted-changed.ipynb', 'source/ps1/p1.ipynb')
self._run_command('nbgrader assign ps1 --db="{}" '.format(dbpath))
os.makedirs('submitted/ps2/foo')
shutil.move('submitted-changed.ipynb', 'submitted/ps2/foo/p1.ipynb')
self._run_command('nbgrader autograde ps2 --db="{}" '.format(dbpath), retcode=1)
def test_grade(self):
"""Can files be graded?"""
with self._temp_cwd(["files/submitted-unchanged.ipynb", "files/submitted-changed.ipynb"]):
dbpath = self._setup_db()
os.makedirs('source/ps1')
shutil.copy('submitted-unchanged.ipynb', 'source/ps1/p1.ipynb')
self._run_command('nbgrader assign ps1 --db="{}" '.format(dbpath))
os.makedirs('submitted/foo/ps1')
shutil.move('submitted-unchanged.ipynb', 'submitted/foo/ps1/p1.ipynb')
os.makedirs('submitted/bar/ps1')
shutil.move('submitted-changed.ipynb', 'submitted/bar/ps1/p1.ipynb')
self._run_command('nbgrader autograde ps1 --db="{}"'.format(dbpath))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert not os.path.isfile("autograded/foo/ps1/timestamp.txt")
assert os.path.isfile("autograded/bar/ps1/p1.ipynb")
assert not os.path.isfile("autograded/bar/ps1/timestamp.txt")
gb = Gradebook(dbpath)
notebook = gb.find_submission_notebook("p1", "ps1", "foo")
assert_equal(notebook.score, 1)
assert_equal(notebook.max_score, 4)
assert_equal(notebook.needs_manual_grade, False)
comment1 = gb.find_comment(0, "p1", "ps1", "foo")
comment2 = gb.find_comment(1, "p1", "ps1", "foo")
assert_equal(comment1.comment, "No response.")
assert_equal(comment2.comment, "No response.")
notebook = gb.find_submission_notebook("p1", "ps1", "bar")
assert_equal(notebook.score, 2)
assert_equal(notebook.max_score, 4)
assert_equal(notebook.needs_manual_grade, True)
comment1 = gb.find_comment(0, "p1", "ps1", "bar")
comment2 = gb.find_comment(1, "p1", "ps1", "bar")
assert_equal(comment1.comment, None)
assert_equal(comment2.comment, None)
def test_grade_timestamp(self):
"""Is a timestamp correctly read in?"""
with self._temp_cwd(["files/submitted-unchanged.ipynb", "files/submitted-changed.ipynb"]):
dbpath = self._setup_db()
os.makedirs('source/ps1')
shutil.copy('submitted-unchanged.ipynb', 'source/ps1/p1.ipynb')
self._run_command('nbgrader assign ps1 --db="{}" '.format(dbpath))
os.makedirs('submitted/foo/ps1')
shutil.move('submitted-unchanged.ipynb', 'submitted/foo/ps1/p1.ipynb')
with open('submitted/foo/ps1/timestamp.txt', 'w') as fh:
fh.write("2015-02-02 15:58:23.948203 PST")
os.makedirs('submitted/bar/ps1')
shutil.move('submitted-changed.ipynb', 'submitted/bar/ps1/p1.ipynb')
with open('submitted/bar/ps1/timestamp.txt', 'w') as fh:
fh.write("2015-02-01 14:58:23.948203 PST")
self._run_command('nbgrader autograde ps1 --db="{}"'.format(dbpath))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert os.path.isfile("autograded/foo/ps1/timestamp.txt")
assert os.path.isfile("autograded/bar/ps1/p1.ipynb")
assert os.path.isfile("autograded/bar/ps1/timestamp.txt")
gb = Gradebook(dbpath)
submission = gb.find_submission('ps1', 'foo')
assert submission.total_seconds_late > 0
submission = gb.find_submission('ps1', 'bar')
assert submission.total_seconds_late == 0
# make sure it still works to run it a second time
self._run_command('nbgrader autograde ps1 --db="{}"'.format(dbpath))
def test_force(self):
"""Ensure the force option works properly"""
with self._temp_cwd(["files/submitted-unchanged.ipynb"]):
dbpath = self._setup_db()
os.makedirs('source/ps1/data')
shutil.copy('submitted-unchanged.ipynb', 'source/ps1/p1.ipynb')
with open("source/ps1/foo.txt", "w") as fh:
fh.write("foo")
with open("source/ps1/data/bar.txt", "w") as fh:
fh.write("bar")
self._run_command('nbgrader assign ps1 --db="{}" '.format(dbpath))
os.makedirs('submitted/foo/ps1/data')
shutil.move('submitted-unchanged.ipynb', 'submitted/foo/ps1/p1.ipynb')
with open("submitted/foo/ps1/foo.txt", "w") as fh:
fh.write("foo")
with open("submitted/foo/ps1/data/bar.txt", "w") as fh:
fh.write("bar")
with open("submitted/foo/ps1/blah.pyc", "w") as fh:
fh.write("asdf")
self._run_command('nbgrader autograde ps1 --db="{}"'.format(dbpath))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
# check that it skips the existing directory
os.remove("autograded/foo/ps1/foo.txt")
self._run_command('nbgrader autograde ps1 --db="{}"'.format(dbpath))
assert not os.path.isfile("autograded/foo/ps1/foo.txt")
# force overwrite the supplemental files
self._run_command('nbgrader autograde ps1 --db="{}" --force'.format(dbpath))
assert os.path.isfile("autograded/foo/ps1/foo.txt")
# force overwrite
os.remove("submitted/foo/ps1/foo.txt")
self._run_command('nbgrader autograde ps1 --db="{}" --force'.format(dbpath))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert not os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
def test_filter_notebook(self):
"""Does autograding filter by notebook properly?"""
with self._temp_cwd(["files/submitted-unchanged.ipynb"]):
dbpath = self._setup_db()
os.makedirs('source/ps1/data')
shutil.copy('submitted-unchanged.ipynb', 'source/ps1/p1.ipynb')
with open("source/ps1/foo.txt", "w") as fh:
fh.write("foo")
with open("source/ps1/data/bar.txt", "w") as fh:
fh.write("bar")
self._run_command('nbgrader assign ps1 --db="{}" '.format(dbpath))
os.makedirs('submitted/foo/ps1/data')
shutil.move('submitted-unchanged.ipynb', 'submitted/foo/ps1/p1.ipynb')
with open("submitted/foo/ps1/foo.txt", "w") as fh:
fh.write("foo")
with open("submitted/foo/ps1/data/bar.txt", "w") as fh:
fh.write("bar")
with open("submitted/foo/ps1/blah.pyc", "w") as fh:
fh.write("asdf")
self._run_command('nbgrader autograde ps1 --db="{}" --notebook "p1"'.format(dbpath))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
# check that removing the notebook still causes the autograder to run
os.remove("autograded/foo/ps1/p1.ipynb")
os.remove("autograded/foo/ps1/foo.txt")
self._run_command('nbgrader autograde ps1 --db="{}" --notebook "p1"'.format(dbpath))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
# check that running it again doesn't do anything
os.remove("autograded/foo/ps1/foo.txt")
self._run_command('nbgrader autograde ps1 --db="{}" --notebook "p1"'.format(dbpath))
assert os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert not os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
# check that removing the notebook doesn't cause the autograder to run
os.remove("autograded/foo/ps1/p1.ipynb")
self._run_command('nbgrader autograde ps1 --db="{}"'.format(dbpath))
assert not os.path.isfile("autograded/foo/ps1/p1.ipynb")
assert not os.path.isfile("autograded/foo/ps1/foo.txt")
assert os.path.isfile("autograded/foo/ps1/data/bar.txt")
assert not os.path.isfile("autograded/foo/ps1/blah.pyc")
|
|
import distutils
import glob
import os
import sys
import textwrap
from os.path import curdir, join, pardir
import pytest
from pip._internal import pep425tags
from pip._internal.status_codes import ERROR
from pip._internal.utils.misc import rmtree
from tests.lib import (
_create_svn_repo, _create_test_package, create_test_package_with_setup,
need_bzr, need_mercurial, path_to_url, pyversion, pyversion_tuple,
requirements_file,
)
from tests.lib.local_repos import local_checkout
from tests.lib.path import Path
@pytest.mark.parametrize('command', ('install', 'wheel'))
@pytest.mark.parametrize('variant', ('missing_setuptools', 'bad_setuptools'))
def test_pep518_uses_build_env(script, data, common_wheels, command, variant):
if variant == 'missing_setuptools':
script.pip("uninstall", "-y", "setuptools")
elif variant == 'bad_setuptools':
setuptools_init_path = script.site_packages_path.join(
"setuptools", "__init__.py")
with open(setuptools_init_path, 'a') as f:
f.write('\nraise ImportError("toto")')
else:
raise ValueError(variant)
script.pip(
command, '--no-index', '-f', common_wheels, '-f', data.packages,
data.src.join("pep518-3.0"), use_module=True
)
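# For reference, the PEP 518 behaviour exercised above is driven by a
# pyproject.toml inside the package being built; a minimal one (illustrative,
# not necessarily the exact contents of the pep518-3.0 test package) is:
#
#     [build-system]
#     requires = ["setuptools", "wheel"]
#
# pip reads this and installs the listed requirements into an isolated build
# environment before running the build, which is why breaking the installed
# setuptools above does not break the build.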
def test_pep518_with_user_pip(script, virtualenv, pip_src,
data, common_wheels):
virtualenv.system_site_packages = True
script.pip("install", "--ignore-installed", "--user", pip_src,
use_module=True)
system_pip_dir = script.site_packages_path / 'pip'
system_pip_dir.rmtree()
system_pip_dir.mkdir()
with open(system_pip_dir / '__init__.py', 'w') as fp:
fp.write('raise ImportError\n')
script.pip(
'wheel', '--no-index', '-f', common_wheels, '-f', data.packages,
data.src.join("pep518-3.0"), use_module=True,
)
def test_pep518_with_extra_and_markers(script, data, common_wheels):
script.pip(
'wheel', '--no-index',
'-f', common_wheels,
'-f', data.find_links,
# Add tests/data/packages4, which contains a wheel for
# simple==1.0 (needed by requires_simple_extra[extra]).
'-f', data.find_links4,
data.src.join("pep518_with_extra_and_markers-1.0"),
use_module=True,
)
@pytest.mark.network
def test_pip_second_command_line_interface_works(script, data):
"""
Check if ``pip<PYVERSION>`` commands behave equally
"""
# On old versions of Python, urllib3/requests will raise a warning about
# the lack of an SSLContext.
kwargs = {}
if pyversion_tuple < (2, 7, 9):
kwargs['expect_stderr'] = True
args = ['pip%s' % pyversion]
args.extend(['install', 'INITools==0.2'])
args.extend(['-f', data.packages])
result = script.run(*args, **kwargs)
egg_info_folder = (
script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion
)
initools_folder = script.site_packages / 'initools'
assert egg_info_folder in result.files_created, str(result)
assert initools_folder in result.files_created, str(result)
def test_install_exit_status_code_when_no_requirements(script):
"""
Test install exit status code when no requirements specified
"""
result = script.pip('install', expect_error=True)
assert "You must give at least one requirement to install" in result.stderr
assert result.returncode == ERROR
def test_install_exit_status_code_when_blank_requirements_file(script):
"""
Test install exit status code when blank requirements file specified
"""
script.scratch_path.join("blank.txt").write("\n")
script.pip('install', '-r', 'blank.txt')
@pytest.mark.network
def test_basic_install_from_pypi(script):
"""
Test installing a package from PyPI.
"""
result = script.pip('install', '-vvv', 'INITools==0.2')
egg_info_folder = (
script.site_packages / 'INITools-0.2-py%s.egg-info' % pyversion
)
initools_folder = script.site_packages / 'initools'
assert egg_info_folder in result.files_created, str(result)
assert initools_folder in result.files_created, str(result)
# Should not display where it's looking for files
assert "Looking in indexes: " not in result.stdout
assert "Looking in links: " not in result.stdout
def test_basic_editable_install(script):
"""
Test editable installation.
"""
result = script.pip('install', '-e', 'INITools==0.2', expect_error=True)
assert (
"INITools==0.2 should either be a path to a local project or a VCS url"
in result.stderr
)
assert not result.files_created
assert not result.files_updated
@pytest.mark.svn
def test_basic_install_editable_from_svn(script):
"""
Test checking out from svn.
"""
checkout_path = _create_test_package(script)
repo_url = _create_svn_repo(script, checkout_path)
result = script.pip(
'install',
'-e', 'svn+' + repo_url + '#egg=version-pkg'
)
result.assert_installed('version-pkg', with_files=['.svn'])
def _test_install_editable_from_git(script, tmpdir):
"""Test cloning from Git."""
pkg_path = _create_test_package(script, name='testpackage', vcs='git')
args = ['install', '-e', 'git+%s#egg=testpackage' % path_to_url(pkg_path)]
result = script.pip(*args, **{"expect_error": True})
result.assert_installed('testpackage', with_files=['.git'])
def test_basic_install_editable_from_git(script, tmpdir):
_test_install_editable_from_git(script, tmpdir)
@pytest.mark.network
def test_install_editable_from_git_autobuild_wheel(
script, tmpdir, common_wheels):
script.pip('install', 'wheel', '--no-index', '-f', common_wheels)
_test_install_editable_from_git(script, tmpdir)
@pytest.mark.network
def test_install_editable_uninstalls_existing(data, script, tmpdir):
"""
Test that installing an editable uninstalls a previously installed
non-editable version.
https://github.com/pypa/pip/issues/1548
https://github.com/pypa/pip/pull/1552
"""
to_install = data.packages.join("pip-test-package-0.1.tar.gz")
result = script.pip_install_local(to_install)
assert 'Successfully installed pip-test-package' in result.stdout
result.assert_installed('piptestpackage', editable=False)
result = script.pip(
'install', '-e',
'%s#egg=pip-test-package' %
local_checkout(
'git+https://github.com/pypa/pip-test-package.git',
tmpdir.join("cache"),
),
)
result.assert_installed('pip-test-package', with_files=['.git'])
assert 'Found existing installation: pip-test-package 0.1' in result.stdout
assert 'Uninstalling pip-test-package-' in result.stdout
assert 'Successfully uninstalled pip-test-package' in result.stdout
def test_install_editable_uninstalls_existing_from_path(script, data):
"""
Test that installing an editable uninstalls a previously installed
non-editable version from path
"""
to_install = data.src.join('simplewheel-1.0')
result = script.pip_install_local(to_install)
assert 'Successfully installed simplewheel' in result.stdout
simple_folder = script.site_packages / 'simplewheel'
result.assert_installed('simplewheel', editable=False)
assert simple_folder in result.files_created, str(result.stdout)
result = script.pip(
'install', '-e',
to_install,
)
install_path = script.site_packages / 'simplewheel.egg-link'
assert install_path in result.files_created, str(result)
assert 'Found existing installation: simplewheel 1.0' in result.stdout
assert 'Uninstalling simplewheel-' in result.stdout
assert 'Successfully uninstalled simplewheel' in result.stdout
assert simple_folder in result.files_deleted, str(result.stdout)
@need_mercurial
def test_basic_install_editable_from_hg(script, tmpdir):
"""Test cloning from Mercurial."""
pkg_path = _create_test_package(script, name='testpackage', vcs='hg')
args = ['install', '-e', 'hg+%s#egg=testpackage' % path_to_url(pkg_path)]
result = script.pip(*args, **{"expect_error": True})
result.assert_installed('testpackage', with_files=['.hg'])
@need_mercurial
def test_vcs_url_final_slash_normalization(script, tmpdir):
"""
Test that presence or absence of final slash in VCS URL is normalized.
"""
pkg_path = _create_test_package(script, name='testpackage', vcs='hg')
args = ['install', '-e', 'hg+%s/#egg=testpackage' % path_to_url(pkg_path)]
result = script.pip(*args, **{"expect_error": True})
result.assert_installed('testpackage', with_files=['.hg'])
@need_bzr
def test_install_editable_from_bazaar(script, tmpdir):
"""Test checking out from Bazaar."""
pkg_path = _create_test_package(script, name='testpackage', vcs='bazaar')
args = ['install', '-e', 'bzr+%s/#egg=testpackage' % path_to_url(pkg_path)]
result = script.pip(*args, **{"expect_error": True})
result.assert_installed('testpackage', with_files=['.bzr'])
@pytest.mark.network
@need_bzr
def test_vcs_url_urlquote_normalization(script, tmpdir):
"""
Test that urlquoted characters are normalized for repo URL comparison.
"""
script.pip(
'install', '-e',
'%s/#egg=django-wikiapp' %
local_checkout(
'bzr+http://bazaar.launchpad.net/%7Edjango-wikiapp/django-wikiapp'
'/release-0.1',
tmpdir.join("cache"),
),
)
def test_basic_install_from_local_directory(script, data):
"""
Test installing from a local directory.
"""
to_install = data.packages.join("FSPkg")
result = script.pip('install', to_install, expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_info_folder = (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
)
assert fspkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
def test_basic_install_relative_directory(script, data):
"""
Test installing a requirement using a relative path.
"""
egg_info_file = (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
)
egg_link_file = (
script.site_packages / 'FSPkg.egg-link'
)
package_folder = script.site_packages / 'fspkg'
# Compute relative install path to FSPkg from scratch path.
full_rel_path = data.packages.join('FSPkg') - script.scratch_path
full_rel_url = (
'file:' + full_rel_path.replace(os.path.sep, '/') + '#egg=FSPkg'
)
embedded_rel_path = script.scratch_path.join(full_rel_path)
# For each relative path, install as either editable or not using either
# URLs with egg links or not.
for req_path in (full_rel_path, full_rel_url, embedded_rel_path):
# Regular install.
result = script.pip('install', req_path,
cwd=script.scratch_path)
assert egg_info_file in result.files_created, str(result)
assert package_folder in result.files_created, str(result)
script.pip('uninstall', '-y', 'fspkg')
# Editable install.
result = script.pip('install', '-e' + req_path,
cwd=script.scratch_path)
assert egg_link_file in result.files_created, str(result)
script.pip('uninstall', '-y', 'fspkg')
def test_install_quiet(script, data):
"""
Test that install -q is actually quiet.
"""
# Apparently if pip install -q is not actually quiet, then it breaks
# everything. See:
# https://github.com/pypa/pip/issues/3418
# https://github.com/docker-library/python/issues/83
to_install = data.packages.join("FSPkg")
result = script.pip('install', '-qqq', to_install, expect_error=False)
assert result.stdout == ""
assert result.stderr == ""
def test_hashed_install_success(script, data, tmpdir):
"""
Test that installing various sorts of requirements with correct hashes
works.
Test file URLs and index packages (which become HTTP URLs behind the
scenes).
"""
file_url = path_to_url(
(data.packages / 'simple-1.0.tar.gz').abspath)
with requirements_file(
'simple2==1.0 --hash=sha256:9336af72ca661e6336eb87bc7de3e8844d853e'
'3848c2b9bbd2e8bf01db88c2c7\n'
'{simple} --hash=sha256:393043e672415891885c9a2a0929b1af95fb866d6c'
'a016b42d2e6ce53619b653'.format(simple=file_url),
tmpdir) as reqs_file:
script.pip_install_local('-r', reqs_file.abspath, expect_error=False)
def test_hashed_install_failure(script, tmpdir):
"""Test that wrong hashes stop installation.
This makes sure prepare_files() is called in the course of installation
and so has the opportunity to halt if hashes are wrong. Checks on various
kinds of hashes are in test_req.py.
"""
with requirements_file('simple2==1.0 --hash=sha256:9336af72ca661e6336eb87b'
'c7de3e8844d853e3848c2b9bbd2e8bf01db88c2c\n',
tmpdir) as reqs_file:
result = script.pip_install_local('-r',
reqs_file.abspath,
expect_error=True)
assert len(result.files_created) == 0
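# For reference: the value given to --hash above is just the hex digest of the
# distribution archive. A minimal sketch of producing one (the file name is
# hypothetical):
#
#     import hashlib
#     with open("simple-1.0.tar.gz", "rb") as f:
#         digest = hashlib.sha256(f.read()).hexdigest()
#     # requirements line: simple==1.0 --hash=sha256:<digest>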
def test_install_from_local_directory_with_symlinks_to_directories(
script, data):
"""
Test installing from a local directory containing symlinks to directories.
"""
to_install = data.packages.join("symlinks")
result = script.pip('install', to_install, expect_error=False)
pkg_folder = script.site_packages / 'symlinks'
egg_info_folder = (
script.site_packages / 'symlinks-0.1.dev0-py%s.egg-info' % pyversion
)
assert pkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
def test_install_from_local_directory_with_no_setup_py(script, data):
"""
Test installing from a local directory with no 'setup.py'.
"""
result = script.pip('install', data.root, expect_error=True)
assert not result.files_created
assert "is not installable. File 'setup.py' not found." in result.stderr
def test_editable_install_from_local_directory_with_no_setup_py(script, data):
"""
Test installing from a local directory with no 'setup.py'.
"""
result = script.pip('install', '-e', data.root, expect_error=True)
assert not result.files_created
assert "is not installable. File 'setup.py' not found." in result.stderr
@pytest.mark.skipif("sys.version_info >= (3,4)")
@pytest.mark.xfail
def test_install_argparse_shadowed(script):
# When argparse is in the stdlib, we support installing it
# even though that's pretty useless because older packages did need to
# depend on it, and not having its metadata will cause pkg_resources
# requirements checks to fail // trigger easy-install, both of which are
# bad.
# XXX: Note, this test hits the outside-environment check, not the
# in-stdlib check, because our tests run in virtualenvs...
result = script.pip('install', 'argparse>=1.4')
assert "Not uninstalling argparse" in result.stdout
@pytest.mark.skipif("sys.version_info < (3,4)")
def test_upgrade_argparse_shadowed(script):
# If argparse is installed - even if it is shadowed on import - we support
# upgrading it and properly remove the older version's files.
script.pip('install', 'argparse==1.3')
result = script.pip('install', 'argparse>=1.4')
assert "Not uninstalling argparse" not in result.stdout
def test_install_curdir(script, data):
"""
Test installing current directory ('.').
"""
run_from = data.packages.join("FSPkg")
# Python 2.4 Windows balks if this exists already
egg_info = join(run_from, "FSPkg.egg-info")
if os.path.isdir(egg_info):
rmtree(egg_info)
result = script.pip('install', curdir, cwd=run_from, expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_info_folder = (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
)
assert fspkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
def test_install_pardir(script, data):
"""
Test installing parent directory ('..').
"""
run_from = data.packages.join("FSPkg", "fspkg")
result = script.pip('install', pardir, cwd=run_from, expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_info_folder = (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
)
assert fspkg_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
@pytest.mark.network
def test_install_global_option(script):
"""
Test using global distutils options.
(In particular those that disable the actual install action)
"""
result = script.pip(
'install', '--global-option=--version', "INITools==0.1",
expect_stderr=True)
assert '0.1\n' in result.stdout
def test_install_with_pax_header(script, data):
"""
test installing from a tarball with pax header for python<2.6
"""
script.pip('install', 'paxpkg.tar.bz2', cwd=data.packages)
def test_install_with_hacked_egg_info(script, data):
"""
test installing a package which defines its own egg_info class
"""
run_from = data.packages.join("HackedEggInfo")
result = script.pip('install', '.', cwd=run_from)
assert 'Successfully installed hackedegginfo-0.0.0\n' in result.stdout
@pytest.mark.network
def test_install_using_install_option_and_editable(script, tmpdir):
"""
Test installing a tool using -e and --install-option
"""
folder = 'script_folder'
script.scratch_path.join(folder).mkdir()
url = 'git+git://github.com/pypa/pip-test-package'
result = script.pip(
'install', '-e', '%s#egg=pip-test-package' %
local_checkout(url, tmpdir.join("cache")),
'--install-option=--script-dir=%s' % folder,
expect_stderr=True)
script_file = (
script.venv / 'src' / 'pip-test-package' /
folder / 'pip-test-package' + script.exe
)
assert script_file in result.files_created
@pytest.mark.network
@need_mercurial
def test_install_global_option_using_editable(script, tmpdir):
"""
Test using global distutils options, but in an editable installation
"""
url = 'hg+http://bitbucket.org/runeh/anyjson'
result = script.pip(
'install', '--global-option=--version', '-e',
'%s@0.2.5#egg=anyjson' % local_checkout(url, tmpdir.join("cache")),
expect_stderr=True)
assert 'Successfully installed anyjson' in result.stdout
@pytest.mark.network
def test_install_package_with_same_name_in_curdir(script):
"""
Test installing a package with the same name of a local folder
"""
script.scratch_path.join("mock==0.6").mkdir()
result = script.pip('install', 'mock==0.6')
egg_folder = script.site_packages / 'mock-0.6.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
mock100_setup_py = textwrap.dedent('''\
from setuptools import setup
setup(name='mock',
version='100.1')''')
def test_install_folder_using_dot_slash(script):
"""
Test installing a folder using pip install ./foldername
"""
script.scratch_path.join("mock").mkdir()
pkg_path = script.scratch_path / 'mock'
pkg_path.join("setup.py").write(mock100_setup_py)
result = script.pip('install', './mock')
egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
def test_install_folder_using_slash_in_the_end(script):
r"""
Test installing a folder using pip install foldername/ or foldername\
"""
script.scratch_path.join("mock").mkdir()
pkg_path = script.scratch_path / 'mock'
pkg_path.join("setup.py").write(mock100_setup_py)
result = script.pip('install', 'mock' + os.path.sep)
egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
def test_install_folder_using_relative_path(script):
"""
Test installing a folder using pip install folder1/folder2
"""
script.scratch_path.join("initools").mkdir()
script.scratch_path.join("initools", "mock").mkdir()
pkg_path = script.scratch_path / 'initools' / 'mock'
pkg_path.join("setup.py").write(mock100_setup_py)
result = script.pip('install', Path('initools') / 'mock')
egg_folder = script.site_packages / 'mock-100.1-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
@pytest.mark.network
def test_install_package_which_contains_dev_in_name(script):
"""
Test installing package from pypi which contains 'dev' in name
"""
result = script.pip('install', 'django-devserver==0.0.4')
devserver_folder = script.site_packages / 'devserver'
egg_info_folder = (
script.site_packages / 'django_devserver-0.0.4-py%s.egg-info' %
pyversion
)
assert devserver_folder in result.files_created, str(result.stdout)
assert egg_info_folder in result.files_created, str(result)
def test_install_package_with_target(script):
"""
Test installing a package using pip install --target
"""
target_dir = script.scratch_path / 'target'
result = script.pip_install_local('-t', target_dir, "simple==1.0")
assert Path('scratch') / 'target' / 'simple' in result.files_created, (
str(result)
)
# Test repeated call without --upgrade, no files should have changed
result = script.pip_install_local(
'-t', target_dir, "simple==1.0", expect_stderr=True,
)
assert not Path('scratch') / 'target' / 'simple' in result.files_updated
# Test upgrade call, check that new version is installed
result = script.pip_install_local('--upgrade', '-t',
target_dir, "simple==2.0")
assert Path('scratch') / 'target' / 'simple' in result.files_updated, (
str(result)
)
egg_folder = (
Path('scratch') / 'target' / 'simple-2.0-py%s.egg-info' % pyversion)
assert egg_folder in result.files_created, (
str(result)
)
# Test install and upgrade of single-module package
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.0')
singlemodule_py = Path('scratch') / 'target' / 'singlemodule.py'
assert singlemodule_py in result.files_created, str(result)
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.1',
'--upgrade')
assert singlemodule_py in result.files_updated, str(result)
def test_install_with_target_and_scripts_no_warning(script, common_wheels):
"""
Test that installing with --target does not trigger the "script not
in PATH" warning (issue #5201)
"""
# We need to have wheel installed so that the project builds via a wheel,
# which is the only execution path that has the script warning.
script.pip('install', 'wheel', '--no-index', '-f', common_wheels)
target_dir = script.scratch_path / 'target'
pkga_path = script.scratch_path / 'pkga'
pkga_path.mkdir()
pkga_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
version='0.1',
py_modules=["pkga"],
entry_points={
'console_scripts': ['pkga=pkga:main']
}
)
"""))
pkga_path.join("pkga.py").write(textwrap.dedent("""
def main(): pass
"""))
result = script.pip('install', '--target', target_dir, pkga_path)
# This assertion isn't actually needed, if we get the script warning
# the script.pip() call will fail with "stderr not expected". But we
# leave the assertion to make the intention of the code clearer.
assert "--no-warn-script-location" not in result.stderr, str(result)
def test_install_package_with_root(script, data):
"""
Test installing a package using pip install --root
"""
root_dir = script.scratch_path / 'root'
result = script.pip(
'install', '--root', root_dir, '-f', data.find_links, '--no-index',
'simple==1.0',
)
normal_install_path = (
script.base_path / script.site_packages / 'simple-1.0-py%s.egg-info' %
pyversion
)
# use distutils to change the root exactly how the --root option does it
from distutils.util import change_root
root_path = change_root(
os.path.join(script.scratch, 'root'),
normal_install_path
)
assert root_path in result.files_created, str(result)
# Should show find-links location in output
assert "Looking in indexes: " not in result.stdout
assert "Looking in links: " in result.stdout
def test_install_package_with_prefix(script, data):
"""
Test installing a package using pip install --prefix
"""
prefix_path = script.scratch_path / 'prefix'
result = script.pip(
'install', '--prefix', prefix_path, '-f', data.find_links,
'--no-binary', 'simple', '--no-index', 'simple==1.0',
)
rel_prefix_path = script.scratch / 'prefix'
install_path = (
distutils.sysconfig.get_python_lib(prefix=rel_prefix_path) /
'simple-1.0-py{}.egg-info'.format(pyversion)
)
assert install_path in result.files_created, str(result)
def test_install_editable_with_prefix(script):
# make a dummy project
pkga_path = script.scratch_path / 'pkga'
pkga_path.mkdir()
pkga_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
version='0.1')
"""))
if hasattr(sys, "pypy_version_info"):
site_packages = os.path.join(
'prefix', 'lib', 'python{}'.format(pyversion), 'site-packages')
else:
site_packages = distutils.sysconfig.get_python_lib(prefix='prefix')
# make sure target path is in PYTHONPATH
pythonpath = script.scratch_path / site_packages
pythonpath.makedirs()
script.environ["PYTHONPATH"] = pythonpath
# install pkga package into the absolute prefix directory
prefix_path = script.scratch_path / 'prefix'
result = script.pip(
'install', '--editable', pkga_path, '--prefix', prefix_path)
# assert pkga is installed at correct location
install_path = script.scratch / site_packages / 'pkga.egg-link'
assert install_path in result.files_created, str(result)
def test_install_package_conflict_prefix_and_user(script, data):
"""
Test installing a package using pip install --prefix --user errors out
"""
prefix_path = script.scratch_path / 'prefix'
result = script.pip(
'install', '-f', data.find_links, '--no-index', '--user',
'--prefix', prefix_path, 'simple==1.0',
expect_error=True, quiet=True,
)
assert (
"Can not combine '--user' and '--prefix'" in result.stderr
)
# skip on win/py3 for now, see issue #782
@pytest.mark.skipif("sys.platform == 'win32' and sys.version_info >= (3,)")
def test_install_package_that_emits_unicode(script, data):
"""
Install a package with a setup.py that emits UTF-8 output and then fails.
Refs https://github.com/pypa/pip/issues/326
"""
to_install = data.packages.join("BrokenEmitsUTF8")
result = script.pip(
'install', to_install, expect_error=True, expect_temp=True, quiet=True,
)
assert (
'FakeError: this package designed to fail on install' in result.stdout
)
assert 'UnicodeDecodeError' not in result.stdout
def test_install_package_with_utf8_setup(script, data):
"""Install a package with a setup.py that declares a utf-8 encoding."""
to_install = data.packages.join("SetupPyUTF8")
script.pip('install', to_install)
def test_install_package_with_latin1_setup(script, data):
"""Install a package with a setup.py that declares a latin-1 encoding."""
to_install = data.packages.join("SetupPyLatin1")
script.pip('install', to_install)
def test_url_req_case_mismatch_no_index(script, data):
"""
Tarball URL requirements (with no egg fragment) that happen to have upper
case project names should be considered equal to later requirements that
reference the project name using lower case.
tests/data/packages contains Upper-1.0.tar.gz and Upper-2.0.tar.gz
'requiresupper' has install_requires = ['upper']
"""
Upper = '/'.join((data.find_links, 'Upper-1.0.tar.gz'))
result = script.pip(
'install', '--no-index', '-f', data.find_links, Upper, 'requiresupper'
)
# only Upper-1.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
def test_url_req_case_mismatch_file_index(script, data):
"""
Tarball URL requirements (with no egg fragment) that happen to have upper
case project names should be considered equal to later requirements that
reference the project name using lower case.
tests/data/packages3 contains Dinner-1.0.tar.gz and Dinner-2.0.tar.gz
'requiredinner' has install_requires = ['dinner']
This test is similar to test_url_req_case_mismatch_no_index; that test
tests behaviour when using "--no-index -f", while this one does the same
test when using "--index-url". Unfortunately this requires a different
set of packages as it requires a prepared index.html file and
subdirectory-per-package structure.
"""
Dinner = '/'.join((data.find_links3, 'dinner', 'Dinner-1.0.tar.gz'))
result = script.pip(
'install', '--index-url', data.find_links3, Dinner, 'requiredinner'
)
# only Dinner-1.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
def test_url_incorrect_case_no_index(script, data):
"""
Same as test_url_req_case_mismatch_no_index, except testing for the case
where the incorrect case is given in the name of the package to install
rather than in a requirements file.
"""
result = script.pip(
'install', '--no-index', '-f', data.find_links, "upper",
)
# only Upper-2.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Upper-1.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
egg_folder = script.site_packages / 'Upper-2.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
def test_url_incorrect_case_file_index(script, data):
"""
Same as test_url_req_case_mismatch_file_index, except testing for the case
where the incorrect case is given in the name of the package to install
rather than in a requirements file.
"""
result = script.pip(
'install', '--index-url', data.find_links3, "dinner",
expect_stderr=True,
)
    # only Dinner-2.0.tar.gz should get installed.
egg_folder = script.site_packages / 'Dinner-1.0-py%s.egg-info' % pyversion
assert egg_folder not in result.files_created, str(result)
egg_folder = script.site_packages / 'Dinner-2.0-py%s.egg-info' % pyversion
assert egg_folder in result.files_created, str(result)
# Should show index-url location in output
assert "Looking in indexes: " in result.stdout
assert "Looking in links: " not in result.stdout
@pytest.mark.network
def test_compiles_pyc(script):
"""
Test installing with --compile on
"""
del script.environ["PYTHONDONTWRITEBYTECODE"]
script.pip("install", "--compile", "--no-binary=:all:", "INITools==0.2")
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "initools/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "initools/__pycache__/__init__*.pyc"
)
assert any(exists)
@pytest.mark.network
def test_no_compiles_pyc(script):
"""
    Test installing with --no-compile on (no .pyc files should be created)
"""
del script.environ["PYTHONDONTWRITEBYTECODE"]
script.pip("install", "--no-compile", "--no-binary=:all:", "INITools==0.2")
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "initools/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "initools/__pycache__/__init__*.pyc"
)
assert not any(exists)
def test_install_upgrade_editable_depending_on_other_editable(script):
script.scratch_path.join("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
version='0.1')
"""))
script.pip('install', '--editable', pkga_path)
result = script.pip('list', '--format=freeze')
assert "pkga==0.1" in result.stdout
script.scratch_path.join("pkgb").mkdir()
pkgb_path = script.scratch_path / 'pkgb'
pkgb_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup
setup(name='pkgb',
version='0.1',
install_requires=['pkga'])
"""))
script.pip('install', '--upgrade', '--editable', pkgb_path, '--no-index')
result = script.pip('list', '--format=freeze')
assert "pkgb==0.1" in result.stdout
def test_install_subprocess_output_handling(script, data):
args = ['install', data.src.join('chattymodule')]
# Regular install should not show output from the chatty setup.py
result = script.pip(*args)
assert 0 == result.stdout.count("HELLO FROM CHATTYMODULE")
script.pip("uninstall", "-y", "chattymodule")
# With --verbose we should show the output.
# Only count examples with sys.argv[1] == egg_info, because we call
# setup.py multiple times, which should not count as duplicate output.
result = script.pip(*(args + ["--verbose"]))
assert 1 == result.stdout.count("HELLO FROM CHATTYMODULE egg_info")
script.pip("uninstall", "-y", "chattymodule")
# If the install fails, then we *should* show the output... but only once,
# even if --verbose is given.
result = script.pip(*(args + ["--global-option=--fail"]),
expect_error=True)
assert 1 == result.stdout.count("I DIE, I DIE")
result = script.pip(*(args + ["--global-option=--fail", "--verbose"]),
expect_error=True)
assert 1 == result.stdout.count("I DIE, I DIE")
def test_install_log(script, data, tmpdir):
# test that verbose logs go to "--log" file
f = tmpdir.join("log.txt")
args = ['--log=%s' % f,
'install', data.src.join('chattymodule')]
result = script.pip(*args)
assert 0 == result.stdout.count("HELLO FROM CHATTYMODULE")
with open(f, 'r') as fp:
# one from egg_info, one from install
assert 2 == fp.read().count("HELLO FROM CHATTYMODULE")
def test_install_topological_sort(script, data):
args = ['install', 'TopoRequires4', '--no-index', '-f', data.packages]
res = str(script.pip(*args, expect_error=False))
order1 = 'TopoRequires, TopoRequires2, TopoRequires3, TopoRequires4'
order2 = 'TopoRequires, TopoRequires3, TopoRequires2, TopoRequires4'
assert order1 in res or order2 in res, res
@pytest.mark.network
def test_install_wheel_broken(script, data, common_wheels):
script.pip('install', 'wheel', '--no-index', '-f', common_wheels)
res = script.pip(
'install', '--no-index', '-f', data.find_links, '-f', common_wheels,
'wheelbroken',
expect_stderr=True)
assert "Successfully installed wheelbroken-0.1" in str(res), str(res)
@pytest.mark.network
def test_cleanup_after_failed_wheel(script, data, common_wheels):
script.pip('install', 'wheel', '--no-index', '-f', common_wheels)
res = script.pip(
'install', '--no-index', '-f', data.find_links, '-f', common_wheels,
'wheelbrokenafter',
expect_stderr=True)
# One of the effects of not cleaning up is broken scripts:
script_py = script.bin_path / "script.py"
assert script_py.exists, script_py
shebang = open(script_py, 'r').readline().strip()
assert shebang != '#!python', shebang
# OK, assert that we *said* we were cleaning up:
assert "Running setup.py clean for wheelbrokenafter" in str(res), str(res)
@pytest.mark.network
def test_install_builds_wheels(script, data, common_wheels):
# We need to use a subprocess to get the right value on Windows.
res = script.run('python', '-c', (
'from pip._internal.utils import appdirs; '
'print(appdirs.user_cache_dir("pip"))'
))
wheels_cache = os.path.join(res.stdout.rstrip('\n'), 'wheels')
    # NB This incidentally tests local tree + tarball inputs;
# see test_install_editable_from_git_autobuild_wheel for editable
# vcs coverage.
script.pip('install', 'wheel', '--no-index', '-f', common_wheels)
to_install = data.packages.join('requires_wheelbroken_upper')
res = script.pip(
'install', '--no-index', '-f', data.find_links, '-f', common_wheels,
to_install, expect_stderr=True)
expected = ("Successfully installed requires-wheelbroken-upper-0"
" upper-2.0 wheelbroken-0.1")
# Must have installed it all
assert expected in str(res), str(res)
wheels = []
for top, dirs, files in os.walk(wheels_cache):
wheels.extend(files)
# and built wheels for upper and wheelbroken
assert "Running setup.py bdist_wheel for upper" in str(res), str(res)
assert "Running setup.py bdist_wheel for wheelb" in str(res), str(res)
# Wheels are built for local directories, but not cached.
assert "Running setup.py bdist_wheel for requir" in str(res), str(res)
    # At least one wheel should have been written into the cache
assert wheels != [], str(res)
# and installed from the wheel
assert "Running setup.py install for upper" not in str(res), str(res)
# Wheels are built for local directories, but not cached.
assert "Running setup.py install for requir" not in str(res), str(res)
# wheelbroken has to run install
assert "Running setup.py install for wheelb" in str(res), str(res)
# We want to make sure we used the correct implementation tag
assert wheels == [
"Upper-2.0-{}-none-any.whl".format(pep425tags.implementation_tag),
]
@pytest.mark.network
def test_install_no_binary_disables_building_wheels(
script, data, common_wheels):
script.pip('install', 'wheel', '--no-index', '-f', common_wheels)
to_install = data.packages.join('requires_wheelbroken_upper')
res = script.pip(
'install', '--no-index', '--no-binary=upper', '-f', data.find_links,
'-f', common_wheels,
to_install, expect_stderr=True)
expected = ("Successfully installed requires-wheelbroken-upper-0"
" upper-2.0 wheelbroken-0.1")
# Must have installed it all
assert expected in str(res), str(res)
# and built wheels for wheelbroken only
assert "Running setup.py bdist_wheel for wheelb" in str(res), str(res)
# Wheels are built for local directories, but not cached across runs
assert "Running setup.py bdist_wheel for requir" in str(res), str(res)
# Don't build wheel for upper which was blacklisted
assert "Running setup.py bdist_wheel for upper" not in str(res), str(res)
# Wheels are built for local directories, but not cached across runs
assert "Running setup.py install for requir" not in str(res), str(res)
    # And these two fell back to sdist-based installs.
assert "Running setup.py install for wheelb" in str(res), str(res)
assert "Running setup.py install for upper" in str(res), str(res)
@pytest.mark.network
def test_install_no_binary_disables_cached_wheels(script, data, common_wheels):
script.pip('install', 'wheel', '--no-index', '-f', common_wheels)
# Seed the cache
script.pip(
'install', '--no-index', '-f', data.find_links, '-f', common_wheels,
'upper')
script.pip('uninstall', 'upper', '-y')
res = script.pip(
'install', '--no-index', '--no-binary=:all:', '-f', data.find_links,
'upper', expect_stderr=True)
assert "Successfully installed upper-2.0" in str(res), str(res)
# No wheel building for upper, which was blacklisted
assert "Running setup.py bdist_wheel for upper" not in str(res), str(res)
# Must have used source, not a cached wheel to install upper.
assert "Running setup.py install for upper" in str(res), str(res)
def test_install_editable_with_wrong_egg_name(script):
script.scratch_path.join("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
version='0.1')
"""))
result = script.pip(
'install', '--editable', 'file://%s#egg=pkgb' % pkga_path,
expect_error=True)
assert ("egg_info for package pkgb produced metadata "
"for project name pkga. Fix your #egg=pkgb "
"fragments.") in result.stderr
assert "Successfully installed pkga" in str(result), str(result)
def test_install_tar_xz(script, data):
try:
import lzma # noqa
except ImportError:
pytest.skip("No lzma support")
res = script.pip('install', data.packages / 'singlemodule-0.0.1.tar.xz')
assert "Successfully installed singlemodule-0.0.1" in res.stdout, res
def test_install_tar_lzma(script, data):
try:
import lzma # noqa
except ImportError:
pytest.skip("No lzma support")
res = script.pip('install', data.packages / 'singlemodule-0.0.1.tar.lzma')
assert "Successfully installed singlemodule-0.0.1" in res.stdout, res
def test_double_install(script):
"""
    Test that a double install passes when the same requirement is given twice
"""
result = script.pip('install', 'pip', 'pip',
use_module=True,
expect_error=False)
msg = "Double requirement given: pip (already in pip, name='pip')"
assert msg not in result.stderr
def test_double_install_fail(script):
"""
    Test that a double install fails when two different versions of the same project are requested
"""
result = script.pip('install', 'pip==*', 'pip==7.1.2', expect_error=True)
msg = ("Double requirement given: pip==7.1.2 (already in pip==*, "
"name='pip')")
assert msg in result.stderr
def test_install_incompatible_python_requires(script, common_wheels):
script.scratch_path.join("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
python_requires='<1.0',
version='0.1')
"""))
script.pip(
'install', 'setuptools>24.2', # This should not be needed
'--no-index', '-f', common_wheels,
)
result = script.pip('install', pkga_path, expect_error=True)
assert ("pkga requires Python '<1.0' "
"but the running Python is ") in result.stderr, str(result)
def test_install_incompatible_python_requires_editable(script, common_wheels):
script.scratch_path.join("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
python_requires='<1.0',
version='0.1')
"""))
script.pip(
'install', 'setuptools>24.2', # This should not be needed
'--no-index', '-f', common_wheels,
)
result = script.pip(
'install', '--editable=%s' % pkga_path, expect_error=True)
assert ("pkga requires Python '<1.0' "
"but the running Python is ") in result.stderr, str(result)
@pytest.mark.network
def test_install_incompatible_python_requires_wheel(script, common_wheels):
script.scratch_path.join("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
python_requires='<1.0',
version='0.1')
"""))
script.pip(
'install', 'setuptools>24.2', # This should not be needed
'--no-index', '-f', common_wheels,
)
script.pip('install', 'wheel', '--no-index', '-f', common_wheels)
script.run(
'python', 'setup.py', 'bdist_wheel', '--universal', cwd=pkga_path)
result = script.pip('install', './pkga/dist/pkga-0.1-py2.py3-none-any.whl',
expect_error=True)
assert ("pkga requires Python '<1.0' "
"but the running Python is ") in result.stderr
def test_install_compatible_python_requires(script, common_wheels):
script.scratch_path.join("pkga").mkdir()
pkga_path = script.scratch_path / 'pkga'
pkga_path.join("setup.py").write(textwrap.dedent("""
from setuptools import setup
setup(name='pkga',
python_requires='>1.0',
version='0.1')
"""))
script.pip(
'install', 'setuptools>24.2', # This should not be needed
'--no-index', '-f', common_wheels,
)
res = script.pip('install', pkga_path, expect_error=True)
assert "Successfully installed pkga-0.1" in res.stdout, res
@pytest.mark.network
def test_install_pep508_with_url(script):
res = script.pip(
'install', '--no-index',
'packaging@https://files.pythonhosted.org/packages/2f/2b/'
'c681de3e1dbcd469537aefb15186b800209aa1f299d933d23b48d85c9d56/'
'packaging-15.3-py2.py3-none-any.whl#sha256='
'ce1a869fe039fbf7e217df36c4653d1dbe657778b2d41709593a0003584405f4'
)
assert "Successfully installed packaging-15.3" in str(res), str(res)
@pytest.mark.network
def test_install_pep508_with_url_in_install_requires(script):
pkga_path = create_test_package_with_setup(
script, name='pkga', version='1.0',
install_requires=[
'packaging@https://files.pythonhosted.org/packages/2f/2b/'
'c681de3e1dbcd469537aefb15186b800209aa1f299d933d23b48d85c9d56/'
'packaging-15.3-py2.py3-none-any.whl#sha256='
'ce1a869fe039fbf7e217df36c4653d1dbe657778b2d41709593a0003584405f4'
],
)
res = script.pip('install', pkga_path, expect_error=True)
assert "Direct url requirement " in res.stderr, str(res)
assert "are not allowed for dependencies" in res.stderr, str(res)
def test_installing_scripts_outside_path_prints_warning(script):
result = script.pip_install_local(
"--prefix", script.scratch_path, "script_wheel1", expect_error=True
)
assert "Successfully installed script-wheel1" in result.stdout, str(result)
assert "--no-warn-script-location" in result.stderr
def test_installing_scripts_outside_path_can_suppress_warning(script):
result = script.pip_install_local(
"--prefix", script.scratch_path, "--no-warn-script-location",
"script_wheel1"
)
assert "Successfully installed script-wheel1" in result.stdout, str(result)
assert "--no-warn-script-location" not in result.stderr
def test_installing_scripts_on_path_does_not_print_warning(script):
result = script.pip_install_local("script_wheel1")
assert "Successfully installed script-wheel1" in result.stdout, str(result)
assert "--no-warn-script-location" not in result.stderr
def test_installed_files_recorded_in_deterministic_order(script, data):
"""
Ensure that we record the files installed by a package in a deterministic
order, to make installs reproducible.
"""
to_install = data.packages.join("FSPkg")
result = script.pip('install', to_install, expect_error=False)
fspkg_folder = script.site_packages / 'fspkg'
egg_info = 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
installed_files_path = (
script.site_packages / egg_info / 'installed-files.txt'
)
assert fspkg_folder in result.files_created, str(result.stdout)
assert installed_files_path in result.files_created, str(result)
installed_files_path = result.files_created[installed_files_path].full
installed_files_lines = [
p for p in Path(installed_files_path).read_text().split('\n') if p
]
assert installed_files_lines == sorted(installed_files_lines)
def test_install_conflict_results_in_warning(script, data):
pkgA_path = create_test_package_with_setup(
script,
name='pkgA', version='1.0', install_requires=['pkgb == 1.0'],
)
pkgB_path = create_test_package_with_setup(
script,
name='pkgB', version='2.0',
)
# Install pkgA without its dependency
result1 = script.pip('install', '--no-index', pkgA_path, '--no-deps')
assert "Successfully installed pkgA-1.0" in result1.stdout, str(result1)
# Then install an incorrect version of the dependency
result2 = script.pip(
'install', '--no-index', pkgB_path,
expect_stderr=True,
)
assert "pkga 1.0 has requirement pkgb==1.0" in result2.stderr, str(result2)
assert "Successfully installed pkgB-2.0" in result2.stdout, str(result2)
def test_install_conflict_warning_can_be_suppressed(script, data):
pkgA_path = create_test_package_with_setup(
script,
name='pkgA', version='1.0', install_requires=['pkgb == 1.0'],
)
pkgB_path = create_test_package_with_setup(
script,
name='pkgB', version='2.0',
)
# Install pkgA without its dependency
result1 = script.pip('install', '--no-index', pkgA_path, '--no-deps')
assert "Successfully installed pkgA-1.0" in result1.stdout, str(result1)
# Then install an incorrect version of the dependency; suppressing warning
result2 = script.pip(
'install', '--no-index', pkgB_path, '--no-warn-conflicts'
)
assert "Successfully installed pkgB-2.0" in result2.stdout, str(result2)
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import os
import posixpath
from environment import IsPreviewServer
from extensions_paths import JSON_TEMPLATES, PRIVATE_TEMPLATES
import third_party.json_schema_compiler.json_parse as json_parse
import third_party.json_schema_compiler.model as model
from third_party.json_schema_compiler.memoize import memoize
def _CreateId(node, prefix):
if node.parent is not None and not isinstance(node.parent, model.Namespace):
return '-'.join([prefix, node.parent.simple_name, node.simple_name])
return '-'.join([prefix, node.simple_name])
def _FormatValue(value):
'''Inserts commas every three digits for integer values. It is magic.
'''
s = str(value)
return ','.join([s[max(0, i - 3):i] for i in range(len(s), 0, -3)][::-1])
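# Illustration only (a hypothetical helper, not part of the original module
# and never called by it): shows the digit grouping _FormatValue performs.
def _FormatValueExamples():
  assert _FormatValue(7) == '7'
  assert _FormatValue(1000) == '1,000'
  assert _FormatValue(1234567) == '1,234,567'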
def _GetByNameDict(namespace):
'''Returns a dictionary mapping names to named items from |namespace|.
This lets us render specific API entities rather than the whole thing at once,
for example {{apis.manifestTypes.byName.ExternallyConnectable}}.
Includes items from namespace['types'], namespace['functions'],
namespace['events'], and namespace['properties'].
'''
by_name = {}
for item_type in ('types', 'functions', 'events', 'properties'):
if item_type in namespace:
old_size = len(by_name)
by_name.update(
(item['name'], item) for item in namespace[item_type])
assert len(by_name) == old_size + len(namespace[item_type]), (
'Duplicate name in %r' % namespace)
return by_name
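# A small sketch (hypothetical helper, not used by the real data source) of
# the mapping _GetByNameDict produces for a namespace-shaped dict.
def _GetByNameDictExample():
  namespace = {
    'functions': [{'name': 'create'}],
    'events': [{'name': 'onRemoved'}],
  }
  by_name = _GetByNameDict(namespace)
  assert by_name == {
    'create': {'name': 'create'},
    'onRemoved': {'name': 'onRemoved'},
  }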
def _GetEventByNameFromEvents(events):
'''Parses the dictionary |events| to find the definitions of members of the
type Event. Returns a dictionary mapping the name of a member to that
member's definition.
'''
assert 'types' in events, \
'The dictionary |events| must contain the key "types".'
event_list = [t for t in events['types'] if t.get('name') == 'Event']
assert len(event_list) == 1, 'Exactly one type must be called "Event".'
return _GetByNameDict(event_list[0])
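# Hypothetical usage sketch (mirrors the shape events.json provides, but is
# not itself part of the docs server):
def _GetEventByNameFromEventsExample():
  events = {
    'types': [
      {'name': 'Event', 'functions': [{'name': 'addListener'}]},
    ]
  }
  assert _GetEventByNameFromEvents(events) == {
    'addListener': {'name': 'addListener'}
  }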
class _JSCModel(object):
'''Uses a Model from the JSON Schema Compiler and generates a dict that
a Handlebar template can use for a data source.
'''
def __init__(self,
api_name,
api_models,
ref_resolver,
disable_refs,
availability_finder,
json_cache,
template_cache,
features_bundle,
event_byname_function):
self._ref_resolver = ref_resolver
self._disable_refs = disable_refs
self._availability_finder = availability_finder
self._api_availabilities = json_cache.GetFromFile(
posixpath.join(JSON_TEMPLATES, 'api_availabilities.json'))
self._intro_tables = json_cache.GetFromFile(
posixpath.join(JSON_TEMPLATES, 'intro_tables.json'))
self._api_features = features_bundle.GetAPIFeatures()
self._template_cache = template_cache
self._event_byname_function = event_byname_function
self._namespace = api_models.GetModel(api_name).Get()
def _FormatDescription(self, description):
if self._disable_refs:
return description
return self._ref_resolver.ResolveAllLinks(description,
namespace=self._namespace.name)
def _GetLink(self, link):
if self._disable_refs:
type_name = link.split('.', 1)[-1]
return { 'href': '#type-%s' % type_name, 'text': link, 'name': link }
return self._ref_resolver.SafeGetLink(link, namespace=self._namespace.name)
def ToDict(self):
if self._namespace is None:
return {}
chrome_dot_name = 'chrome.%s' % self._namespace.name
as_dict = {
'name': self._namespace.name,
'namespace': self._namespace.documentation_options.get('namespace',
chrome_dot_name),
'title': self._namespace.documentation_options.get('title',
chrome_dot_name),
'documentationOptions': self._namespace.documentation_options,
'types': self._GenerateTypes(self._namespace.types.values()),
'functions': self._GenerateFunctions(self._namespace.functions),
'events': self._GenerateEvents(self._namespace.events),
'domEvents': self._GenerateDomEvents(self._namespace.events),
'properties': self._GenerateProperties(self._namespace.properties),
}
if self._namespace.deprecated:
as_dict['deprecated'] = self._namespace.deprecated
# Rendering the intro list is really expensive and there's no point doing it
    # unless we're rendering the page - and disable_refs=True implies we're not.
if not self._disable_refs:
as_dict.update({
'introList': self._GetIntroTableList(),
'channelWarning': self._GetChannelWarning(),
})
as_dict['byName'] = _GetByNameDict(as_dict)
return as_dict
def _GetApiAvailability(self):
return self._availability_finder.GetApiAvailability(self._namespace.name)
def _GetChannelWarning(self):
if not self._IsExperimental():
return { self._GetApiAvailability().channel: True }
return None
def _IsExperimental(self):
return self._namespace.name.startswith('experimental')
def _GenerateTypes(self, types):
return [self._GenerateType(t) for t in types]
def _GenerateType(self, type_):
type_dict = {
'name': type_.simple_name,
'description': self._FormatDescription(type_.description),
'properties': self._GenerateProperties(type_.properties),
'functions': self._GenerateFunctions(type_.functions),
'events': self._GenerateEvents(type_.events),
'id': _CreateId(type_, 'type')
}
self._RenderTypeInformation(type_, type_dict)
return type_dict
def _GenerateFunctions(self, functions):
return [self._GenerateFunction(f) for f in functions.values()]
def _GenerateFunction(self, function):
function_dict = {
'name': function.simple_name,
'description': self._FormatDescription(function.description),
'callback': self._GenerateCallback(function.callback),
'parameters': [],
'returns': None,
'id': _CreateId(function, 'method')
}
self._AddCommonProperties(function_dict, function)
if function.returns:
function_dict['returns'] = self._GenerateType(function.returns)
for param in function.params:
function_dict['parameters'].append(self._GenerateProperty(param))
if function.callback is not None:
# Show the callback as an extra parameter.
function_dict['parameters'].append(
self._GenerateCallbackProperty(function.callback))
if len(function_dict['parameters']) > 0:
function_dict['parameters'][-1]['last'] = True
return function_dict
def _GenerateEvents(self, events):
return [self._GenerateEvent(e) for e in events.values()
if not e.supports_dom]
def _GenerateDomEvents(self, events):
return [self._GenerateEvent(e) for e in events.values()
if e.supports_dom]
def _GenerateEvent(self, event):
event_dict = {
'name': event.simple_name,
'description': self._FormatDescription(event.description),
'filters': [self._GenerateProperty(f) for f in event.filters],
'conditions': [self._GetLink(condition)
for condition in event.conditions],
'actions': [self._GetLink(action) for action in event.actions],
'supportsRules': event.supports_rules,
'supportsListeners': event.supports_listeners,
'properties': [],
'id': _CreateId(event, 'event'),
'byName': {},
}
self._AddCommonProperties(event_dict, event)
# Add the Event members to each event in this object.
# If refs are disabled then don't worry about this, since it's only needed
# for rendering, and disable_refs=True implies we're not rendering.
if self._event_byname_function and not self._disable_refs:
event_dict['byName'].update(self._event_byname_function())
# We need to create the method description for addListener based on the
# information stored in |event|.
if event.supports_listeners:
callback_object = model.Function(parent=event,
name='callback',
json={},
namespace=event.parent,
origin='')
callback_object.params = event.params
if event.callback:
callback_object.callback = event.callback
callback_parameters = self._GenerateCallbackProperty(callback_object)
callback_parameters['last'] = True
event_dict['byName']['addListener'] = {
'name': 'addListener',
'callback': self._GenerateFunction(callback_object),
'parameters': [callback_parameters]
}
if event.supports_dom:
# Treat params as properties of the custom Event object associated with
# this DOM Event.
event_dict['properties'] += [self._GenerateProperty(param)
for param in event.params]
return event_dict
def _GenerateCallback(self, callback):
if not callback:
return None
callback_dict = {
'name': callback.simple_name,
'simple_type': {'simple_type': 'function'},
'optional': callback.optional,
'parameters': []
}
for param in callback.params:
callback_dict['parameters'].append(self._GenerateProperty(param))
if (len(callback_dict['parameters']) > 0):
callback_dict['parameters'][-1]['last'] = True
return callback_dict
def _GenerateProperties(self, properties):
return [self._GenerateProperty(v) for v in properties.values()]
def _GenerateProperty(self, property_):
if not hasattr(property_, 'type_'):
for d in dir(property_):
if not d.startswith('_'):
print ('%s -> %s' % (d, getattr(property_, d)))
type_ = property_.type_
# Make sure we generate property info for arrays, too.
# TODO(kalman): what about choices?
if type_.property_type == model.PropertyType.ARRAY:
properties = type_.item_type.properties
else:
properties = type_.properties
property_dict = {
'name': property_.simple_name,
'optional': property_.optional,
'description': self._FormatDescription(property_.description),
'properties': self._GenerateProperties(type_.properties),
'functions': self._GenerateFunctions(type_.functions),
'parameters': [],
'returns': None,
'id': _CreateId(property_, 'property')
}
self._AddCommonProperties(property_dict, property_)
if type_.property_type == model.PropertyType.FUNCTION:
function = type_.function
for param in function.params:
property_dict['parameters'].append(self._GenerateProperty(param))
if function.returns:
property_dict['returns'] = self._GenerateType(function.returns)
value = property_.value
if value is not None:
if isinstance(value, int):
property_dict['value'] = _FormatValue(value)
else:
property_dict['value'] = value
else:
self._RenderTypeInformation(type_, property_dict)
return property_dict
def _GenerateCallbackProperty(self, callback):
property_dict = {
'name': callback.simple_name,
'description': self._FormatDescription(callback.description),
'optional': callback.optional,
'is_callback': True,
'id': _CreateId(callback, 'property'),
'simple_type': 'function',
}
if (callback.parent is not None and
not isinstance(callback.parent, model.Namespace)):
property_dict['parentName'] = callback.parent.simple_name
return property_dict
def _RenderTypeInformation(self, type_, dst_dict):
dst_dict['is_object'] = type_.property_type == model.PropertyType.OBJECT
if type_.property_type == model.PropertyType.CHOICES:
dst_dict['choices'] = self._GenerateTypes(type_.choices)
      # Mark the last choice so templates know when to stop adding "or"
      # between choices.
if len(dst_dict['choices']) > 0:
dst_dict['choices'][-1]['last'] = True
elif type_.property_type == model.PropertyType.REF:
dst_dict['link'] = self._GetLink(type_.ref_type)
elif type_.property_type == model.PropertyType.ARRAY:
dst_dict['array'] = self._GenerateType(type_.item_type)
elif type_.property_type == model.PropertyType.ENUM:
dst_dict['enum_values'] = [
{'name': value.name, 'description': value.description}
for value in type_.enum_values]
if len(dst_dict['enum_values']) > 0:
dst_dict['enum_values'][-1]['last'] = True
elif type_.instance_of is not None:
dst_dict['simple_type'] = type_.instance_of
else:
dst_dict['simple_type'] = type_.property_type.name
def _GetIntroTableList(self):
'''Create a generic data structure that can be traversed by the templates
to create an API intro table.
'''
intro_rows = [
self._GetIntroDescriptionRow(),
self._GetIntroAvailabilityRow()
] + self._GetIntroDependencyRows()
# Add rows using data from intro_tables.json, overriding any existing rows
# if they share the same 'title' attribute.
row_titles = [row['title'] for row in intro_rows]
for misc_row in self._GetMiscIntroRows():
if misc_row['title'] in row_titles:
intro_rows[row_titles.index(misc_row['title'])] = misc_row
else:
intro_rows.append(misc_row)
return intro_rows
def _GetIntroDescriptionRow(self):
''' Generates the 'Description' row data for an API intro table.
'''
return {
'title': 'Description',
'content': [
{ 'text': self._FormatDescription(self._namespace.description) }
]
}
def _GetIntroAvailabilityRow(self):
''' Generates the 'Availability' row data for an API intro table.
'''
if self._IsExperimental():
status = 'experimental'
version = None
else:
availability = self._GetApiAvailability()
status = availability.channel
version = availability.version
return {
'title': 'Availability',
'content': [{
'partial': self._template_cache.GetFromFile(
posixpath.join(PRIVATE_TEMPLATES,
'intro_tables',
'%s_message.html' % status)).Get(),
'version': version
}]
}
def _GetIntroDependencyRows(self):
# Devtools aren't in _api_features. If we're dealing with devtools, bail.
if 'devtools' in self._namespace.name:
return []
api_feature = self._api_features.Get().get(self._namespace.name)
if not api_feature:
logging.error('"%s" not found in _api_features.json' %
self._namespace.name)
return []
permissions_content = []
manifest_content = []
def categorize_dependency(dependency):
def make_code_node(text):
return { 'class': 'code', 'text': text }
context, name = dependency.split(':', 1)
if context == 'permission':
permissions_content.append(make_code_node('"%s"' % name))
elif context == 'manifest':
manifest_content.append(make_code_node('"%s": {...}' % name))
elif context == 'api':
transitive_dependencies = (
self._api_features.Get().get(name, {}).get('dependencies', []))
for transitive_dependency in transitive_dependencies:
categorize_dependency(transitive_dependency)
else:
logging.error('Unrecognized dependency for %s: %s' %
(self._namespace.name, context))
for dependency in api_feature.get('dependencies', ()):
categorize_dependency(dependency)
dependency_rows = []
if permissions_content:
dependency_rows.append({
'title': 'Permissions',
'content': permissions_content
})
if manifest_content:
dependency_rows.append({
'title': 'Manifest',
'content': manifest_content
})
return dependency_rows
def _GetMiscIntroRows(self):
''' Generates miscellaneous intro table row data, such as 'Permissions',
'Samples', and 'Learn More', using intro_tables.json.
'''
misc_rows = []
# Look up the API name in intro_tables.json, which is structured
# similarly to the data structure being created. If the name is found, loop
# through the attributes and add them to this structure.
table_info = self._intro_tables.Get().get(self._namespace.name)
if table_info is None:
return misc_rows
for category in table_info.keys():
content = copy.deepcopy(table_info[category])
for node in content:
# If there is a 'partial' argument and it hasn't already been
# converted to a Handlebar object, transform it to a template.
if 'partial' in node:
node['partial'] = self._template_cache.GetFromFile(
posixpath.join(PRIVATE_TEMPLATES, node['partial'])).Get()
misc_rows.append({ 'title': category, 'content': content })
return misc_rows
def _AddCommonProperties(self, target, src):
if src.deprecated is not None:
target['deprecated'] = self._FormatDescription(
src.deprecated)
if (src.parent is not None and
not isinstance(src.parent, model.Namespace)):
target['parentName'] = src.parent.simple_name
class _LazySamplesGetter(object):
'''This class is needed so that an extensions API page does not have to fetch
the apps samples page and vice versa.
'''
def __init__(self, api_name, samples):
self._api_name = api_name
self._samples = samples
def get(self, key):
return self._samples.FilterSamples(key, self._api_name)
class APIDataSource(object):
'''This class fetches and loads JSON APIs from the FileSystem passed in with
|compiled_fs_factory|, so the APIs can be plugged into templates.
'''
class Factory(object):
def __init__(self,
compiled_fs_factory,
file_system,
availability_finder,
api_models,
features_bundle,
object_store_creator):
self._json_cache = compiled_fs_factory.ForJson(file_system)
self._template_cache = compiled_fs_factory.ForTemplates(file_system)
self._availability_finder = availability_finder
self._api_models = api_models
self._features_bundle = features_bundle
self._model_cache_refs = object_store_creator.Create(
APIDataSource, 'model-cache-refs')
self._model_cache_no_refs = object_store_creator.Create(
APIDataSource, 'model-cache-no-refs')
# These must be set later via the SetFooDataSourceFactory methods.
self._ref_resolver_factory = None
self._samples_data_source_factory = None
# This caches the result of _LoadEventByName.
self._event_byname = None
def SetSamplesDataSourceFactory(self, samples_data_source_factory):
self._samples_data_source_factory = samples_data_source_factory
def SetReferenceResolverFactory(self, ref_resolver_factory):
self._ref_resolver_factory = ref_resolver_factory
def Create(self, request):
'''Creates an APIDataSource.
'''
if self._samples_data_source_factory is None:
# Only error if there is a request, which means this APIDataSource is
# actually being used to render a page.
if request is not None:
logging.error('SamplesDataSource.Factory was never set in '
'APIDataSource.Factory.')
samples = None
else:
samples = self._samples_data_source_factory.Create(request)
return APIDataSource(self._GetSchemaModel, samples)
def _LoadEventByName(self):
'''All events have some members in common. We source their description
from Event in events.json.
'''
if self._event_byname is None:
self._event_byname = _GetEventByNameFromEvents(
self._GetSchemaModel('events', True))
return self._event_byname
def _GetModelCache(self, disable_refs):
if disable_refs:
return self._model_cache_no_refs
return self._model_cache_refs
def _GetSchemaModel(self, api_name, disable_refs):
jsc_model = self._GetModelCache(disable_refs).Get(api_name).Get()
if jsc_model is not None:
return jsc_model
jsc_model = _JSCModel(
api_name,
self._api_models,
self._ref_resolver_factory.Create() if not disable_refs else None,
disable_refs,
self._availability_finder,
self._json_cache,
self._template_cache,
self._features_bundle,
self._LoadEventByName).ToDict()
self._GetModelCache(disable_refs).Set(api_name, jsc_model)
return jsc_model
def __init__(self, get_schema_model, samples):
self._get_schema_model = get_schema_model
self._samples = samples
def _GenerateHandlebarContext(self, handlebar_dict):
# Parsing samples on the preview server takes seconds and doesn't add
# anything. Don't do it.
if not IsPreviewServer():
handlebar_dict['samples'] = _LazySamplesGetter(
handlebar_dict['name'],
self._samples)
return handlebar_dict
def get(self, api_name, disable_refs=False):
return self._GenerateHandlebarContext(
self._get_schema_model(api_name, disable_refs))
|
|
# -*- coding: utf-8 -*-
import bisect
from cms.models import Title, Page, EmptyTitle
from cms.utils import get_language_list
from cms.utils.compat import DJANGO_1_5
from cms.utils.conf import get_cms_setting
from cms.utils.permissions import get_user_sites_queryset
from django.contrib.admin.views.main import ChangeList, ALL_VAR, IS_POPUP_VAR, \
ORDER_TYPE_VAR, ORDER_VAR, SEARCH_VAR
from django.contrib.sites.models import Site
import django
COPY_VAR = "copy"
def cache_tree_children(queryset):
"""
For all items in the queryset, set the '_cached_children' attribute to a
list. This attribute is in turn used by the 'get_children' method on the
item, which would otherwise (if '_cached_children' is not set) cause a
database query.
The queryset must be ordered by 'path', or the function will put the children
in the wrong order.
"""
parents_dict = {}
# Loop through the queryset twice, so that the function works even if the
# mptt tree is broken. Since django caches querysets internally, the extra
# computation time is minimal.
for obj in queryset:
parents_dict[obj.pk] = obj
obj._cached_children = []
for obj in queryset:
parent = parents_dict.get(obj.parent_id)
if parent:
parent._cached_children.append(obj)
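# Minimal sketch of the wiring above (FakePage is a hypothetical stand-in;
# any objects with 'pk' and 'parent_id' attributes behave the same way):
def _cache_tree_children_example():
    class FakePage(object):
        def __init__(self, pk, parent_id):
            self.pk, self.parent_id = pk, parent_id
    root, child = FakePage(1, None), FakePage(2, 1)
    cache_tree_children([root, child])  # input must already be path-ordered
    assert root._cached_children == [child]
    assert child._cached_children == []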
class CMSChangeList(ChangeList):
"""
    Renders a Changelist - in our case it looks like a tree - it is the list
    of *instances* in the Admin.
    It is usually responsible for pagination (not here though, since we use a
    tree view).
"""
real_queryset = False
def __init__(self, request, *args, **kwargs):
from cms.utils.plugins import current_site
self._current_site = current_site(request)
super(CMSChangeList, self).__init__(request, *args, **kwargs)
try:
self.queryset = self.get_queryset(request)
except:
raise
self.get_results(request)
if self._current_site:
request.session['cms_admin_site'] = self._current_site.pk
self.set_sites(request)
def get_queryset(self, request=None):
if COPY_VAR in self.params:
del self.params[COPY_VAR]
if 'language' in self.params:
del self.params['language']
if 'page_id' in self.params:
del self.params['page_id']
if django.VERSION[1] > 3:
qs = super(CMSChangeList, self).get_queryset(request).drafts()
else:
qs = super(CMSChangeList, self).get_queryset().drafts()
if request:
site = self.current_site()
permissions = Page.permissions.get_change_id_list(request.user, site)
if permissions != Page.permissions.GRANT_ALL:
qs = qs.filter(pk__in=permissions)
# root_query_set is a read-only property in Django 1.6
# and will be removed in Django 1.8.
queryset_attr = 'root_query_set' if DJANGO_1_5 else 'root_queryset'
setattr(self, queryset_attr, self.root_query_set.filter(pk__in=permissions))
self.real_queryset = True
qs = qs.filter(site=self._current_site)
return qs
def is_filtered(self):
from cms.utils.plugins import SITE_VAR
lookup_params = self.params.copy() # a dictionary of the query string
for i in (ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, SITE_VAR, 'language', 'page_id'):
if i in lookup_params:
del lookup_params[i]
if not lookup_params.items() and not self.query:
return False
return True
def get_results(self, request):
if self.real_queryset:
super(CMSChangeList, self).get_results(request)
if not self.is_filtered():
self.full_result_count = self.result_count = self.root_query_set.count()
else:
self.full_result_count = self.root_query_set.count()
def set_items(self, request):
site = self.current_site()
        # Get all the pages, ordered by path (which makes it convenient to
        # build the tree below)
pages = self.get_queryset(request).drafts().order_by('path').select_related('publisher_public')
# Get lists of page IDs for which the current user has
# "permission to..." on the current site.
if get_cms_setting('PERMISSION'):
perm_edit_ids = Page.permissions.get_change_id_list(request.user, site)
perm_publish_ids = Page.permissions.get_publish_id_list(request.user, site)
perm_advanced_settings_ids = Page.permissions.get_advanced_settings_id_list(request.user, site)
restricted_ids = Page.permissions.get_restricted_id_list(site)
if perm_edit_ids and perm_edit_ids != Page.permissions.GRANT_ALL:
pages = pages.filter(pk__in=perm_edit_ids)
root_pages = []
pages = list(pages)
all_pages = pages[:] # That is, basically, a copy.
# Unfortunately we cannot use the MPTT builtin code for pre-caching
# the children here, because MPTT expects the tree to be 'complete'
        # and otherwise complains about 'invalid item order'
cache_tree_children(pages)
ids = dict((page.id, page) for page in pages)
parent_ids = {}
for page in pages:
if not page.parent_id in parent_ids:
parent_ids[page.parent_id] = []
parent_ids[page.parent_id].append(page)
for page in pages:
children = parent_ids.get(page.pk, [])
            # If the parent page is not among the nodes shown, this node should
            # be a "root node". The filtering has already been done, so using
            # the ids dictionary makes this check constant time.
page.root_node = page.parent_id not in ids
if get_cms_setting('PERMISSION'):
# caching the permissions
page.permission_edit_cache = perm_edit_ids == Page.permissions.GRANT_ALL or page.pk in perm_edit_ids
page.permission_publish_cache = perm_publish_ids == Page.permissions.GRANT_ALL or page.pk in perm_publish_ids
page.permission_advanced_settings_cache = perm_advanced_settings_ids == Page.permissions.GRANT_ALL or page.pk in perm_advanced_settings_ids
page.permission_user_cache = request.user
page.permission_restricted = page.pk in restricted_ids
if page.root_node or self.is_filtered():
page.last = True
if len(children):
# TODO: WTF!?!
# The last one is not the last... wait, what?
# children should NOT be a queryset. If it is, check that
# your django-mptt version is 0.5.1
children[-1].last = False
page.menu_level = 0
root_pages.append(page)
if page.parent_id:
page.get_cached_ancestors()
else:
page.ancestors_ascending = []
            # Because 'children' is the reverse-FK accessor for the 'parent'
            # FK from Page->Page, that name is already taken, so we deliberately
            # set a (misspelled) attribute called 'childrens' instead.
# If the queryset is filtered, do NOT set the 'childrens' attribute
# since *ALL* pages will be in the 'root_pages' list and therefore
# be displayed. (If the queryset is filtered, the result is not a
# tree but rather a flat list).
if self.is_filtered():
page.childrens = []
else:
page.childrens = children
for page in all_pages:
page.title_cache = {}
page.all_languages = []
if page.publisher_public_id:
page.publisher_public.title_cache = {}
page.publisher_public.all_languages = []
ids[page.publisher_public_id] = page.publisher_public
titles = Title.objects.filter(page__in=ids)
insort = bisect.insort # local copy to avoid globals lookup in the loop
for title in titles:
page = ids[title.page_id]
page.title_cache[title.language] = title
if not title.language in page.all_languages:
insort(page.all_languages, title.language)
site_id = self.current_site()
languages = get_language_list(site_id)
for page in all_pages:
for lang in languages:
if not lang in page.title_cache:
page.title_cache[lang] = EmptyTitle(lang)
self.root_pages = root_pages
def get_items(self):
return self.root_pages
def set_sites(self, request):
"""Sets sites property to current instance - used in tree view for
sites combo.
"""
if get_cms_setting('PERMISSION'):
self.sites = get_user_sites_queryset(request.user)
else:
self.sites = Site.objects.all()
self.has_access_to_multiple_sites = len(self.sites) > 1
def current_site(self):
return self._current_site
|
|
#! /usr/bin/env python
#
# See README for usage instructions.
import glob
import os
import subprocess
import sys
import platform
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "google" package.
from setuptools import setup, Extension, find_packages
from distutils.command.clean import clean as _clean
if sys.version_info[0] == 3:
# Python 3
from distutils.command.build_py import build_py_2to3 as _build_py
else:
# Python 2
from distutils.command.build_py import build_py as _build_py
from distutils.spawn import find_executable
# Find the Protocol Compiler.
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
elif os.path.exists("../src/protoc"):
protoc = "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
protoc = "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
protoc = "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
protoc = "../vsprojects/Release/protoc.exe"
else:
protoc = find_executable("protoc")
def GetVersion():
"""Gets the version from google/protobuf/__init__.py
Do not import google.protobuf.__init__ directly, because an installed
protobuf library may be loaded instead."""
with open(os.path.join('google', 'protobuf', '__init__.py')) as version_file:
exec(version_file.read(), globals())
return __version__
def generate_proto(source, require = True):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
if not require and not os.path.exists(source):
return
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print("Generating %s..." % output)
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if protoc is None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
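# Orientation only (a hypothetical helper, never invoked by setup.py): shows
# how generate_proto maps a .proto source to the module it writes before
# running "protoc -I../src -I. --python_out=. <source>".
def _example_generated_output_path():
  source = "../src/google/protobuf/descriptor.proto"
  output = source.replace(".proto", "_pb2.py").replace("../src/", "")
  assert output == "google/protobuf/descriptor_pb2.py"
  return output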
def GenerateUnittestProtos():
generate_proto("../src/google/protobuf/any_test.proto", False)
generate_proto("../src/google/protobuf/map_proto2_unittest.proto", False)
generate_proto("../src/google/protobuf/map_unittest.proto", False)
generate_proto("../src/google/protobuf/test_messages_proto3.proto", False)
generate_proto("../src/google/protobuf/test_messages_proto2.proto", False)
generate_proto("../src/google/protobuf/unittest_arena.proto", False)
generate_proto("../src/google/protobuf/unittest_no_arena.proto", False)
generate_proto("../src/google/protobuf/unittest_no_arena_import.proto", False)
generate_proto("../src/google/protobuf/unittest.proto", False)
generate_proto("../src/google/protobuf/unittest_custom_options.proto", False)
generate_proto("../src/google/protobuf/unittest_import.proto", False)
generate_proto("../src/google/protobuf/unittest_import_public.proto", False)
generate_proto("../src/google/protobuf/unittest_mset.proto", False)
generate_proto("../src/google/protobuf/unittest_mset_wire_format.proto", False)
generate_proto("../src/google/protobuf/unittest_no_generic_services.proto", False)
generate_proto("../src/google/protobuf/unittest_proto3_arena.proto", False)
generate_proto("../src/google/protobuf/util/json_format_proto3.proto", False)
generate_proto("google/protobuf/internal/any_test.proto", False)
generate_proto("google/protobuf/internal/descriptor_pool_test1.proto", False)
generate_proto("google/protobuf/internal/descriptor_pool_test2.proto", False)
generate_proto("google/protobuf/internal/factory_test1.proto", False)
generate_proto("google/protobuf/internal/factory_test2.proto", False)
generate_proto("google/protobuf/internal/file_options_test.proto", False)
generate_proto("google/protobuf/internal/import_test_package/inner.proto", False)
generate_proto("google/protobuf/internal/import_test_package/outer.proto", False)
generate_proto("google/protobuf/internal/missing_enum_values.proto", False)
generate_proto("google/protobuf/internal/message_set_extensions.proto", False)
generate_proto("google/protobuf/internal/more_extensions.proto", False)
generate_proto("google/protobuf/internal/more_extensions_dynamic.proto", False)
generate_proto("google/protobuf/internal/more_messages.proto", False)
generate_proto("google/protobuf/internal/packed_field_test.proto", False)
generate_proto("google/protobuf/internal/test_bad_identifiers.proto", False)
generate_proto("google/protobuf/pyext/python.proto", False)
class clean(_clean):
def run(self):
# Delete generated files in the code tree.
for (dirpath, dirnames, filenames) in os.walk("."):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if filepath.endswith("_pb2.py") or filepath.endswith(".pyc") or \
filepath.endswith(".so") or filepath.endswith(".o") or \
filepath.endswith('google/protobuf/compiler/__init__.py') or \
filepath.endswith('google/protobuf/util/__init__.py'):
os.remove(filepath)
# _clean is an old-style class, so super() doesn't work.
_clean.run(self)
class build_py(_build_py):
def run(self):
    # Generate the necessary _pb2.py files if they don't exist or are stale.
generate_proto("../src/google/protobuf/descriptor.proto")
generate_proto("../src/google/protobuf/compiler/plugin.proto")
generate_proto("../src/google/protobuf/any.proto")
generate_proto("../src/google/protobuf/api.proto")
generate_proto("../src/google/protobuf/duration.proto")
generate_proto("../src/google/protobuf/empty.proto")
generate_proto("../src/google/protobuf/field_mask.proto")
generate_proto("../src/google/protobuf/source_context.proto")
generate_proto("../src/google/protobuf/struct.proto")
generate_proto("../src/google/protobuf/timestamp.proto")
generate_proto("../src/google/protobuf/type.proto")
generate_proto("../src/google/protobuf/wrappers.proto")
GenerateUnittestProtos()
# Make sure google.protobuf/** are valid packages.
for path in ['', 'internal/', 'compiler/', 'pyext/', 'util/']:
try:
open('google/protobuf/%s__init__.py' % path, 'a').close()
except EnvironmentError:
pass
# _build_py is an old-style class, so super() doesn't work.
_build_py.run(self)
class test_conformance(_build_py):
target = 'test_python'
def run(self):
if sys.version_info >= (2, 7):
# Python 2.6 dodges these extra failures.
os.environ["CONFORMANCE_PYTHON_EXTRA_FAILURES"] = (
"--failure_list failure_list_python-post26.txt")
cmd = 'cd ../conformance && make %s' % (test_conformance.target)
status = subprocess.check_call(cmd, shell=True)
def get_option_from_sys_argv(option_str):
if option_str in sys.argv:
sys.argv.remove(option_str)
return True
return False
if __name__ == '__main__':
ext_module_list = []
warnings_as_errors = '--warnings_as_errors'
if get_option_from_sys_argv('--cpp_implementation'):
# Link libprotobuf.a and libprotobuf-lite.a statically with the
# extension. Note that those libraries have to be compiled with
# -fPIC for this to work.
compile_static_ext = get_option_from_sys_argv('--compile_static_extension')
extra_compile_args = ['-Wno-write-strings',
'-Wno-invalid-offsetof',
'-Wno-sign-compare']
libraries = ['protobuf']
extra_objects = None
if compile_static_ext:
libraries = None
extra_objects = ['../src/.libs/libprotobuf.a',
'../src/.libs/libprotobuf-lite.a']
test_conformance.target = 'test_python_cpp'
if "clang" in os.popen('$CC --version 2> /dev/null').read():
extra_compile_args.append('-Wno-shorten-64-to-32')
v, _, _ = platform.mac_ver()
if v:
v = float('.'.join(v.split('.')[:2]))
if v >= 10.12:
extra_compile_args.append('-std=c++11')
if warnings_as_errors in sys.argv:
extra_compile_args.append('-Werror')
sys.argv.remove(warnings_as_errors)
# C++ implementation extension
ext_module_list.extend([
Extension(
"google.protobuf.pyext._message",
glob.glob('google/protobuf/pyext/*.cc'),
include_dirs=[".", "../src"],
libraries=libraries,
extra_objects=extra_objects,
library_dirs=['../src/.libs'],
extra_compile_args=extra_compile_args,
),
Extension(
"google.protobuf.internal._api_implementation",
glob.glob('google/protobuf/internal/api_implementation.cc'),
extra_compile_args=['-DPYTHON_PROTO2_CPP_IMPL_V2'],
),
])
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
# Keep this list of dependencies in sync with tox.ini.
install_requires = ['six>=1.9', 'setuptools']
if sys.version_info <= (2,7):
install_requires.append('ordereddict')
install_requires.append('unittest2')
setup(
name='protobuf',
version=GetVersion(),
description='Protocol Buffers',
download_url='https://github.com/google/protobuf/releases',
long_description="Protocol Buffers are Google's data interchange format",
url='https://developers.google.com/protocol-buffers/',
maintainer='protobuf@googlegroups.com',
maintainer_email='protobuf@googlegroups.com',
license='3-Clause BSD License',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
namespace_packages=['google'],
packages=find_packages(
exclude=[
'import_test_package',
],
),
test_suite='google.protobuf.internal',
cmdclass={
'clean': clean,
'build_py': build_py,
'test_conformance': test_conformance,
},
install_requires=install_requires,
ext_modules=ext_module_list,
)
|
|
import subprocess
from collections import namedtuple
import os
import sys
import tempfile
import uuid
import re
from django.db.backends.base.creation import TEST_DATABASE_PREFIX
def is_testing():
return os.environ.get("CCHQ_TESTING") == "1"
class SharedDriveConfiguration(object):
def __init__(self, shared_drive_path, restore_dir, transfer_dir, temp_dir, blob_dir):
self.shared_drive_path = shared_drive_path
self.restore_dir_name = restore_dir
self.transfer_dir_name = transfer_dir
self.temp_dir_name = temp_dir
self.blob_dir_name = blob_dir
self._restore_dir = self._init_dir(restore_dir)
self.transfer_dir = self._init_dir(transfer_dir)
self.temp_dir = self._init_dir(temp_dir)
self.blob_dir = self._init_dir(blob_dir)
def _init_dir(self, name):
if not self.shared_drive_path or not os.path.isdir(self.shared_drive_path) or not name:
return None
path = os.path.join(self.shared_drive_path, name)
if not os.path.exists(path):
os.mkdir(path)
elif not os.path.isdir(path):
raise Exception('Shared folder is not a directory: {}'.format(name))
return path
def get_unset_reason(self, name):
if not self.shared_drive_path:
return "invalid shared drive path: %r" % (self.shared_drive_path,)
if not os.path.isdir(self.shared_drive_path):
return "shared drive path is not a directory: %r" % (self.shared_drive_path,)
directory = getattr(self, name + "_name")
if not directory:
return name + " is empty or not configured in settings"
return None
@property
def restore_dir(self):
return self._restore_dir or tempfile.gettempdir()
@property
def transfer_enabled(self):
from django_transfer import is_enabled
return is_enabled() and self.transfer_dir
def get_temp_file(self, suffix="", prefix="tmp"):
name = '{}{}{}'.format(prefix, uuid.uuid4().hex, suffix)
return os.path.join(self.temp_dir, name)
def get_server_url(http_method, server_root, username, password):
if username and password:
return '%(http_method)s://%(user)s:%(pass)s@%(server)s' % {
'http_method': http_method,
'user': username,
'pass': password,
'server': server_root,
}
else:
return '%(http_method)s://%(server)s' % {
'http_method': http_method,
'server': server_root,
}
def get_dynamic_db_settings(server_root, username, password, dbname,
use_https=False):
"""
Get dynamic database settings.
Other apps can use this if they want to change settings
"""
http_method = 'https' if use_https else 'http'
server_url = get_server_url(http_method, server_root, username, password)
database = '%(server)s/%(database)s' % {
'server': server_url,
'database': dbname,
}
return {
'COUCH_SERVER': server_url,
'COUCH_DATABASE': database,
}
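# Illustration only (hypothetical server and credentials): the settings dict
# produced by get_dynamic_db_settings for a plain HTTP CouchDB server.
def _get_dynamic_db_settings_example():
    settings = get_dynamic_db_settings('localhost:5984', 'admin', 'secret', 'commcarehq')
    assert settings == {
        'COUCH_SERVER': 'http://admin:secret@localhost:5984',
        'COUCH_DATABASE': 'http://admin:secret@localhost:5984/commcarehq',
    }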
def get_db_name(dbname, is_test):
"""Get databse name (possibly with test prefix)
:param is_test: Add test prefix if true.
"""
if isinstance(dbname, bytes):
dbname = dbname.decode('utf-8')
return (TEST_DATABASE_PREFIX + dbname) if is_test else dbname
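# Sketch for illustration (hypothetical helper; it relies on Django's
# TEST_DATABASE_PREFIX imported above rather than hard-coding its value):
def _get_db_name_example():
    assert get_db_name('commcarehq', False) == 'commcarehq'
    assert get_db_name('commcarehq', True) == TEST_DATABASE_PREFIX + 'commcarehq'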
def assign_test_db_names(dbs):
"""Fix database names for REUSE_DB
Django automatically uses test database names when testing, but
only if the test database setup routine is called. This allows us
to safely skip the test database setup with REUSE_DB.
"""
for db in dbs.values():
test_db_name = get_db_name(db['NAME'], True)
db['NAME'] = db.setdefault('TEST', {}).setdefault('NAME', test_db_name)
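# Hypothetical illustration of the in-place rewrite assign_test_db_names
# performs (not used by real settings):
def _assign_test_db_names_example():
    dbs = {'default': {'NAME': 'commcarehq'}}
    assign_test_db_names(dbs)
    expected = TEST_DATABASE_PREFIX + 'commcarehq'
    assert dbs['default']['NAME'] == expected
    assert dbs['default']['TEST']['NAME'] == expected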
class CouchSettingsHelper(namedtuple('CouchSettingsHelper',
['couch_database_configs', 'couchdb_apps', 'extra_db_names', 'unit_testing'])):
def make_couchdb_tuples(self):
"""
Helper function to generate couchdb tuples
for mapping app name to couch database URL.
"""
return [self._make_couchdb_tuple(row) for row in self.couchdb_apps]
def _make_couchdb_tuple(self, row):
if isinstance(row, tuple):
app_label, postfix = row
else:
app_label, postfix = row, None
if postfix:
if postfix in self.db_urls_by_prefix:
url = self.db_urls_by_prefix[postfix]
else:
url = '%s__%s' % (self.main_db_url, postfix)
return app_label, url
else:
return app_label, self.main_db_url
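    # Example of the mapping this produces, with hypothetical values: a plain app label
    # 'app1' maps to ('app1', main_db_url), while ('app2', 'meta') maps to
    # ('app2', <url configured for the 'meta' prefix>) if one exists, or
    # ('app2', main_db_url + '__meta') otherwise.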
def get_extra_couchdbs(self):
"""
Create a mapping from database prefix to database url
"""
extra_dbs = {}
postfixes = []
for row in self.couchdb_apps:
if isinstance(row, tuple):
_, postfix = row
if postfix:
postfixes.append(postfix)
postfixes.extend(self.extra_db_names)
for postfix in postfixes:
if postfix in self.db_urls_by_prefix:
url = self.db_urls_by_prefix[postfix]
else:
url = '%s__%s' % (self.main_db_url, postfix)
extra_dbs[postfix] = url
return extra_dbs
@property
def main_db_url(self):
return self.db_urls_by_prefix[None]
@property
def db_urls_by_prefix(self):
if not getattr(self, '_urls_by_prefix', None):
urls_by_prefix = {}
for key, config in self.couch_database_configs.items():
prefix = None if key == 'default' else key
url = self._get_db_url(config)
urls_by_prefix[prefix] = url
self._urls_by_prefix = urls_by_prefix
return self._urls_by_prefix
def _get_db_url(self, config):
return get_dynamic_db_settings(
config['COUCH_SERVER_ROOT'],
config['COUCH_USERNAME'],
config['COUCH_PASSWORD'],
get_db_name(config['COUCH_DATABASE_NAME'], self.unit_testing),
use_https=config['COUCH_HTTPS'],
)["COUCH_DATABASE"]
def celery_failure_handler(task, exc, task_id, args, kwargs, einfo):
from redis.exceptions import ConnectionError
from django_redis.exceptions import ConnectionInterrupted
if isinstance(exc, (ConnectionInterrupted, ConnectionError)):
task.retry(args=args, kwargs=kwargs, exc=exc, max_retries=3, countdown=60 * 5)
def get_allowed_websocket_channels(request, channels):
from django.core.exceptions import PermissionDenied
if request.user and request.user.is_authenticated and request.user.is_superuser:
return channels
else:
raise PermissionDenied(
'Not allowed to subscribe or to publish to websockets without '
'superuser permissions or domain membership!'
)
def fix_logger_obfuscation(fix_logger_obfuscation_, logging_config):
if fix_logger_obfuscation_:
# this is here because the logging config cannot import
# corehq.util.log.HqAdminEmailHandler, for example, if there
# is a syntax error in any module imported by corehq/__init__.py
# Setting FIX_LOGGER_ERROR_OBFUSCATION = True in
# localsettings.py will reveal the real error.
# Note that changing this means you will not be able to use/test anything
# related to email logging.
for handler in logging_config["handlers"].values():
if handler["class"].startswith("corehq."):
if fix_logger_obfuscation_ != 'quiet':
print("{} logger is being changed to {}".format(
handler['class'],
'logging.StreamHandler'
), file=sys.stderr)
handler["class"] = "logging.StreamHandler"
def configure_sentry(base_dir, server_env, dsn):
import sentry_sdk
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
from sentry_sdk.integrations.redis import RedisIntegration
def _before_send(event, hint):
# can't import this during load since settings is not fully configured yet
from corehq.util.sentry import before_sentry_send
return before_sentry_send(event, hint)
release = get_release_name(base_dir, server_env)
ignore_logger('quickcache')
ignore_logger('django.template')
ignore_logger('pillowtop')
ignore_logger('restore')
ignore_logger('kafka.conn')
sentry_sdk.init(
dsn,
release=release,
environment=server_env,
request_bodies='never',
before_send=_before_send,
integrations=[
DjangoIntegration(),
CeleryIntegration(),
SqlalchemyIntegration(),
RedisIntegration()
]
)
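# configure_sentry is only meaningful when a DSN is available; a caller would typically
# guard it along these lines (SENTRY_DSN and SERVER_ENVIRONMENT are illustrative names):
#
# if SENTRY_DSN:
#     configure_sentry(BASE_DIR, SERVER_ENVIRONMENT, SENTRY_DSN)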
def get_release_name(base_dir, server_env):
"""Return the release name. This should match the name of the release
created by commcare-cloud
"""
release_dir = base_dir.split('/')[-1]
if re.match(r'\d{4}-\d{2}-\d{2}_\d{2}.\d{2}', release_dir):
return "{}-{}".format(release_dir, server_env)
else:
return get_git_commit(base_dir) or 'unknown'
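# Example: a deploy directory named '2020-07-15_10.30' with server_env 'production'
# yields the release name '2020-07-15_10.30-production'; any other directory name
# falls back to the current git commit hash (or 'unknown').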
def get_git_commit(base_dir):
try:
out = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=base_dir)
return out.strip().decode('ascii')
except OSError:
pass
from __future__ import print_function, absolute_import, division
import logging
import time
from sys import argv
import gym
import numpy as np
import tensorflow as tf
import prettytensor as pt
from gym.spaces import Box
from simple_trpo.tb_logger import TBLogger
from simple_trpo.utils import *
from simple_trpo.vectorized_env import VectorizedEnv
class TRPO(object):
def __init__(self, env, config):
self.config = config
self.env = env
if not isinstance(env.observation_space, Box) or\
not isinstance(env.action_space, Box):
print("Both the input space and the output space should be continuous.")
print("(Probably OK to remove the requirement for the input space).")
exit(-1)
self.session = tf.Session()
self.obs = obs = tf.placeholder(
dtype, shape=[
None, env.observation_space.shape[0]])
act_dim = np.prod(env.action_space.shape)
self.action = action = tf.placeholder(tf.float32, shape=[None, act_dim])
self.advant = advant = tf.placeholder(dtype, shape=[None])
self.old_action_dist_mu = old_action_dist_mu = tf.placeholder(dtype, shape=[None, act_dim])
self.old_action_dist_logstd = old_action_dist_logstd = tf.placeholder(dtype, shape=[None, act_dim])
# Create neural network.
action_dist_mu = (pt.wrap(self.obs).
fully_connected(64, activation_fn=tf.nn.relu).
fully_connected(64, activation_fn=tf.nn.relu).
fully_connected(act_dim)) # output means and logstd's
action_dist_logstd_param = tf.Variable((.01 * np.random.randn(1, act_dim)).astype(np.float32))
action_dist_logstd = tf.tile(action_dist_logstd_param, tf.stack((tf.shape(action_dist_mu)[0], 1)))
self.action_dist_mu = action_dist_mu
self.action_dist_logstd = action_dist_logstd
N = tf.shape(obs)[0]
# compute probabilities of current actions and old action
log_p_n = gauss_log_prob(action_dist_mu, action_dist_logstd, action)
log_oldp_n = gauss_log_prob(old_action_dist_mu, old_action_dist_logstd, action)
# proceed as before, good.
ratio_n = tf.exp(log_p_n - log_oldp_n)
Nf = tf.cast(N, dtype)
surr = -tf.reduce_mean(ratio_n * advant) # Surrogate loss
var_list = tf.trainable_variables()
# Introduced the change into here:
kl = gauss_KL(old_action_dist_mu, old_action_dist_logstd,
action_dist_mu, action_dist_logstd) / Nf
ent = gauss_ent(action_dist_mu, action_dist_logstd) / Nf
self.losses = [surr, kl, ent]
self.pg = flatgrad(surr, var_list)
# KL divergence where first arg is fixed
# replace old->tf.stop_gradient from previous kl
kl_firstfixed = gauss_selfKL_firstfixed(action_dist_mu, action_dist_logstd) / Nf
grads = tf.gradients(kl_firstfixed, var_list)
self.flat_tangent = tf.placeholder(dtype, shape=[None])
shapes = map(var_shape, var_list)
start = 0
tangents = []
for shape in shapes:
size = np.prod(shape)
param = tf.reshape(self.flat_tangent[start:(start + size)], shape)
tangents.append(param)
start += size
gvp = [tf.reduce_sum(g * t) for (g, t) in zip(grads, tangents)]
self.fvp = flatgrad(gvp, var_list)
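        # The two tf.gradients passes above implement a Fisher-vector product without
        # forming the Fisher matrix: grads is the gradient of the (self-)KL, gvp is its
        # inner product with an arbitrary tangent vector, and differentiating gvp again
        # gives (approximately) F @ tangent, which the conjugate gradient solve in
        # learn() consumes.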
self.get_flat = GetFlat(self.session, var_list)
self.set_from_flat = SetFromFlat(self.session, var_list)
self.session.run(tf.initialize_variables(var_list))
self.vf = LinearVF()
def act(self, obs):
obs = np.expand_dims(obs, 0)
action_dist_mu, action_dist_logstd =\
self.session.run([self.action_dist_mu, self.action_dist_logstd], {self.obs: obs})
act = action_dist_mu + np.exp(action_dist_logstd) * np.random.randn(*action_dist_logstd.shape)
return act.ravel(),\
ConfigObject(action_dist_mu=action_dist_mu,
action_dist_logstd=action_dist_logstd)
def learn(self):
config = self.config
start_time = time.time()
timesteps_elapsed = 0
episodes_elapsed = 0
tb_logger = TBLogger(config.env_id, self.config.name)
for i in range(1, config.n_iter):
# Generating paths.
paths = vectorized_rollout(
self.env,
self,
config.max_pathlength,
config.timesteps_per_batch,
render=False) # (i % render_freq) == 0)
# Computing returns and estimating advantage function.
for path in paths:
path["baseline"] = self.vf.predict(path)
path["returns"] = discount(path["rewards"], config.gamma)
path["advant"] = path["returns"] - path["baseline"]
# Updating policy.
action_dist_mu = np.concatenate([path["action_dists_mu"] for path in paths])
action_dist_logstd = np.concatenate([path["action_dists_logstd"] for path in paths])
obs_n = np.concatenate([path["obs"] for path in paths])
action_n = np.concatenate([path["actions"] for path in paths])
# Standardize the advantage function to have mean=0 and std=1.
advant_n = np.concatenate([path["advant"] for path in paths])
advant_n -= advant_n.mean()
advant_n /= (advant_n.std() + 1e-8)
# Computing baseline function for next iter.
self.vf.fit(paths)
feed = {self.obs: obs_n,
self.action: action_n,
self.advant: advant_n,
self.old_action_dist_mu: action_dist_mu,
self.old_action_dist_logstd: action_dist_logstd}
theta_prev = self.get_flat()
def fisher_vector_product(p):
feed[self.flat_tangent] = p
return self.session.run(self.fvp, feed) + p * config.cg_damping
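            # fisher_vector_product approximates (F + cg_damping * I) @ p; the damping
            # term keeps the conjugate gradient solve below numerically stable when the
            # Fisher matrix estimate is ill-conditioned.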
g = self.session.run(self.pg, feed_dict=feed)
stepdir = conjugate_gradient(fisher_vector_product, -g)
shs = (.5 * stepdir.dot(fisher_vector_product(stepdir)))
assert shs > 0
lm = np.sqrt(shs / config.max_kl)
fullstep = stepdir / lm
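            # Step-size reasoning: with s = stepdir ~= F^-1 g, the KL of a step beta*s is
            # roughly 0.5 * beta^2 * s^T F s = beta^2 * shs, so beta = 1/lm =
            # sqrt(max_kl / shs) scales the step to (approximately) hit the max_kl trust
            # region. This version applies the full step without a backtracking line search.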
theta = theta_prev + fullstep
self.set_from_flat(theta)
surrogate_loss, kl_old_new, entropy = self.session.run(self.losses, feed_dict=feed)
ep_rewards = np.array([path["rewards"].sum() for path in paths])
stats = {}
timesteps_elapsed += sum([len(path["rewards"]) for path in paths])
episodes_elapsed += len(paths)
stats["timesteps_elapsed"] = timesteps_elapsed
stats["episodes_elapsed"] = episodes_elapsed
stats["reward_mean_per_episode"] = ep_rewards.mean()
stats["entropy"] = entropy
stats["kl_difference_between_old_and_new"] = kl_old_new
stats["surrogate_loss"] = surrogate_loss
for k, v in stats.items():
tb_logger.log(k, v)
tb_logger.summary_step += 1
stats["Time elapsed"] = "%.2f mins" % ((time.time() - start_time) / 60.0)
print("\n********** Iteration {} ************".format(i))
for k, v in stats.items():
print(k + ": " + " " * (40 - len(k)) + str(v))
            if np.isnan(entropy):
                # stop training if the policy entropy has diverged to NaN
                exit(-1)
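# conjugate_gradient, flatgrad, and the gauss_* helpers used above come from
# simple_trpo.utils and are not shown in this file. As a rough, hedged sketch of what a
# compatible conjugate_gradient routine usually looks like (standard CG for solving
# F x = b given only a function computing Fisher-vector products), something along
# these lines would match the call in learn(); the real implementation may differ in
# iteration count, tolerances, and naming.
def _conjugate_gradient_sketch(f_Ax, b, cg_iters=10, residual_tol=1e-10):
    x = np.zeros_like(b)
    r = b.copy()          # residual b - A @ x, with the initial guess x = 0
    p = b.copy()          # initial search direction
    rdotr = r.dot(r)
    for _ in range(cg_iters):
        Ap = f_Ax(p)
        alpha = rdotr / p.dot(Ap)
        x += alpha * p
        r -= alpha * Ap
        new_rdotr = r.dot(r)
        p = r + (new_rdotr / rdotr) * p
        rdotr = new_rdotr
        if rdotr < residual_tol:
            break
    return x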
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--env_id", type=str, default='Hopper-v1')
parser.add_argument("--name", type=str, default='unnamed_experiment')
parser.add_argument("--timesteps_per_batch", type=int, default=8000)
parser.add_argument("--max_pathlength", type=int, default=2000)
parser.add_argument("--n_iter", type=int, default=3000)
parser.add_argument('-s', "--seed", type=int, default=0)
parser.add_argument("--gamma", type=float, default=.99)
parser.add_argument("--max_kl", type=float, default=.001)
parser.add_argument("--cg_damping", type=float, default=1e-3)
args = parser.parse_args()
print('python main.py {}'.format(' '.join(argv)))
config = ConfigObject(
timesteps_per_batch=args.timesteps_per_batch,
max_pathlength=args.max_pathlength,
gamma=args.gamma,
n_iter=args.n_iter,
max_kl=args.max_kl,
env_id=args.env_id,
name=args.name,
cg_damping=args.cg_damping)
logging.getLogger().setLevel(logging.DEBUG)
# env = gym.make(args.env_id)
env_id = args.env_id
def seeded_env_fn(seed):
def env_fn():
from rl_teacher.envs import make_with_torque_removed
env = make_with_torque_removed(env_id)
env.seed(seed)
return env
return env_fn
env_fns = [seeded_env_fn(seed) for seed in range(4)]
# env = VectorizedEnv(env_fns)
env = gym.make(args.env_id)
agent = TRPO(env, config)
agent.learn()
print('python main.py {}'.format(' '.join(argv)))