| code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
from .base import TransactionChannelTestCase, ChannelTestCase, Client, apply_routes # NOQA isort:skip
from .http import HttpClient # NOQA isort:skip
| raphael-boucher/channels | channels/tests/__init__.py | Python | bsd-3-clause | 151 |
import datetime
import os
from django import forms
from django.db.models.fields import Field
from django.core import checks
from django.core.files.base import File
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile
from django.db.models import signals
from django.utils.encoding import force_str, force_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super(FieldFile, self).__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
# By overriding the == operator, we can keep backwards compatibility.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def _get_path(self):
self._require_file()
return self.storage.path(self.name)
path = property(_get_path)
def _get_url(self):
self._require_file()
return self.storage.url(self.name)
url = property(_get_url)
def _get_size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
size = property(_get_size)
def open(self, mode='rb'):
self._require_file()
self.file.open(mode)
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content)
setattr(self.instance, self.field.name, self.name)
# Update the filesize cache
self._size = content.size
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
# Delete the filesize cache
if hasattr(self, '_size'):
del self._size
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
def _get_closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
closed = property(_get_closed)
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
"""
The descriptor for the file attribute on the model instance. Returns a
FieldFile when accessed so you can do stuff like::
>>> from myapp.models import MyModel
>>> instance = MyModel.objects.get(pk=1)
>>> instance.file.size
Assigns a file object on assignment so you can do::
>>> with open('/tmp/hello.world', 'r') as f:
... instance.file = File(f)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, owner.__name__))
# This is slightly complicated, so worth an explanation.
# `instance.file` needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
file = instance.__dict__[self.field.name]
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, six.string_types) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
# the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
self._primary_key_set_explicitly = 'primary_key' in kwargs
self._unique_set_explicitly = 'unique' in kwargs
self.storage = storage or default_storage
self.upload_to = upload_to
if callable(upload_to):
self.generate_filename = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FileField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FileField, self).check(**kwargs)
errors.extend(self._check_unique())
errors.extend(self._check_primary_key())
return errors
def _check_unique(self):
if self._unique_set_explicitly:
return [
checks.Error(
"'unique' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E200',
)
]
else:
return []
def _check_primary_key(self):
if self._primary_key_set_explicitly:
return [
checks.Error(
"'primary_key' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E201',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(FileField, self).deconstruct()
if kwargs.get("max_length", None) == 100:
del kwargs["max_length"]
kwargs['upload_to'] = self.upload_to
if self.storage is not default_storage:
kwargs['storage'] = self.storage
return name, path, args, kwargs
def get_internal_type(self):
return "FileField"
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'name'):
value = value.name
return super(FileField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
value = super(FileField, self).get_prep_value(value)
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
return six.text_type(value)
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
file = super(FileField, self).pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file, save=False)
return file
def contribute_to_class(self, cls, name):
super(FileField, self).contribute_to_class(cls, name)
setattr(cls, self.name, self.descriptor_class(self))
def get_directory_name(self):
return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))
def get_filename(self, filename):
return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
def generate_filename(self, instance, filename):
return os.path.join(self.get_directory_name(), self.get_filename(filename))
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to unicode and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
# If a file has been provided previously, then the form doesn't require
# that a new file is provided this time.
# The code to mark the form field as not required is used by
# form_for_instance, but can probably be removed once form_for_instance
# is gone. ModelForm uses a different method to check for an existing file.
if 'initial' in kwargs:
defaults['required'] = False
defaults.update(kwargs)
return super(FileField, self).formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
"""
Just like the FileDescriptor, but for ImageFields. The only difference is
assigning the width/height to the width_field/height_field, if appropriate.
"""
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super(ImageFileDescriptor, self).__set__(instance, value)
# To prevent recalculating image dimensions when we are instantiating
# an object from the database (bug #11084), only update dimensions if
# the field had a value before this assignment. Since the default
# value for FileField subclasses is an instance of field.attr_class,
# previous_file will only be None when we are called from
# Model.__init__(). The ImageField.update_dimension_fields method
# hooked up to the post_init signal handles the Model.__init__() cases.
# Assignment happening outside of Model.__init__() will trigger the
# update right here.
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
def delete(self, save=True):
# Clear the image dimensions cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None,
height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super(ImageField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_image_library_installed())
return errors
def _check_image_library_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
return [
checks.Error(
'Cannot use ImageField because Pillow is not installed.',
hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install pillow".'),
obj=self,
id='fields.E210',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ImageField, self).deconstruct()
if self.width_field:
kwargs['width_field'] = self.width_field
if self.height_field:
kwargs['height_field'] = self.height_field
return name, path, args, kwargs
def contribute_to_class(self, cls, name):
super(ImageField, self).contribute_to_class(cls, name)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
# Only run post-initialization dimension update on non-abstract models
if not cls._meta.abstract:
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Updates field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field))
or (self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super(ImageField, self).formfield(**defaults)
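# --- Illustrative usage sketch (not part of the original files.py) ---
# Assumes a configured Django project with this model inside an installed app
# and Pillow available; the model, upload_to paths and field names below are
# hypothetical.
from django.db import models


class Document(models.Model):
    # FileField stores only the file's name in the database; attribute access
    # goes through FileDescriptor, which wraps the stored value in a FieldFile.
    attachment = models.FileField(upload_to='attachments/%Y/%m/%d')

    # ImageField keeps the two integer columns in sync via ImageFileDescriptor
    # and update_dimension_fields whenever a new image is assigned.
    photo = models.ImageField(upload_to='photos/',
                              width_field='photo_width',
                              height_field='photo_height')
    photo_width = models.IntegerField(null=True, blank=True)
    photo_height = models.IntegerField(null=True, blank=True)

# Assigning a plain File marks the underlying FieldFile as uncommitted;
# FileField.pre_save() then commits it to storage when the instance is saved:
#
#     from django.core.files.base import File
#     doc = Document()
#     with open('/tmp/report.pdf', 'rb') as fh:
#         doc.attachment = File(fh)
#         doc.save()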
| mbox/django | django/db/models/fields/files.py | Python | bsd-3-clause | 18,658 |
from __future__ import division, print_function, absolute_import
from numpy import sqrt, inner, zeros, inf, finfo
from numpy.linalg import norm
from .utils import make_system
__all__ = ['minres']
def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None,
M=None, callback=None, show=False, check=False):
"""
Use MINimum RESidual iteration to solve Ax=b
MINRES minimizes norm(A*x - b) for a real symmetric matrix A. Unlike
the Conjugate Gradient method, A can be indefinite or singular.
If shift != 0 then the method solves (A - shift*I)x = b
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}
The real symmetric N-by-N matrix of the linear system
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : {array, matrix}
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : {array, matrix}
Starting guess for the solution.
tol : float
Tolerance to achieve. The algorithm terminates when the relative
residual is below `tol`.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, dense matrix, LinearOperator}
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
References
----------
Solution of sparse indefinite systems of linear equations,
C. C. Paige and M. A. Saunders (1975),
SIAM J. Numer. Anal. 12(4), pp. 617-629.
https://web.stanford.edu/group/SOL/software/minres/
This file is a translation of the following MATLAB implementation:
https://web.stanford.edu/group/SOL/software/minres/minres-matlab.zip
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
matvec = A.matvec
psolve = M.matvec
first = 'Enter minres. '
last = 'Exit minres. '
n = A.shape[0]
if maxiter is None:
maxiter = 5 * n
msg = [' beta2 = 0. If M = I, b and x are eigenvectors ', # -1
' beta1 = 0. The exact solution is x0 ', # 0
' A solution to Ax = b was found, given rtol ', # 1
' A least-squares solution was found, given rtol ', # 2
' Reasonable accuracy achieved, given eps ', # 3
' x has converged to an eigenvector ', # 4
' acond has exceeded 0.1/eps ', # 5
' The iteration limit was reached ', # 6
' A does not define a symmetric matrix ', # 7
' M does not define a symmetric matrix ', # 8
' M does not define a pos-def preconditioner '] # 9
if show:
print(first + 'Solution of symmetric Ax = b')
print(first + 'n = %3g shift = %23.14e' % (n,shift))
print(first + 'itnlim = %3g rtol = %11.2e' % (maxiter,tol))
print()
istop = 0
itn = 0
Anorm = 0
Acond = 0
rnorm = 0
ynorm = 0
xtype = x.dtype
eps = finfo(xtype).eps
# Set up y and v for the first Lanczos vector v1.
# y = beta1 P' v1, where P = C**(-1).
# v is really P' v1.
r1 = b - A*x
y = psolve(r1)
beta1 = inner(r1, y)
if beta1 < 0:
raise ValueError('indefinite preconditioner')
elif beta1 == 0:
return (postprocess(x), 0)
beta1 = sqrt(beta1)
if check:
# are these too strict?
# see if A is symmetric
w = matvec(y)
r2 = matvec(w)
s = inner(w,w)
t = inner(y,r2)
z = abs(s - t)
epsa = (s + eps) * eps**(1.0/3.0)
if z > epsa:
raise ValueError('non-symmetric matrix')
# see if M is symmetric
r2 = psolve(y)
s = inner(y,y)
t = inner(r1,r2)
z = abs(s - t)
epsa = (s + eps) * eps**(1.0/3.0)
if z > epsa:
raise ValueError('non-symmetric preconditioner')
# Initialize other quantities
oldb = 0
beta = beta1
dbar = 0
epsln = 0
qrnorm = beta1
phibar = beta1
rhs1 = beta1
rhs2 = 0
tnorm2 = 0
gmax = 0
gmin = finfo(xtype).max
cs = -1
sn = 0
w = zeros(n, dtype=xtype)
w2 = zeros(n, dtype=xtype)
r2 = r1
if show:
print()
print()
print(' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|')
while itn < maxiter:
itn += 1
s = 1.0/beta
v = s*y
y = matvec(v)
y = y - shift * v
if itn >= 2:
y = y - (beta/oldb)*r1
alfa = inner(v,y)
y = y - (alfa/beta)*r2
r1 = r2
r2 = y
y = psolve(r2)
oldb = beta
beta = inner(r2,y)
if beta < 0:
raise ValueError('non-symmetric matrix')
beta = sqrt(beta)
tnorm2 += alfa**2 + oldb**2 + beta**2
if itn == 1:
if beta/beta1 <= 10*eps:
istop = -1 # Terminate later
# Apply previous rotation Qk-1 to get
# [deltak epslnk+1] = [cs sn][dbark 0 ]
# [gbar k dbar k+1] [sn -cs][alfak betak+1].
oldeps = epsln
delta = cs * dbar + sn * alfa # delta1 = 0 deltak
gbar = sn * dbar - cs * alfa # gbar 1 = alfa1 gbar k
epsln = sn * beta # epsln2 = 0 epslnk+1
dbar = - cs * beta # dbar 2 = beta2 dbar k+1
root = norm([gbar, dbar])
Arnorm = phibar * root
# Compute the next plane rotation Qk
gamma = norm([gbar, beta]) # gammak
gamma = max(gamma, eps)
cs = gbar / gamma # ck
sn = beta / gamma # sk
phi = cs * phibar # phik
phibar = sn * phibar # phibark+1
# Update x.
denom = 1.0/gamma
w1 = w2
w2 = w
w = (v - oldeps*w1 - delta*w2) * denom
x = x + phi*w
# Go round again.
gmax = max(gmax, gamma)
gmin = min(gmin, gamma)
z = rhs1 / gamma
rhs1 = rhs2 - delta*z
rhs2 = - epsln*z
# Estimate various norms and test for convergence.
Anorm = sqrt(tnorm2)
ynorm = norm(x)
epsa = Anorm * eps
epsx = Anorm * ynorm * eps
epsr = Anorm * ynorm * tol
diag = gbar
if diag == 0:
diag = epsa
qrnorm = phibar
rnorm = qrnorm
if ynorm == 0 or Anorm == 0:
test1 = inf
else:
test1 = rnorm / (Anorm*ynorm) # ||r|| / (||A|| ||x||)
if Anorm == 0:
test2 = inf
else:
test2 = root / Anorm # ||Ar|| / (||A|| ||r||)
# Estimate cond(A).
# In this version we look at the diagonals of R in the
# factorization of the lower Hessenberg matrix, Q * H = R,
# where H is the tridiagonal matrix from Lanczos with one
# extra row, beta(k+1) e_k^T.
Acond = gmax/gmin
# See if any of the stopping criteria are satisfied.
# In rare cases, istop is already -1 from above (Abar = const*I).
if istop == 0:
t1 = 1 + test1 # These tests work if tol < eps
t2 = 1 + test2
if t2 <= 1:
istop = 2
if t1 <= 1:
istop = 1
if itn >= maxiter:
istop = 6
if Acond >= 0.1/eps:
istop = 4
if epsx >= beta1:
istop = 3
# if rnorm <= epsx : istop = 2
# if rnorm <= epsr : istop = 1
if test2 <= tol:
istop = 2
if test1 <= tol:
istop = 1
# See if it is time to print something.
prnt = False
if n <= 40:
prnt = True
if itn <= 10:
prnt = True
if itn >= maxiter-10:
prnt = True
if itn % 10 == 0:
prnt = True
if qrnorm <= 10*epsx:
prnt = True
if qrnorm <= 10*epsr:
prnt = True
if Acond <= 1e-2/eps:
prnt = True
if istop != 0:
prnt = True
if show and prnt:
str1 = '%6g %12.5e %10.3e' % (itn, x[0], test1)
str2 = ' %10.3e' % (test2,)
str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm)
print(str1 + str2 + str3)
if itn % 10 == 0:
print()
if callback is not None:
callback(x)
if istop != 0:
break # TODO check this
if show:
print()
print(last + ' istop = %3g itn =%5g' % (istop,itn))
print(last + ' Anorm = %12.4e Acond = %12.4e' % (Anorm,Acond))
print(last + ' rnorm = %12.4e ynorm = %12.4e' % (rnorm,ynorm))
print(last + ' Arnorm = %12.4e' % (Arnorm,))
print(last + msg[istop+1])
if istop == 6:
info = maxiter
else:
info = 0
return (postprocess(x),info)
if __name__ == '__main__':
from numpy import zeros, arange
from scipy.linalg import norm
from scipy.sparse import spdiags
n = 10
residuals = []
def cb(x):
residuals.append(norm(b - A*x))
# A = poisson((10,),format='csr')
A = spdiags([arange(1,n+1,dtype=float)], [0], n, n, format='csr')
M = spdiags([1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr')
A.psolve = M.matvec
b = zeros(A.shape[0])
x = minres(A,b,tol=1e-12,maxiter=None,callback=cb)
# x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0]
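# --- Illustrative usage sketch (not part of the original minres.py) ---
# Companion example solving a system with a nonzero right-hand side through
# the public scipy.sparse.linalg.minres wrapper (the demo above uses b = 0, so
# its solve is trivial). The diagonal test matrix is hypothetical, and the
# `tol` keyword matches the signature above; newer SciPy releases may name it
# `rtol` instead.
from numpy import arange, ones
from numpy.linalg import norm
from scipy.sparse import spdiags
from scipy.sparse.linalg import minres as minres_public

n = 10
# Symmetric (diagonal) test matrix with eigenvalues 1..n.
A = spdiags([arange(1, n + 1, dtype=float)], [0], n, n, format='csr')
b = ones(n)
x, info = minres_public(A, b, tol=1e-10)
print(info)             # 0 means the requested tolerance was reached
print(norm(b - A * x))  # residual norm, tiny when converged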
| gertingold/scipy | scipy/sparse/linalg/isolve/minres.py | Python | bsd-3-clause | 10,577 |
# -*- coding: utf-8 -*-
from frontera.contrib.scrapy.converters import RequestConverter, ResponseConverter
from scrapy.http.request import Request as ScrapyRequest
from scrapy.http.response import Response as ScrapyResponse
from frontera.core.models import Request as FrontierRequest
class TestSpider(object):
def callback(self):
pass
def errback(self):
pass
def test_request_response_converters():
spider = TestSpider()
rc = RequestConverter(spider)
rsc = ResponseConverter(spider, rc)
url = "http://test.com/test?param=123"
request = ScrapyRequest(url=url, callback=spider.callback, errback=spider.errback)
request.meta['test_param'] = 'test_value'
request.headers.appendlist("TestKey", "test value")
request.cookies['MyCookie'] = 'CookieContent'
frontier_request = rc.to_frontier(request)
assert frontier_request.meta['scrapy_callback'] == 'callback'
assert frontier_request.meta['scrapy_errback'] == 'errback'
assert frontier_request.url == url
assert frontier_request.method == 'GET'
assert frontier_request.headers['Testkey'] == 'test value'
assert frontier_request.cookies['MyCookie'] == 'CookieContent'
assert 'frontier_request' not in frontier_request.meta['scrapy_meta']
request_converted = rc.from_frontier(frontier_request)
assert request_converted.meta['test_param'] == 'test_value'
assert request_converted.url == url
assert request_converted.method == 'GET'
assert request_converted.cookies['MyCookie'] == 'CookieContent'
assert request_converted.headers.get('Testkey') == 'test value'
# Some middleware could change .meta contents
request_converted.meta['middleware_stuff'] = 'appeared'
response = ScrapyResponse(url=url, request=request_converted, headers={'TestHeader': 'Test value'})
frontier_response = rsc.to_frontier(response)
assert frontier_response.meta['scrapy_meta']['test_param'] == 'test_value'
assert frontier_response.meta['scrapy_meta']['middleware_stuff'] == 'appeared'
assert frontier_response.status_code == 200
assert 'frontier_request' not in frontier_response.meta['scrapy_meta']
response_converted = rsc.from_frontier(frontier_response)
assert response_converted.meta['test_param'] == 'test_value'
assert response_converted.url == url
assert response_converted.status == 200
assert response_converted.headers['TestHeader'] == 'Test value'
frontier_request = FrontierRequest(url)
request_converted = rc.from_frontier(frontier_request)
assert frontier_request.url == url
| pombredanne/frontera | frontera/tests/test_scrapy.py | Python | bsd-3-clause | 2,597 |
import os
import re
import sys
import imp
import copy
import glob
import atexit
import tempfile
import subprocess
import shutil
from distutils.errors import DistutilsError
try:
set
except NameError:
from sets import Set as set
from numpy.distutils.compat import get_exception
__all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'dict_append', 'appendpath', 'generate_config_py',
'get_cmd', 'allpath', 'get_mathlibs',
'terminal_has_colors', 'red_text', 'green_text', 'yellow_text',
'blue_text', 'cyan_text', 'cyg2win32','mingw32','all_strings',
'has_f_sources', 'has_cxx_sources', 'filter_sources',
'get_dependencies', 'is_local_src_dir', 'get_ext_source_files',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath','njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info']
class InstallableLib:
"""
Container to hold information on an installable library.
Parameters
----------
name : str
Name of the installed library.
build_info : dict
Dictionary holding build information.
target_dir : str
Absolute path specifying where to install the library.
See Also
--------
Configuration.add_installed_library
Notes
-----
The three parameters are stored as attributes with the same names.
"""
def __init__(self, name, build_info, target_dir):
self.name = name
self.build_info = build_info
self.target_dir = target_dir
def quote_args(args):
# don't use _nt_quote_args as it does not check if
# args items already have quotes or not.
args = list(args)
for i in range(len(args)):
a = args[i]
if ' ' in a and a[0] not in '"\'':
args[i] = '"%s"' % (a)
return args
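# --- Standalone illustration (not part of the original misc_util.py) ---
# Assumes a NumPy version that still ships numpy.distutils. Arguments that
# contain spaces are wrapped in double quotes; arguments that already start
# with a quote are left untouched.
from numpy.distutils.misc_util import quote_args

print(quote_args(['gcc', '-I/opt/my headers/include', '"already quoted"']))
# -> ['gcc', '"-I/opt/my headers/include"', '"already quoted"']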
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
splitted = name.split('/')
return os.path.join(*splitted)
def rel_path(path, parent_path):
"""Return path relative to parent_path.
"""
pd = os.path.abspath(parent_path)
apath = os.path.abspath(path)
if len(apath)<len(pd):
return path
if apath==pd:
return ''
if pd == apath[:len(pd)]:
assert apath[len(pd)] in [os.sep],repr((path,apath[len(pd)]))
path = apath[len(pd)+1:]
return path
def get_path_from_frame(frame, parent_path=None):
"""Return path of the module given a frame object from the call stack.
Returned path is relative to parent_path when given,
otherwise it is absolute path.
"""
# First, try to find if the file name is in the frame.
try:
caller_file = eval('__file__', frame.f_globals, frame.f_locals)
d = os.path.dirname(os.path.abspath(caller_file))
except NameError:
# __file__ is not defined, so let's try __name__. We try this second
# because setuptools spoofs __name__ to be '__main__' even though
# sys.modules['__main__'] might be something else, like easy_install(1).
caller_name = eval('__name__', frame.f_globals, frame.f_locals)
__import__(caller_name)
mod = sys.modules[caller_name]
if hasattr(mod, '__file__'):
d = os.path.dirname(os.path.abspath(mod.__file__))
else:
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
# hmm, should we use sys.argv[0] like in __builtin__ case?
if parent_path is not None:
d = rel_path(d, parent_path)
return d or '.'
def njoin(*path):
"""Join two or more pathname components +
- convert a /-separated pathname to one using the OS's path separator.
- resolve `..` and `.` from path.
Either passing n arguments as in njoin('a','b'), or a sequence
of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.
"""
paths = []
for p in path:
if is_sequence(p):
# njoin(['a', 'b'], 'c')
paths.append(njoin(*p))
else:
assert is_string(p)
paths.append(p)
path = paths
if not path:
# njoin()
joined = ''
else:
# njoin('a', 'b')
joined = os.path.join(*path)
if os.path.sep != '/':
joined = joined.replace('/',os.path.sep)
return minrelpath(joined)
def get_mathlibs(path=None):
"""Return the MATHLIB line from numpyconfig.h
"""
if path is not None:
config_file = os.path.join(path,'_numpyconfig.h')
else:
# Look for the file in each of the numpy include directories.
dirs = get_numpy_include_dirs()
for path in dirs:
fn = os.path.join(path,'_numpyconfig.h')
if os.path.exists(fn):
config_file = fn
break
else:
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
fid = open(config_file)
mathlibs = []
s = '#define MATHLIB'
for line in fid.readlines():
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
fid.close()
return mathlibs
def minrelpath(path):
"""Resolve `..` and '.' from path.
"""
if not is_string(path):
return path
if '.' not in path:
return path
l = path.split(os.sep)
while l:
try:
i = l.index('.',1)
except ValueError:
break
del l[i]
j = 1
while l:
try:
i = l.index('..',j)
except ValueError:
break
if l[i-1]=='..':
j += 1
else:
del l[i],l[i-1]
j = 1
if not l:
return ''
return os.sep.join(l)
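# --- Standalone illustration (not part of the original misc_util.py) ---
# POSIX-style output shown; assumes a NumPy version that still ships
# numpy.distutils.
from numpy.distutils.misc_util import njoin, minrelpath

# njoin accepts plain components or nested sequences, converts '/'-separated
# input to the host separator, and strips '.' and '..' via minrelpath.
print(njoin('pkg', ['sub', 'src'], '..', 'include'))  # pkg/sub/include
print(minrelpath('a/./b/../c'))                       # a/c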
def _fix_paths(paths,local_path,include_non_existing):
assert is_sequence(paths), repr(type(paths))
new_paths = []
assert not is_string(paths),repr(paths)
for n in paths:
if is_string(n):
if '*' in n or '?' in n:
p = glob.glob(n)
p2 = glob.glob(njoin(local_path,n))
if p2:
new_paths.extend(p2)
elif p:
new_paths.extend(p)
else:
if include_non_existing:
new_paths.append(n)
print('could not resolve pattern in %r: %r' \
% (local_path,n))
else:
n2 = njoin(local_path,n)
if os.path.exists(n2):
new_paths.append(n2)
else:
if os.path.exists(n):
new_paths.append(n)
elif include_non_existing:
new_paths.append(n)
if not os.path.exists(n):
print('non-existing path in %r: %r' \
% (local_path,n))
elif is_sequence(n):
new_paths.extend(_fix_paths(n,local_path,include_non_existing))
else:
new_paths.append(n)
return [minrelpath(p) for p in new_paths]
def gpaths(paths, local_path='', include_non_existing=True):
"""Apply glob to paths and prepend local_path if needed.
"""
if is_string(paths):
paths = (paths,)
return _fix_paths(paths,local_path, include_non_existing)
_temporary_directory = None
def clean_up_temporary_directory():
from numpy.distutils import log
global _temporary_directory
if not _temporary_directory:
return
log.debug('removing %s', _temporary_directory)
try:
shutil.rmtree(_temporary_directory)
except OSError:
pass
_temporary_directory = None
def make_temp_file(suffix='', prefix='', text=True):
global _temporary_directory
if not _temporary_directory:
_temporary_directory = tempfile.mkdtemp()
atexit.register(clean_up_temporary_directory)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
dir=_temporary_directory,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
# Hooks for colored terminal output.
# See also http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
# with a message:
# PYTHON2 caused an invalid page fault in
# module CYGNURSES7.DLL as 015f:18bbfc28
# Details: Python 2.3.3 [GCC 3.3.1 (cygming special)]
# ssh to Win32 machine from debian
# curses.version is 2.2
# CYGWIN_98-4.10, release 1.5.7(0.109/3/2))
return 0
if hasattr(sys.stdout,'isatty') and sys.stdout.isatty():
try:
import curses
curses.setupterm()
if (curses.tigetnum("colors") >= 0
and curses.tigetnum("pairs") >= 0
and ((curses.tigetstr("setf") is not None
and curses.tigetstr("setb") is not None)
or (curses.tigetstr("setaf") is not None
and curses.tigetstr("setab") is not None)
or curses.tigetstr("scp") is not None)):
return 1
except Exception:
pass
return 0
if terminal_has_colors():
_colour_codes = dict(black=0, red=1, green=2, yellow=3,
blue=4, magenta=5, cyan=6, white=7, default=9)
def colour_text(s, fg=None, bg=None, bold=False):
seq = []
if bold:
seq.append('1')
if fg:
fgcode = 30 + _colour_codes.get(fg.lower(), 0)
seq.append(str(fgcode))
if bg:
bgcode = 40 + _colour_codes.get(bg.lower(), 7)
seq.append(str(bgcode))
if seq:
return '\x1b[%sm%s\x1b[0m' % (';'.join(seq), s)
else:
return s
else:
def colour_text(s, fg=None, bg=None):
return s
def default_text(s):
return colour_text(s, 'default')
def red_text(s):
return colour_text(s, 'red')
def green_text(s):
return colour_text(s, 'green')
def yellow_text(s):
return colour_text(s, 'yellow')
def cyan_text(s):
return colour_text(s, 'cyan')
def blue_text(s):
return colour_text(s, 'blue')
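# --- Standalone illustration (not part of the original misc_util.py) ---
# On a colour-capable terminal the helpers wrap the text in ANSI escape
# sequences; otherwise they return the string unchanged. Assumes a NumPy
# version that still ships numpy.distutils.
from numpy.distutils.misc_util import red_text, green_text, yellow_text

print(green_text('ok'), yellow_text('warning'), red_text('FAILED'))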
#########################
def cyg2win32(path):
if sys.platform=='cygwin' and path.startswith('/cygdrive'):
path = path[10] + ':' + os.path.normcase(path[11:])
return path
def mingw32():
"""Return true when using mingw32 environment.
"""
if sys.platform=='win32':
if os.environ.get('OSTYPE','')=='msys':
return True
if os.environ.get('MSYSTEM','')=='MINGW32':
return True
return False
def msvc_runtime_library():
"Return name of MSVC runtime library if Python was built with MSVC >= 7"
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
lib = {'1300' : 'msvcr70', # MSVC 7.0
'1310' : 'msvcr71', # MSVC 7.1
'1400' : 'msvcr80', # MSVC 8
'1500' : 'msvcr90', # MSVC 9 (VS 2008)
}.get(msc_ver, None)
else:
lib = None
return lib
def msvc_on_amd64():
if not (sys.platform=='win32' or os.name=='nt'):
return
if get_build_architecture() != 'AMD64':
return
if 'DISTUTILS_USE_SDK' in os.environ:
return
# try to avoid _MSVCCompiler__root attribute error
print('Forcing DISTUTILS_USE_SDK=1')
os.environ['DISTUTILS_USE_SDK']='1'
return
#########################
#XXX need support for .C that is also C++
cxx_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z',re.I).match
fortran_ext_match = re.compile(r'.*[.](f90|f95|f77|for|ftn|f)\Z',re.I).match
f90_ext_match = re.compile(r'.*[.](f90|f95)\Z',re.I).match
f90_module_name_match = re.compile(r'\s*module\s*(?P<name>[\w_]+)',re.I).match
def _get_f90_modules(source):
"""Return a list of Fortran f90 module names that
given source file defines.
"""
if not f90_ext_match(source):
return []
modules = []
f = open(source,'r')
f_readlines = getattr(f,'xreadlines',f.readlines)
for line in f_readlines():
m = f90_module_name_match(line)
if m:
name = m.group('name')
modules.append(name)
# break # XXX can we assume that there is one module per file?
f.close()
return modules
def is_string(s):
return isinstance(s, str)
def all_strings(lst):
"""Return True if all items in lst are string objects. """
for item in lst:
if not is_string(item):
return False
return True
def is_sequence(seq):
if is_string(seq):
return False
try:
len(seq)
except:
return False
return True
def is_glob_pattern(s):
return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
return list(seq)
else:
return [seq]
def get_language(sources):
# not used in numpy/scipy packages, use build_ext.detect_language instead
"""Determine language value (c,f77,f90) from sources """
language = None
for source in sources:
if isinstance(source, str):
if f90_ext_match(source):
language = 'f90'
break
elif fortran_ext_match(source):
language = 'f77'
return language
def has_f_sources(sources):
"""Return True if sources contains Fortran files """
for source in sources:
if fortran_ext_match(source):
return True
return False
def has_cxx_sources(sources):
"""Return True if sources contains C++ files """
for source in sources:
if cxx_ext_match(source):
return True
return False
def filter_sources(sources):
"""Return four lists of filenames containing
C, C++, Fortran, and Fortran 90 module sources,
respectively.
"""
c_sources = []
cxx_sources = []
f_sources = []
fmodule_sources = []
for source in sources:
if fortran_ext_match(source):
modules = _get_f90_modules(source)
if modules:
fmodule_sources.append(source)
else:
f_sources.append(source)
elif cxx_ext_match(source):
cxx_sources.append(source)
else:
c_sources.append(source)
return c_sources, cxx_sources, f_sources, fmodule_sources
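# --- Standalone illustration (not part of the original misc_util.py) ---
# filter_sources must open *.f90/*.f95 files to look for module statements,
# so this sketch writes a real temporary file first; all names are
# hypothetical, and numpy.distutils must still be available.
import os
import tempfile

from numpy.distutils.misc_util import filter_sources

tmpdir = tempfile.mkdtemp()
f90_src = os.path.join(tmpdir, 'types.f90')
with open(f90_src, 'w') as fh:
    fh.write('module types\nend module types\n')

c, cxx, f77, f90_mod = filter_sources(['core.c', 'wrap.cpp', 'solve.f', f90_src])
print(c)        # ['core.c']
print(cxx)      # ['wrap.cpp']
print(f77)      # ['solve.f']
print(f90_mod)  # [...'types.f90'] -- it defines a Fortran module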
def _get_headers(directory_list):
# get *.h files from list of directories
headers = []
for d in directory_list:
head = glob.glob(os.path.join(d,"*.h")) #XXX: *.hpp files??
headers.extend(head)
return headers
def _get_directories(list_of_sources):
# get unique directories from list of sources.
direcs = []
for f in list_of_sources:
d = os.path.split(f)
if d[0] != '' and not d[0] in direcs:
direcs.append(d[0])
return direcs
def get_dependencies(sources):
#XXX scan sources for include statements
return _get_headers(_get_directories(sources))
def is_local_src_dir(directory):
"""Return true if directory is local directory.
"""
if not is_string(directory):
return False
abs_dir = os.path.abspath(directory)
c = os.path.commonprefix([os.getcwd(),abs_dir])
new_dir = abs_dir[len(c):].split(os.sep)
if new_dir and not new_dir[0]:
new_dir = new_dir[1:]
if new_dir and new_dir[0]=='build':
return False
new_dir = os.sep.join(new_dir)
return os.path.isdir(new_dir)
def general_source_files(top_path):
pruned_directories = {'CVS':1, '.svn':1, 'build':1}
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for f in filenames:
if not prune_file_pat.search(f):
yield os.path.join(dirpath, f)
def general_source_directories_files(top_path):
"""Return a directory name relative to top_path and
files contained.
"""
pruned_directories = ['CVS','.svn','build']
prune_file_pat = re.compile(r'(?:[~#]|\.py[co]|\.o)$')
for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):
pruned = [ d for d in dirnames if d not in pruned_directories ]
dirnames[:] = pruned
for d in dirnames:
dpath = os.path.join(dirpath, d)
rpath = rel_path(dpath, top_path)
files = []
for f in os.listdir(dpath):
fn = os.path.join(dpath,f)
if os.path.isfile(fn) and not prune_file_pat.search(fn):
files.append(fn)
yield rpath, files
dpath = top_path
rpath = rel_path(dpath, top_path)
filenames = [os.path.join(dpath,f) for f in os.listdir(dpath) \
if not prune_file_pat.search(f)]
files = [f for f in filenames if os.path.isfile(f)]
yield rpath, files
def get_ext_source_files(ext):
# Get sources and any include files in the same directory.
filenames = []
sources = filter(is_string, ext.sources)
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
for d in ext.depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_script_files(scripts):
scripts = filter(is_string, scripts)
return scripts
def get_lib_source_files(lib):
filenames = []
sources = lib[1].get('sources',[])
sources = filter(is_string, sources)
filenames.extend(sources)
filenames.extend(get_dependencies(sources))
depends = lib[1].get('depends',[])
for d in depends:
if is_local_src_dir(d):
filenames.extend(list(general_source_files(d)))
elif os.path.isfile(d):
filenames.append(d)
return filenames
def get_data_files(data):
if is_string(data):
return [data]
sources = data[1]
filenames = []
for s in sources:
if hasattr(s, '__call__'):
continue
if is_local_src_dir(s):
filenames.extend(list(general_source_files(s)))
elif is_string(s):
if os.path.isfile(s):
filenames.append(s)
else:
print('Not existing data file:',s)
else:
raise TypeError(repr(s))
return filenames
def dot_join(*args):
return '.'.join([a for a in args if a])
def get_frame(level=0):
"""Return frame object from call stack with given level.
"""
try:
return sys._getframe(level+1)
except AttributeError:
frame = sys.exc_info()[2].tb_frame
for _ in range(level+1):
frame = frame.f_back
return frame
class SconsInfo(object):
"""
Container object holding build info for building a package with scons.
Parameters
----------
scons_path : str or None
Path to scons script, relative to the directory of setup.py.
If None, no scons script is specified. This can be useful to add only
pre- and post-hooks to a configuration.
parent_name : str or None
Name of the parent package (for example "numpy").
pre_hook : sequence of callables or None
Callables that are executed before scons is invoked.
Each callable should be defined as ``callable(*args, **kw)``.
post_hook : sequence of callables or None
Callables that are executed after scons is invoked.
Each callable should be defined as ``callable(*args, **kw)``.
source_files : list of str or None
List of paths to source files, relative to the directory of setup.py.
pkg_path : str or None
Path to the package for which the `SconsInfo` instance holds the
build info, relative to the directory of setup.py.
Notes
-----
All parameters are available as attributes of a `SconsInfo` instance.
"""
def __init__(self, scons_path, parent_name, pre_hook,
post_hook, source_files, pkg_path):
self.scons_path = scons_path
self.parent_name = parent_name
self.pre_hook = pre_hook
self.post_hook = post_hook
self.source_files = source_files
if pkg_path:
self.pkg_path = pkg_path
else:
if scons_path:
self.pkg_path = os.path.dirname(scons_path)
else:
self.pkg_path = ''
######################
class Configuration(object):
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules', 'scons_data',
'installed_libraries']
_dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
def __init__(self,
package_name=None,
parent_name=None,
top_path=None,
package_path=None,
caller_level=1,
setup_name='setup.py',
**attrs):
"""Construct configuration instance of a package.
package_name -- name of the package
Ex.: 'distutils'
parent_name -- name of the parent package
Ex.: 'numpy'
top_path -- directory of the toplevel package
Ex.: the directory where the numpy package source sits
package_path -- directory of package. Will be computed by magic from the
directory of the caller module if not specified
Ex.: the directory where numpy.distutils is
caller_level -- frame level to caller namespace, internal parameter.
"""
self.name = dot_join(parent_name, package_name)
self.version = None
caller_frame = get_frame(caller_level)
self.local_path = get_path_from_frame(caller_frame, top_path)
# local_path -- directory of a file (usually setup.py) that
# defines a configuration() function.
if top_path is None:
top_path = self.local_path
self.local_path = ''
if package_path is None:
package_path = self.local_path
elif os.path.isdir(njoin(self.local_path,package_path)):
package_path = njoin(self.local_path,package_path)
if not os.path.isdir(package_path or '.'):
raise ValueError("%r is not a directory" % (package_path,))
self.top_path = top_path
self.package_path = package_path
# this is the relative path in the installed package
self.path_in_package = os.path.join(*self.name.split('.'))
self.list_keys = self._list_keys[:]
self.dict_keys = self._dict_keys[:]
for n in self.list_keys:
v = copy.copy(attrs.get(n, []))
setattr(self, n, as_list(v))
for n in self.dict_keys:
v = copy.copy(attrs.get(n, {}))
setattr(self, n, v)
known_keys = self.list_keys + self.dict_keys
self.extra_keys = self._extra_keys[:]
for n in attrs.keys():
if n in known_keys:
continue
a = attrs[n]
setattr(self,n,a)
if isinstance(a, list):
self.list_keys.append(n)
elif isinstance(a, dict):
self.dict_keys.append(n)
else:
self.extra_keys.append(n)
if os.path.exists(njoin(package_path,'__init__.py')):
self.packages.append(self.name)
self.package_dir[self.name] = package_path
self.options = dict(
ignore_setup_xxx_py = False,
assume_default_configuration = False,
delegate_options_to_subpackages = False,
quiet = False,
)
caller_instance = None
for i in range(1,3):
try:
f = get_frame(i)
except ValueError:
break
try:
caller_instance = eval('self',f.f_globals,f.f_locals)
break
except NameError:
pass
if isinstance(caller_instance, self.__class__):
if caller_instance.options['delegate_options_to_subpackages']:
self.set_options(**caller_instance.options)
self.setup_name = setup_name
def todict(self):
"""
Return a dictionary compatible with the keyword arguments of distutils
setup function.
Examples
--------
>>> setup(**config.todict()) #doctest: +SKIP
"""
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for n in known_keys:
a = getattr(self,n)
if a:
d[n] = a
return d
def info(self, message):
if not self.options['quiet']:
print(message)
def warn(self, message):
sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
Configure Configuration instance.
The following options are available:
- ignore_setup_xxx_py
- assume_default_configuration
- delegate_options_to_subpackages
- quiet
"""
for key, value in options.items():
if key in self.options:
self.options[key] = value
else:
raise ValueError('Unknown option: '+key)
def get_distribution(self):
"""Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
def _wildcard_get_subpackage(self, subpackage_name,
parent_name,
caller_level = 1):
l = subpackage_name.split('.')
subpackage_path = njoin([self.local_path]+l)
dirs = filter(os.path.isdir,glob.glob(subpackage_path))
config_list = []
for d in dirs:
if not os.path.isfile(njoin(d,'__init__.py')):
continue
if 'build' in d.split(os.sep):
continue
n = '.'.join(d.split(os.sep)[-len(l):])
c = self.get_subpackage(n,
parent_name = parent_name,
caller_level = caller_level+1)
config_list.extend(c)
return config_list
def _get_configuration_from_setup_py(self, setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = 1):
# In case setup_py imports local modules:
sys.path.insert(0,os.path.dirname(setup_py))
try:
fo_setup_py = open(setup_py, 'U')
setup_name = os.path.splitext(os.path.basename(setup_py))[0]
n = dot_join(self.name,subpackage_name,setup_name)
setup_module = imp.load_module('_'.join(n.split('.')),
fo_setup_py,
setup_py,
('.py', 'U', 1))
fo_setup_py.close()
if not hasattr(setup_module,'configuration'):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s does not define configuration())'\
% (setup_module))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level + 1)
else:
pn = dot_join(*([parent_name] + subpackage_name.split('.')[:-1]))
args = (pn,)
def fix_args_py2(args):
if setup_module.configuration.func_code.co_argcount > 1:
args = args + (self.top_path,)
return args
def fix_args_py3(args):
if setup_module.configuration.__code__.co_argcount > 1:
args = args + (self.top_path,)
return args
if sys.version_info[0] < 3:
args = fix_args_py2(args)
else:
args = fix_args_py3(args)
config = setup_module.configuration(*args)
if config.name!=dot_join(parent_name,subpackage_name):
self.warn('Subpackage %r configuration returned as %r' % \
(dot_join(parent_name,subpackage_name), config.name))
finally:
del sys.path[0]
return config
def get_subpackage(self,subpackage_name,
subpackage_path=None,
parent_name=None,
caller_level = 1):
"""Return list of subpackage configurations.
Parameters
----------
subpackage_name: str,None
Name of the subpackage to get the configuration. '*' in
subpackage_name is handled as a wildcard.
subpackage_path: str
If None, then the path is assumed to be the local path plus the
subpackage_name. If a setup.py file is not found in the
subpackage_path, then a default configuration is used.
parent_name: str
Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
raise ValueError(
"either subpackage_name or subpackage_path must be specified")
subpackage_name = os.path.basename(subpackage_path)
# handle wildcards
l = subpackage_name.split('.')
if subpackage_path is None and '*' in subpackage_name:
return self._wildcard_get_subpackage(subpackage_name,
parent_name,
caller_level = caller_level+1)
assert '*' not in subpackage_name,repr((subpackage_name, subpackage_path,parent_name))
if subpackage_path is None:
subpackage_path = njoin([self.local_path] + l)
else:
subpackage_path = njoin([subpackage_path] + l[:-1])
subpackage_path = self.paths([subpackage_path])[0]
setup_py = njoin(subpackage_path, self.setup_name)
if not self.options['ignore_setup_xxx_py']:
if not os.path.isfile(setup_py):
setup_py = njoin(subpackage_path,
'setup_%s.py' % (subpackage_name))
if not os.path.isfile(setup_py):
if not self.options['assume_default_configuration']:
self.warn('Assuming default configuration '\
'(%s/{setup_%s,setup}.py was not found)' \
% (os.path.dirname(setup_py), subpackage_name))
config = Configuration(subpackage_name, parent_name,
self.top_path, subpackage_path,
caller_level = caller_level+1)
else:
config = self._get_configuration_from_setup_py(
setup_py,
subpackage_name,
subpackage_path,
parent_name,
caller_level = caller_level + 1)
if config:
return [config]
else:
return []
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
"""Add a sub-package to the current Configuration instance.
This is useful in a setup.py script for adding sub-packages to a
package.
Parameters
----------
subpackage_name: str
name of the subpackage
subpackage_path: str
if given, the subpackage path, such that the subpackage is in
subpackage_path / subpackage_name. If None, the subpackage is
assumed to be located in the local path / subpackage_name.
standalone: bool
"""
if standalone:
parent_name = None
else:
parent_name = self.name
config_list = self.get_subpackage(subpackage_name,subpackage_path,
parent_name = parent_name,
caller_level = 2)
if not config_list:
self.warn('No configuration returned, assuming unavailable.')
for config in config_list:
d = config
if isinstance(config, Configuration):
d = config.todict()
assert isinstance(d,dict),repr(type(d))
self.info('Appending %s configuration to %s' \
% (d.get('name'), self.name))
self.dict_append(**d)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a subpackage '+ subpackage_name)
def add_data_dir(self,data_path):
"""Recursively add files under data_path to data_files list.
Recursively add files under data_path to the list of data_files to be
installed (and distributed). The data_path can be either a relative
path-name, or an absolute path-name, or a 2-tuple where the first
argument shows where in the install directory the data directory
should be installed to.
Parameters
----------
data_path: seq,str
Argument can be either
* 2-sequence (<datadir suffix>,<path to data directory>)
* path to data directory where python datadir suffix defaults
to package dir.
Notes
-----
Rules for installation paths:
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
(gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
(gun/*, foo/*) -> parent/gun/a, parent/gun/b
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
fun/bar/car.dat::
>>> self.add_data_dir('fun') #doctest: +SKIP
>>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
>>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
<package install directory>/
fun/
foo.dat
bar/
car.dat
sun/
foo.dat
bar/
car.dat
gun/
foo.dat
car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
else:
d = None
if is_sequence(data_path):
[self.add_data_dir((d,p)) for p in data_path]
return
if not is_string(data_path):
raise TypeError("not a string: %r" % (data_path,))
if d is None:
if os.path.isabs(data_path):
return self.add_data_dir((os.path.basename(data_path), data_path))
return self.add_data_dir((data_path, data_path))
paths = self.paths(data_path, include_non_existing=False)
if is_glob_pattern(data_path):
if is_glob_pattern(d):
pattern_list = allpath(d).split(os.sep)
pattern_list.reverse()
# /a/*//b/ -> /a/*/b
rl = list(range(len(pattern_list)-1)); rl.reverse()
for i in rl:
if not pattern_list[i]:
del pattern_list[i]
#
for path in paths:
if not os.path.isdir(path):
print('Not a directory, skipping',path)
continue
rpath = rel_path(path, self.local_path)
path_list = rpath.split(os.sep)
path_list.reverse()
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
if i>=len(path_list):
raise ValueError('cannot fill pattern %r with %r' \
% (d, path))
target_list.append(path_list[i])
else:
assert s==path_list[i],repr((s,path_list[i],data_path,d,path,rpath))
target_list.append(s)
i += 1
if path_list[i:]:
self.warn('mismatch of pattern_list=%s and path_list=%s'\
% (pattern_list,path_list))
target_list.reverse()
self.add_data_dir((os.sep.join(target_list),path))
else:
for path in paths:
self.add_data_dir((d,path))
return
assert not is_glob_pattern(d),repr(d)
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
for path in paths:
for d1,f in list(general_source_directories_files(path)):
target_path = os.path.join(self.path_in_package,d,d1)
data_files.append((target_path, f))
def _optimize_data_files(self):
data_dict = {}
for p,files in self.data_files:
if p not in data_dict:
data_dict[p] = set()
for f in files:
data_dict[p].add(f)
self.data_files[:] = [(p,list(files)) for p,files in data_dict.items()]
def add_data_files(self,*files):
"""Add data files to configuration data_files.
Parameters
----------
files: sequence
Argument(s) can be either
* 2-sequence (<datadir prefix>,<path to data file(s)>)
* paths to data files where python datadir prefix defaults
to package dir.
Notes
-----
The form of each element of the files sequence is very flexible
allowing many combinations of where to get the files from the package
and where they should ultimately be installed on the system. The most
basic usage is for an element of the files argument sequence to be a
simple filename. This will cause that file from the local path to be
installed to the installation path of the self.name package (package
path). The file argument can also be a relative path in which case the
entire relative path will be installed into the package directory.
Finally, the file can be an absolute path name in which case the file
will be found at the absolute path name but installed to the package
path.
This basic behavior can be augmented by passing a 2-tuple in as the
file argument. The first element of the tuple should specify the
relative path (under the package install directory) where the
remaining sequence of files should be installed to (it has nothing to
do with the file-names in the source distribution). The second element
of the tuple is the sequence of files that should be installed. The
files in this sequence can be filenames, relative paths, or absolute
paths. For absolute paths the file will be installed in the top-level
package installation directory (regardless of the first argument).
Filenames and relative path names will be installed in the package
install directory under the path name given as the first element of
the tuple.
Rules for installation paths:
#. file.txt -> (., file.txt)-> parent/file.txt
#. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
#. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
#. *.txt -> parent/a.txt, parent/b.txt
#. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
#. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
#. (sun, file.txt) -> parent/sun/file.txt
#. (sun, bar/file.txt) -> parent/sun/file.txt
#. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
#. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
#. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt
An additional feature is that the path to a data-file can actually be
a function that takes no arguments and returns the actual path(s) to
the data-files. This is useful when the data files are generated while
building the package.
Examples
--------
Add files to the list of data_files to be included with the package.
>>> self.add_data_files('foo.dat',
... ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
... 'bar/cat.dat',
... '/full/path/to/can.dat') #doctest: +SKIP
will install these data files to::
<package install directory>/
 foo.dat
 fun/
   gun.dat
   nun/
     pun.dat
 sun.dat
 bar/
   cat.dat
 can.dat
where <package install directory> is the package (or sub-package)
directory such as '/usr/lib/python2.4/site-packages/mypackage'
('C:\\Python2.4\\Lib\\site-packages\\mypackage') or
'/usr/lib/python2.4/site-packages/mypackage/mysubpackage'
('C:\\Python2.4\\Lib\\site-packages\\mypackage\\mysubpackage').
"""
if len(files)>1:
for f in files:
self.add_data_files(f)
return
assert len(files)==1
if is_sequence(files[0]):
d,files = files[0]
else:
d = None
if is_string(files):
filepat = files
elif is_sequence(files):
if len(files)==1:
filepat = files[0]
else:
for f in files:
self.add_data_files((d,f))
return
else:
raise TypeError(repr(type(files)))
if d is None:
if hasattr(filepat, '__call__'):
d = ''
elif os.path.isabs(filepat):
d = ''
else:
d = os.path.dirname(filepat)
self.add_data_files((d,files))
return
paths = self.paths(filepat, include_non_existing=False)
if is_glob_pattern(filepat):
if is_glob_pattern(d):
pattern_list = d.split(os.sep)
pattern_list.reverse()
for path in paths:
path_list = path.split(os.sep)
path_list.reverse()
path_list.pop() # filename
target_list = []
i = 0
for s in pattern_list:
if is_glob_pattern(s):
target_list.append(path_list[i])
i += 1
else:
target_list.append(s)
target_list.reverse()
self.add_data_files((os.sep.join(target_list), path))
else:
self.add_data_files((d,paths))
return
assert not is_glob_pattern(d),repr((d,filepat))
dist = self.get_distribution()
if dist is not None and dist.data_files is not None:
data_files = dist.data_files
else:
data_files = self.data_files
data_files.append((os.path.join(self.path_in_package,d),paths))
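# A hedged sketch of the generator-function form mentioned in the docstring
# above: a data-file entry may be a callable taking no arguments that returns
# the path(s) of files produced at build time. The names below
# (generate_grammar, grammar.txt, config) are illustrative only.
#
#     def generate_grammar():
#         # write the file during the build and report its path back
#         with open('grammar.txt', 'w') as fh:
#             fh.write('generated at build time\n')
#         return 'grammar.txt'
#
#     config.add_data_files(generate_grammar)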
### XXX Implement add_py_modules
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
Add the given sequence of paths to the beginning of the include_dirs
list. This list will be visible to all extension modules of the
current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
if dist is not None:
if dist.include_dirs is None:
dist.include_dirs = []
dist.include_dirs.extend(include_dirs)
else:
self.include_dirs.extend(include_dirs)
def add_numarray_include_dirs(self):
import numpy.numarray.util as nnu
self.add_include_dirs(*nnu.get_numarray_include_dirs())
def add_headers(self,*files):
"""Add installable headers to configuration.
Add the given sequence of files to the beginning of the headers list.
By default, headers will be installed under the
<python-include>/<self.name.replace('.','/')>/ directory. If an item of files
is a tuple, then its first argument specifies the actual installation
location relative to the <python-include> path.
Parameters
----------
files: str, seq
Argument(s) can be either:
* 2-sequence (<includedir suffix>,<path to header file(s)>)
* path(s) to header file(s) where python includedir suffix will
default to package name.
"""
headers = []
for path in files:
if is_string(path):
[headers.append((self.name,p)) for p in self.paths(path)]
else:
if not isinstance(path, (tuple, list)) or len(path) != 2:
raise TypeError(repr(path))
[headers.append((path[0],p)) for p in self.paths(path[1])]
dist = self.get_distribution()
if dist is not None:
if dist.headers is None:
dist.headers = []
dist.headers.extend(headers)
else:
self.headers.extend(headers)
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
Applies glob.glob(...) to each path in the sequence (if needed) and
prepends the local_path if needed. Because this is called on all
source lists, this allows wildcard characters to be specified in lists
of sources for extension modules, libraries and scripts, and allows
path-names to be relative to the source directory.
"""
include_non_existing = kws.get('include_non_existing',True)
return gpaths(paths,
local_path = self.local_path,
include_non_existing=include_non_existing)
def _fix_paths_dict(self,kw):
for k in kw.keys():
v = kw[k]
if k in ['sources','depends','include_dirs','library_dirs',
'module_dirs','extra_objects']:
new_v = self.paths(v)
kw[k] = new_v
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
Create and add an Extension instance to the ext_modules list. This
method also takes the following optional keyword arguments that are
passed on to the Extension constructor.
Parameters
----------
name: str
name of the extension
sources: seq
list of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
include_dirs:
define_macros:
undef_macros:
library_dirs:
libraries:
runtime_library_dirs:
extra_objects:
extra_compile_args:
extra_link_args:
export_symbols:
swig_opts:
depends:
The depends list contains paths to files or directories that the
sources of the extension module depend on. If any path in the
depends list is newer than the extension module, then the module
will be rebuilt.
language:
f2py_options:
module_dirs:
extra_info: dict,list
dict or list of dict of keywords to be appended to keywords.
Notes
-----
The self.paths(...) method is applied to all lists that may contain
paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name,name)
ext_args['sources'] = sources
if 'extra_info' in ext_args:
extra_info = ext_args['extra_info']
del ext_args['extra_info']
if isinstance(extra_info, dict):
extra_info = [extra_info]
for info in extra_info:
assert isinstance(info, dict), repr(info)
dict_append(ext_args,**info)
self._fix_paths_dict(ext_args)
# Resolve out-of-tree dependencies
libraries = ext_args.get('libraries',[])
libnames = []
ext_args['libraries'] = []
for libname in libraries:
if isinstance(libname,tuple):
self._fix_paths_dict(libname[1])
# Handle library names of the form libname@relative/path/to/library
if '@' in libname:
lname,lpath = libname.split('@',1)
lpath = os.path.abspath(njoin(self.local_path,lpath))
if os.path.isdir(lpath):
c = self.get_subpackage(None,lpath,
caller_level = 2)
if isinstance(c,Configuration):
c = c.todict()
for l in [l[0] for l in c.get('libraries',[])]:
llname = l.split('__OF__',1)[0]
if llname == lname:
c.pop('name',None)
dict_append(ext_args,**c)
break
continue
libnames.append(libname)
ext_args['libraries'] = libnames + ext_args['libraries']
from numpy.distutils.core import Extension
ext = Extension(**ext_args)
self.ext_modules.append(ext)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add an extension '+name)
return ext
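# A minimal, hypothetical usage sketch for add_extension; the configuration()
# function, package name and file names below are illustrative, not part of
# this module:
#
#     def configuration(parent_package='', top_path=None):
#         from numpy.distutils.misc_util import Configuration
#         config = Configuration('mypkg', parent_package, top_path)
#         # sources may mix plain files and source-generator callables
#         config.add_extension('_fast',
#                              sources=['_fastmodule.c', 'helpers.c'],
#                              define_macros=[('USE_FAST_PATH', 1)])
#         return config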
def add_library(self,name,sources,**build_info):
"""
Add library to configuration.
Parameters
----------
name : str
Name of the extension.
sources : sequence
List of the sources. The list of sources may contain functions
(called source generators) which must take an extension instance
and a build directory as inputs and return a source file or list of
source files or None. If None is returned then no sources are
generated. If the Extension instance has no sources after
processing all source generators, then no extension module is
built.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* f2py_options
* language
"""
self._add_library(name, sources, None, build_info)
dist = self.get_distribution()
if dist is not None:
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a library '+ name)
def _add_library(self, name, sources, install_dir, build_info):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
name = name #+ '__OF__' + self.name
build_info['sources'] = sources
# Sometimes, depends is not set to an empty list by default, and if
# depends is not given to add_library, distutils barfs (#1134)
if 'depends' not in build_info:
build_info['depends'] = []
self._fix_paths_dict(build_info)
# Add to libraries list so that it is build with build_clib
self.libraries.append((name, build_info))
def add_installed_library(self, name, sources, install_dir, build_info=None):
"""
Similar to add_library, but the specified library is installed.
Most C libraries used with `distutils` are only used to build python
extensions, but libraries built through this method will be installed
so that they can be reused by third-party packages.
Parameters
----------
name : str
Name of the installed library.
sources : sequence
List of the library's source files. See `add_library` for details.
install_dir : str
Path to install the library, relative to the current sub-package.
build_info : dict, optional
The following keys are allowed:
* depends
* macros
* include_dirs
* extra_compiler_args
* f2py_options
* language
Returns
-------
None
See Also
--------
add_library, add_npy_pkg_config, get_info
Notes
-----
The best way to encode the options required to link against the specified
C libraries is to use a "libname.ini" file, and use `get_info` to
retrieve the required options (see `add_npy_pkg_config` for more
information).
"""
if not build_info:
build_info = {}
install_dir = os.path.join(self.package_path, install_dir)
self._add_library(name, sources, install_dir, build_info)
self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
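# A hedged sketch of the workflow described in the Notes above (the names
# 'mylib' and 'mylib.ini.in' are placeholders): build and install the
# library, install an .ini template describing how to link against it, and
# let a third-party package recover the flags with get_info() (possibly
# passing extra search directories).
#
#     config.add_installed_library('mylib',
#                                  sources=['mylib.c'],
#                                  install_dir='lib')
#     config.add_npy_pkg_config('mylib.ini.in', 'lib', {'name': 'mylib'})
#
#     # in a downstream setup.py:
#     #     info = get_info('mylib')
#     #     config.add_extension('uses_mylib', sources=['use.c'], extra_info=info)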
def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
"""
Generate and install a npy-pkg config file from a template.
The config file generated from `template` is installed in the
given install directory, using `subst_dict` for variable substitution.
Parameters
----------
template : str
The path of the template, relative to the current package path.
install_dir : str
Where to install the npy-pkg config file, relative to the current
package path.
subst_dict : dict, optional
If given, any string of the form ``@key@`` will be replaced by
``subst_dict[key]`` in the template file when installed. The install
prefix is always available through the variable ``@prefix@``, since the
install prefix is not easy to get reliably from setup.py.
See also
--------
add_installed_library, get_info
Notes
-----
This works for both standard installs and in-place builds, i.e. the
``@prefix@`` refers to the source directory for in-place builds.
Examples
--------
::
config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': 'bar'})
Assuming the foo.ini.in file has the following content::
[meta]
Name=@foo@
Version=1.0
Description=dummy description
[default]
Cflags=-I@prefix@/include
Libs=
The generated file will have the following content::
[meta]
Name=bar
Version=1.0
Description=dummy description
[default]
Cflags=-Iprefix_dir/include
Libs=
and will be installed as foo.ini in the 'lib' subpath.
"""
if subst_dict is None:
subst_dict = {}
basename = os.path.splitext(template)[0]
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
self.installed_pkg_config[self.name].append((template, install_dir,
subst_dict))
else:
self.installed_pkg_config[self.name] = [(template, install_dir,
subst_dict)]
def add_scons_installed_library(self, name, install_dir):
"""
Add a scons-built installable library to distutils.
Parameters
----------
name : str
The name of the library.
install_dir : str
Path to install the library, relative to the current sub-package.
"""
install_dir = os.path.join(self.package_path, install_dir)
self.installed_libraries.append(InstallableLib(name, {}, install_dir))
def add_sconscript(self, sconscript, subpackage_path=None,
standalone = False, pre_hook = None,
post_hook = None, source_files = None, package_path=None):
"""Add a sconscript to configuration.
pre_hook and post_hook should be sequences of callables, which will be
used before and after executing scons. The callables should be defined as
callable(*args, **kw). It is ugly, but well, hooks are ugly anyway...
sconscript can be None, which can be useful to add only post/pre
hooks."""
if standalone:
parent_name = None
else:
parent_name = self.name
dist = self.get_distribution()
# Convert the sconscript name to a relative filename (relative from top
# setup.py's directory)
fullsconsname = self.paths(sconscript)[0]
# XXX: Think about a way to automatically register source files from
# scons...
full_source_files = []
if source_files:
full_source_files.extend([self.paths(i)[0] for i in source_files])
scons_info = SconsInfo(fullsconsname, parent_name,
pre_hook, post_hook,
full_source_files, package_path)
if dist is not None:
if dist.scons_data is None:
dist.scons_data = []
dist.scons_data.append(scons_info)
self.warn('distutils distribution has been initialized,'\
' it may be too late to add a sconscript ' + str(sconscript))
# XXX: we add a fake extension, to correctly initialize some
# options in distutils command.
dist.add_extension('', sources = [])
else:
self.scons_data.append(scons_info)
# XXX: we add a fake extension, to correctly initialize some
# options in distutils command.
self.add_extension('', sources = [])
def add_scripts(self,*files):
"""Add scripts to configuration.
Add the sequence of files to the beginning of the scripts list.
Scripts will be installed under the <prefix>/bin/ directory.
"""
scripts = self.paths(files)
dist = self.get_distribution()
if dist is not None:
if dist.scripts is None:
dist.scripts = []
dist.scripts.extend(scripts)
else:
self.scripts.extend(scripts)
def dict_append(self,**dict):
for key in self.list_keys:
a = getattr(self,key)
a.extend(dict.get(key,[]))
for key in self.dict_keys:
a = getattr(self,key)
a.update(dict.get(key,{}))
known_keys = self.list_keys + self.dict_keys + self.extra_keys
for key in dict.keys():
if key not in known_keys:
a = getattr(self, key, None)
if a and a==dict[key]: continue
self.warn('Inheriting attribute %r=%r from %r' \
% (key,dict[key],dict.get('name','?')))
setattr(self,key,dict[key])
self.extra_keys.append(key)
elif key in self.extra_keys:
self.info('Ignoring attempt to set %r (from %r to %r)' \
% (key, getattr(self,key), dict[key]))
elif key in known_keys:
# key is already processed above
pass
else:
raise ValueError("Don't know about key=%r" % (key))
def __str__(self):
from pprint import pformat
known_keys = self.list_keys + self.dict_keys + self.extra_keys
s = '<'+5*'-' + '\n'
s += 'Configuration of '+self.name+':\n'
known_keys.sort()
for k in known_keys:
a = getattr(self,k,None)
if a:
s += '%s = %s\n' % (k,pformat(a))
s += 5*'-' + '>'
return s
def get_config_cmd(self):
"""
Returns the numpy.distutils config command instance.
"""
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
cmd.noisy = 0
old_path = os.environ.get('PATH')
if old_path:
path = os.pathsep.join(['.',old_path])
os.environ['PATH'] = path
return cmd
def get_build_temp_dir(self):
"""
Return a path to a temporary directory where temporary files should be
placed.
"""
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
Use it inside a source-generating function to ensure that the
setup distribution instance has been initialized.
Notes
-----
Returns True if a Fortran 77 compiler is available (a simple Fortran 77
program could be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f77')
return flag
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
Use it inside a source-generating function to ensure that the
setup distribution instance has been initialized.
Notes
-----
Returns True if a Fortran 90 compiler is available (a simple Fortran 90
program could be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
end
'''
config_cmd = self.get_config_cmd()
flag = config_cmd.try_compile(simple_fortran_subroutine,lang='f90')
return flag
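# The two checks above are intended for use from inside a source-generating
# function, once the distribution has been initialized. A hedged sketch
# (generate_wrappers and the file names are hypothetical):
#
#     def generate_wrappers(ext, build_dir):
#         if config.have_f77c():
#             return ['wrappers_f77.f']
#         return None   # no Fortran 77 compiler: contribute no extra sources
#
#     config.add_extension('fortran_stuff',
#                          sources=['core.c', generate_wrappers])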
def append_to(self, extlib):
"""Append libraries, include_dirs to extension or library item.
"""
if is_sequence(extlib):
lib_name, build_info = extlib
dict_append(build_info,
libraries=self.libraries,
include_dirs=self.include_dirs)
else:
from numpy.distutils.core import Extension
assert isinstance(extlib,Extension), repr(extlib)
extlib.libraries.extend(self.libraries)
extlib.include_dirs.extend(self.include_dirs)
def _get_svn_revision(self,path):
"""Return path's SVN revision number.
"""
revision = None
m = None
try:
p = subprocess.Popen(['svnversion'], shell=True,
stdout=subprocess.PIPE, stderr=STDOUT,
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
except:
pass
if m:
revision = int(m.group('revision'))
return revision
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK',None):
entries = njoin(path,'_svn','entries')
else:
entries = njoin(path,'.svn','entries')
if os.path.isfile(entries):
f = open(entries)
fstr = f.read()
f.close()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"',fstr)
if m:
revision = int(m.group('revision'))
else: # non-xml entries file --- look for the revision after the 'dir' entry
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
revision = int(m.group('revision'))
return revision
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
Return a version string of the current package or None if the version
information could not be detected.
Notes
-----
This method scans files named
__version__.py, <packagename>_version.py, version.py, and
__svn_version__.py for string variables version, __version\__, and
<packagename>_version, until a version number is found.
"""
version = getattr(self,'version',None)
if version is not None:
return version
# Get version from version file.
if version_file is None:
files = ['__version__.py',
self.name.split('.')[-1]+'_version.py',
'version.py',
'__svn_version__.py']
else:
files = [version_file]
if version_variable is None:
version_vars = ['version',
'__version__',
self.name.split('.')[-1]+'_version']
else:
version_vars = [version_variable]
for f in files:
fn = njoin(self.local_path,f)
if os.path.isfile(fn):
info = (open(fn),fn,('.py','U',1))
name = os.path.splitext(os.path.basename(fn))[0]
n = dot_join(self.name,name)
try:
version_module = imp.load_module('_'.join(n.split('.')),*info)
except ImportError:
msg = get_exception()
self.warn(str(msg))
version_module = None
if version_module is None:
continue
for a in version_vars:
version = getattr(version_module,a,None)
if version is not None:
break
if version is not None:
break
if version is not None:
self.version = version
return version
# Get version as SVN revision number
revision = self._get_svn_revision(self.local_path)
if revision is not None:
version = str(revision)
self.version = version
return version
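# Typical use of get_version() (a sketch): fetch the detected version while
# assembling setup metadata, falling back to a placeholder if nothing was
# found.
#
#     config = Configuration('mypkg', parent_package, top_path)
#     version = config.get_version()   # may return None
#     # setup(..., version=version or '0.0.0', configuration=configuration)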
def make_svn_version_py(self, delete=True):
"""Appends a data function to the data_files list that will generate
__svn_version__.py file to the current package directory.
Generate package __svn_version__.py file from SVN revision number,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
Notes
-----
If __svn_version__.py existed before, nothing is done.
This is
intended for working with source directories that are in an SVN
repository.
"""
target = njoin(self.local_path,'__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
if os.path.isfile(target) or revision is None:
return
else:
def generate_svn_version_py():
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target,version))
f = open(target,'w')
f.write('version = %r\n' % (version))
f.close()
import atexit
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
except OSError: pass
try: os.remove(f+'c'); p('removed '+f+'c')
except OSError: pass
atexit.register(rm_file)
return target
self.add_data_files(('', generate_svn_version_py()))
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
This file is installed to the
package installation directory.
"""
self.py_modules.append((self.name,name,generate_config_py))
def scons_make_config_py(self, name = '__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
"""
self.py_modules.append((self.name, name, scons_generate_config_py))
def get_info(self,*names):
"""Get resources information.
Return information (from system_info.get_info) for all of the names in
the argument list in a single dictionary.
"""
from system_info import get_info, dict_append
info_dict = {}
for a in names:
dict_append(info_dict,**get_info(a))
return info_dict
def get_cmd(cmdname, _cache={}):
if cmdname not in _cache:
import distutils.core
dist = distutils.core._setup_distribution
if dist is None:
from distutils.errors import DistutilsInternalError
raise DistutilsInternalError(
'setup distribution instance not initialized')
cmd = dist.get_command_obj(cmdname)
_cache[cmdname] = cmd
return _cache[cmdname]
def get_numpy_include_dirs():
# numpy_include_dirs are set by numpy/core/setup.py, otherwise []
include_dirs = Configuration.numpy_include_dirs[:]
if not include_dirs:
import numpy
include_dirs = [ numpy.get_include() ]
# else running numpy/core/setup.py
return include_dirs
def get_npy_pkg_dir():
"""Return the path where to find the npy-pkg-config directory."""
# XXX: import here for bootstrapping reasons
import numpy
d = os.path.join(os.path.dirname(numpy.__file__),
'core', 'lib', 'npy-pkg-config')
return d
def get_pkg_info(pkgname, dirs=None):
"""
Return library info for the given package.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories in which to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
pkginfo : class instance
The `LibraryInfo` instance containing the build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_info
"""
from numpy.distutils.npy_pkg_config import read_config
if dirs:
dirs.append(get_npy_pkg_dir())
else:
dirs = [get_npy_pkg_dir()]
return read_config(pkgname, dirs)
def get_info(pkgname, dirs=None):
"""
Return an info dict for a given C library.
The info dict contains the necessary options to use the C library.
Parameters
----------
pkgname : str
Name of the package (should match the name of the .ini file, without
the extension, e.g. foo for the file foo.ini).
dirs : sequence, optional
If given, should be a sequence of additional directories in which to look
for npy-pkg-config files. Those directories are searched prior to the
NumPy directory.
Returns
-------
info : dict
The dictionary with build information.
Raises
------
PkgNotFound
If the package is not found.
See Also
--------
Configuration.add_npy_pkg_config, Configuration.add_installed_library,
get_pkg_info
Examples
--------
To get the necessary information for the npymath library from NumPy:
>>> npymath_info = np.distutils.misc_util.get_info('npymath')
>>> npymath_info #doctest: +SKIP
{'define_macros': [], 'libraries': ['npymath'], 'library_dirs':
['.../numpy/core/lib'], 'include_dirs': ['.../numpy/core/include']}
This info dict can then be used as input to a `Configuration` instance::
config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
"""
from numpy.distutils.npy_pkg_config import parse_flags
pkg_info = get_pkg_info(pkgname, dirs)
# Translate LibraryInfo instance into a build_info dict
info = parse_flags(pkg_info.cflags())
for k, v in parse_flags(pkg_info.libs()).items():
info[k].extend(v)
# add_extension's extra_info argument expects 'define_macros' rather than 'macros'
info['define_macros'] = info['macros']
del info['macros']
del info['ignored']
return info
def is_bootstrapping():
import __builtin__
try:
__builtin__.__NUMPY_SETUP__
return True
except AttributeError:
return False
__NUMPY_SETUP__ = False
def scons_generate_config_py(target):
"""generate config.py file containing system_info information
used during building the package.
usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from distutils.dir_util import mkpath
from numscons import get_scons_configres_dir, get_scons_configres_filename
d = {}
mkpath(os.path.dirname(target))
f = open(target, 'w')
f.write('# this file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
f.write('# it contains system_info results at the time of building this package.\n')
f.write('__all__ = ["show"]\n\n')
confdir = get_scons_configres_dir()
confilename = get_scons_configres_filename()
for root, dirs, files in os.walk(confdir):
if files:
file = os.path.join(root, confilename)
assert root.startswith(confdir)
pkg_name = '.'.join(root[len(confdir)+1:].split(os.sep))
fid = open(file, 'r')
try:
cnt = fid.read()
d[pkg_name] = eval(cnt)
finally:
fid.close()
# d is a dictionary whose keys are package names, and values the
# corresponding configuration. Each configuration is itself a dictionary
# (lib : libinfo)
f.write('_config = %s\n' % d)
f.write(r'''
def show():
for pkg, config in _config.items():
print("package %s configuration:" % pkg)
for lib, libc in config.items():
print(' %s' % lib)
for line in libc.split('\n'):
print('\t%s' % line)
''')
f.close()
return target
#########################
def default_config_dict(name = None, parent_name = None, local_path=None):
"""Return a configuration dictionary for usage in
configuration() function defined in file setup_<name>.py.
"""
import warnings
warnings.warn('Use Configuration(%r,%r,top_path=%r) instead of '\
'deprecated default_config_dict(%r,%r,%r)'
% (name, parent_name, local_path,
name, parent_name, local_path,
))
c = Configuration(name, parent_name, local_path)
return c.todict()
def dict_append(d, **kws):
for k, v in kws.items():
if k in d:
ov = d[k]
if isinstance(ov,str):
d[k] = v
else:
d[k].extend(v)
else:
d[k] = v
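# Illustration of the merge behaviour above (a sketch): list-valued entries
# are extended in place, string-valued entries are overwritten, and new keys
# are simply added.
#
#     d = {'libraries': ['m'], 'language': 'c'}
#     dict_append(d, libraries=['npymath'], language='c++', define_macros=[('X', 1)])
#     # d == {'libraries': ['m', 'npymath'], 'language': 'c++',
#     #       'define_macros': [('X', 1)]}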
def appendpath(prefix, path):
if os.path.sep != '/':
prefix = prefix.replace('/', os.path.sep)
path = path.replace('/', os.path.sep)
drive = ''
if os.path.isabs(path):
drive = os.path.splitdrive(prefix)[0]
absprefix = os.path.splitdrive(os.path.abspath(prefix))[1]
pathdrive, path = os.path.splitdrive(path)
d = os.path.commonprefix([absprefix, path])
if os.path.join(absprefix[:len(d)], absprefix[len(d):]) != absprefix \
or os.path.join(path[:len(d)], path[len(d):]) != path:
# Handle invalid paths
d = os.path.dirname(d)
subpath = path[len(d):]
if os.path.isabs(subpath):
subpath = subpath[1:]
else:
subpath = path
return os.path.normpath(njoin(drive + prefix, subpath))
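# appendpath joins a prefix and a path; absolute paths are re-rooted under
# the prefix instead of escaping it. Illustrative result (a sketch, POSIX
# separators; the exact result for absolute paths depends on the common
# prefix with the current directory):
#
#     appendpath('build/lib', 'pkg/sub')   # -> 'build/lib/pkg/sub'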
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
f = open(target, 'w')
f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(r'''
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
''')
f.close()
return target
def msvc_version(compiler):
"""Return version major and minor of compiler instance if it is
MSVC, raise an exception otherwise."""
if not compiler.compiler_type == "msvc":
raise ValueError("Compiler instance is not msvc (%s)"\
% compiler.compiler_type)
return compiler._MSVCCompiler__version
if sys.version[:3] >= '2.5':
def get_build_architecture():
from distutils.msvccompiler import get_build_architecture
return get_build_architecture()
else:
#copied from python 2.5.1 distutils/msvccompiler.py
def get_build_architecture():
"""Return the processor architecture.
Possible results are "Intel", "Itanium", or "AMD64".
"""
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return "Intel"
j = sys.version.find(")", i)
return sys.version[i+len(prefix):j]
|
jasonmccampbell/numpy-refactor-sprint
|
numpy/distutils/misc_util.py
|
Python
|
bsd-3-clause
| 80,942
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
import dynamic_forms.fields
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
bases = (models.Model,),
name = 'FormModel',
fields = [
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID'),),
('name', models.CharField(max_length=50, unique=True, verbose_name='Name'),),
('submit_url', models.CharField(max_length=100, unique=True, help_text='The full URL path to the form. It should start and end with a forward slash (<code>/</code>).', verbose_name='Submit URL'),),
('success_url', models.CharField(max_length=100, help_text='The full URL path where the user will be redirected after successfully sending the form. It should start and end with a forward slash (<code>/</code>). If empty, the success URL is generated by appending <code>done/</code> to the “Submit URL”.', default='', blank=True, verbose_name='Success URL'),),
('actions', dynamic_forms.fields.TextMultiSelectField(choices=(
('dynamic_forms.actions.dynamic_form_send_email', 'Send via email'),
('dynamic_forms.actions.dynamic_form_store_database', 'Store in database')), verbose_name='Actions', default=''),),
('form_template', models.CharField(max_length=100, default='dynamic_forms/form.html', verbose_name='Form template path'),),
('success_template', models.CharField(max_length=100, default='dynamic_forms/form_success.html', verbose_name='Success template path'),)],
options = {
'ordering': ['name'],
'verbose_name': 'Dynamic form',
'verbose_name_plural': 'Dynamic forms',
},
),
migrations.CreateModel(
bases = (models.Model,),
name = 'FormFieldModel',
fields = [
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID'),),
('parent_form', models.ForeignKey(to_field='id', to='dynamic_forms.FormModel'),),
('field_type', models.CharField(max_length=255, choices=(
('dynamic_forms.formfields.BooleanField', 'Boolean',),
('dynamic_forms.formfields.ChoiceField', 'Choices',),
('dynamic_forms.formfields.DateField', 'Date',),
('dynamic_forms.formfields.DateTimeField', 'Date and Time',),
('dynamic_forms.formfields.EmailField', 'Email',),
('dynamic_forms.formfields.IntegerField', 'Integer',),
('dynamic_forms.formfields.MultiLineTextField', 'Multi Line Text',),
('dynamic_forms.formfields.SingleLineTextField', 'Single Line Text',),
('dynamic_forms.formfields.TimeField', 'Time',)), verbose_name='Type'),),
('label', models.CharField(max_length=20, verbose_name='Label'),),
('name', models.SlugField(blank=True, verbose_name='Name'),),
('_options', models.TextField(null=True, blank=True, verbose_name='Options'),),
('position', models.SmallIntegerField(default=0, blank=True, verbose_name='Position'),)],
options = {
'ordering': ['parent_form', 'position'],
'verbose_name': 'Form field',
'verbose_name_plural': 'Form fields',
},
),
migrations.CreateModel(
bases = (models.Model,),
name = 'FormModelData',
fields = [
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID'),),
('form', models.ForeignKey(null=True, to_field='id', to='dynamic_forms.FormModel', on_delete=django.db.models.deletion.SET_NULL),),
('value', models.TextField(default='', blank=True, verbose_name='Form data'),),
('submitted', models.DateTimeField(auto_now_add=True, verbose_name='Submitted on'),)],
options = {
'verbose_name': 'Form data',
'verbose_name_plural': 'Form data',
},
),
]
|
uhuramedia/django-dynamic-forms
|
dynamic_forms/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 4,424
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
import StringIO
import mock # pylint: disable=F0401
import fetch_benchmark_deps
def NormPaths(paths):
return sorted([os.path.normcase(p) for p in paths.splitlines()])
class FetchBenchmarkDepsUnittest(unittest.TestCase):
"""The test guards fetch_benchmark_deps.
It assumes the following telemetry APIs always succeed:
telemetry.wpr.archive_info.WprArchiveInfo.DownloadArchivesIfNeeded
catapult_base.cloud_storage.GetFilesInDirectoryIfChanged
"""
def setUp(self):
"""Override sys.argv as if it is called from commnad line."""
self._argv = sys.argv
sys.argv = ['./fetch_benchmark_deps', '']
def _RunFetchBenchmarkDepsTest(self, benchmark_name,
expected_fetched_file_paths = None):
"""Simulates './fetch_benchmark_deps [benchmark_name]'
It checks if the paths returned are expected and have corresponding sha1
checksums. The expected result can be omitted if the dependencies of
the specified benchmarks are subject to change.
Args:
benchmark_name: benchmark name
expected_fetched_file_paths: the expected result.
"""
sys.argv[1] = benchmark_name
output = StringIO.StringIO()
with mock.patch('telemetry.wpr.archive_info.WprArchiveInfo'
'.DownloadArchivesIfNeeded') as mock_download:
with mock.patch('catapult_base.cloud_storage'
'.GetFilesInDirectoryIfChanged') as mock_get:
mock_download.return_value = True
mock_get.GetFilesInDirectoryIfChanged.return_value = True
fetch_benchmark_deps.main(output)
for f in output.getvalue().splitlines():
fullpath = os.path.join(fetch_benchmark_deps.GetChromiumDir(), f)
sha1path = fullpath + '.sha1'
self.assertTrue(os.path.isfile(sha1path))
if expected_fetched_file_paths:
self.assertEquals(expected_fetched_file_paths,
NormPaths(output.getvalue()))
def testFetchWPRs(self):
self._RunFetchBenchmarkDepsTest('smoothness.top_25_smooth')
def testFetchServingDirs(self):
self._RunFetchBenchmarkDepsTest('media.tough_video_cases')
def testFetchOctane(self):
expected = 'src/tools/perf/page_sets/data/octane_001.wpr'
self._RunFetchBenchmarkDepsTest('octane', NormPaths(expected))
|
Chilledheart/chromium
|
tools/perf/fetch_benchmark_deps_unittest.py
|
Python
|
bsd-3-clause
| 2,479
|
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Re-runs the ChromeDriver's client-side commands, given a log file.
Takes a ChromeDriver log file that was created with the --replayable=true
command-line flag for the ChromeDriver binary (or with the same flag for
the run_py_tests.py).
To replay a log file, just run this script with the log file specified
in the --input-log-path flag. Alternatively, construct a CommandSequence
instance and iterate over it to access the logged commands one-by-one.
Notice that for the iteration approach, you must call
CommandSequence.ingestRealResponse with each response.
Implementation:
The CommandSequence class is the core of the implementation here. At a
basic level, it opens the given log file, looks for the next command and
response pair, and returns them (along with their parameters/payload) on
NextCommand, next, or __iter__.
To get effective replay, there are a few deviations from simply verbatim
repeating the logged commands and parameters:
1. Session, window, and element IDs in the log are identified with the
corresponding ID in the new session and substituted in each command
returned.
2. When a response is an error, we need to infer other parts of the
original response that would have been returned along with the
error.
3. If GetSessions is called while there are multiple sessions open,
the log will show more calls than actually occurred (one per open
session, even if it was only called once), so we absorb all of
these calls back into one.
"""
import collections
import json
import optparse
import os
import re
import sys
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
_PARENT_DIR = os.path.join(_THIS_DIR, os.pardir)
_CLIENT_DIR = os.path.join(_PARENT_DIR, "client")
_SERVER_DIR = os.path.join(_PARENT_DIR, "server")
# pylint: disable=g-import-not-at-top
sys.path.insert(1, _CLIENT_DIR)
import command_executor
sys.path.remove(_CLIENT_DIR)
sys.path.insert(1, _SERVER_DIR)
import server
sys.path.remove(_SERVER_DIR)
sys.path.insert(1, _PARENT_DIR)
import util
sys.path.remove(_PARENT_DIR)
# pylint: enable=g-import-not-at-top
class Method(object):
GET = "GET"
POST = "POST"
DELETE = "DELETE"
# TODO(crbug/chromedriver/2511) there should be a single source of truth for
# this data throughout chromedriver code (see e.g. http_handler.cc)
_COMMANDS = {
"AcceptAlert": (Method.POST, "/session/:sessionId/alert/accept"),
"AddCookie": (Method.POST, "/session/:sessionId/cookie"),
"ClearElement": (Method.POST, "/session/:sessionId/element/:id/clear"),
"ClearLocalStorage": (Method.DELETE, "/session/:sessionId/local_storage"),
"ClearSessionStorage":
(Method.DELETE, "/session/:sessionId/session_storage"),
"Click": (Method.POST, "/session/:sessionId/click"),
"ClickElement": (Method.POST, "/session/:sessionId/element/:id/click"),
"CloseWindow": (Method.DELETE, "/session/:sessionId/window"),
"DeleteAllCookies": (Method.DELETE, "/session/:sessionId/cookie"),
"DeleteCookie": (Method.DELETE, "/session/:sessionId/cookie/:name"),
"DeleteNetworkConditions":
(Method.DELETE, "/session/:sessionId/chromium/network_conditions"),
"DismissAlert": command_executor.Command.DISMISS_ALERT,
"DoubleClick": (Method.POST, "/session/:sessionId/doubleclick"),
"ElementScreenshot":
(Method.GET, "/session/:sessionId/element/:id/screenshot"),
"ExecuteAsyncScript": command_executor.Command.EXECUTE_ASYNC_SCRIPT,
"ExecuteCDP": (Method.POST, "/session/:sessionId/goog/cdp/execute"),
"ExecuteScript": (Method.POST, "/session/:sessionId/execute/sync"),
"FindChildElement":
(Method.POST, "/session/:sessionId/element/:id/element"),
"FindChildElements":
(Method.POST, "/session/:sessionId/element/:id/elements"),
"FindElement": (Method.POST, "/session/:sessionId/element"),
"FindElements": (Method.POST, "/session/:sessionId/elements"),
"Freeze": (Method.POST, "/session/:sessionId/goog/page/freeze"),
"FullscreenWindow": (Method.POST, "/session/:sessionId/window/fullscreen"),
"GetActiveElement": command_executor.Command.GET_ACTIVE_ELEMENT,
"GetAlertMessage": (Method.GET, "/session/:sessionId/alert_text"),
"GetCookies": (Method.GET, "/session/:sessionId/cookie"),
"GetElementAttribute":
(Method.GET, "/session/:sessionId/element/:id/attribute/:name"),
"GetElementProperty":
(Method.GET, "/session/:sessionId/element/:id/property/:name"),
"GetElementCSSProperty":
(Method.GET, "/session/:sessionId/element/:id/css/:propertyName"),
"GetElementLocation":
(Method.GET, "/session/:sessionId/element/:id/location"),
"GetElementLocationInView":
(Method.GET, "/session/:sessionId/element/:id/location_in_view"),
"GetElementRect": (Method.GET, "/session/:sessionId/element/:id/rect"),
"GetElementSize": (Method.GET, "/session/:sessionId/element/:id/size"),
"GetElementTagName": (Method.GET, "/session/:sessionId/element/:id/name"),
"GetElementText": (Method.GET, "/session/:sessionId/element/:id/text"),
"GetElementValue": (Method.GET, "/session/:sessionId/element/:id/value"),
"GetGeolocation": (Method.GET, "/session/:sessionId/location"),
"GetLocalStorageItem":
(Method.GET, "/session/:sessionId/local_storage/key/:key"),
"GetLocalStorageKeys":
(Method.GET, "/session/:sessionId/local_storage"),
"GetLocalStorageSize":
(Method.GET, "/session/:sessionId/local_storage/size"),
"GetLog": (Method.POST, "/session/:sessionId/se/log"),
"GetLogTypes": (Method.GET, "/session/:sessionId/se/log/types"),
"GetNamedCookie": (Method.GET, "/session/:sessionId/cookie/:name"),
"GetNetworkConditions":
(Method.GET, "/session/:sessionId/chromium/network_conditions"),
"GetNetworkConnection":
(Method.GET, "/session/:sessionId/network_connection"),
"GetSessionCapabilities": (Method.GET, "/session/:sessionId"),
"GetSessionStorageItem":
(Method.GET, "/session/:sessionId/session_storage/key/:key"),
"GetSessionStorageKeys":
(Method.GET, "/session/:sessionId/session_storage"),
"GetSessionStorageSize":
(Method.GET, "/session/:sessionId/session_storage/size"),
"GetSessions": (Method.GET, "/sessions"),
"GetSource": (Method.GET, "/session/:sessionId/source"),
"GetStatus": (Method.GET, "status"),
"GetTimeouts": (Method.GET, "/session/:sessionId/timeouts"),
"GetTitle": (Method.GET, "/session/:sessionId/title"),
"GetUrl": (Method.GET, "/session/:sessionId/url"),
"GetWindow": command_executor.Command.GET_CURRENT_WINDOW_HANDLE,
"GetWindowPosition":
(Method.GET, "/session/:sessionId/window/:windowHandle/position"),
"GetWindowRect":
(Method.GET, "/session/:sessionId/window/rect"),
"GetWindowSize":
(Method.GET, "/session/:sessionId/window/:windowHandle/size"),
"GetWindows": command_executor.Command.GET_WINDOW_HANDLES,
"GoBack": (Method.POST, "/session/:sessionId/back"),
"GoForward": (Method.POST, "/session/:sessionId/forward"),
"HeapSnapshot": (Method.GET, "/session/:sessionId/chromium/heap_snapshot"),
"InitSession": (Method.POST, "/session"),
"IsAlertOpen": (Method.GET, "/session/:sessionId/alert"),
"IsElementDisplayed":
(Method.GET, "/session/:sessionId/element/:id/displayed"),
"IsElementEnabled": (Method.GET, "/session/:sessionId/element/:id/enabled"),
"IsElementEqual":
(Method.GET, "/session/:sessionId/element/:id/equals/:other"),
"IsElementSelected":
(Method.GET, "/session/:sessionId/element/:id/selected"),
"IsLoading": (Method.GET, "/session/:sessionId/is_loading"),
"LaunchApp": (Method.POST, "/session/:sessionId/chromium/launch_app"),
"MaximizeWindow": (Method.POST, "/session/:sessionId/window/maximize"),
"MinimizeWindow": (Method.POST, "/session/:sessionId/window/minimize"),
"MouseDown": (Method.POST, "/session/:sessionId/buttondown"),
"MouseMove": (Method.POST, "/session/:sessionId/moveto"),
"MouseUp": (Method.POST, "/session/:sessionId/buttonup"),
"Navigate": (Method.POST, "/session/:sessionId/url"),
"PerformActions": (Method.POST, "/session/:sessionId/actions"),
"Quit": (Method.DELETE, "/session/:sessionId"),
"Refresh": (Method.POST, "/session/:sessionId/refresh"),
"ReleaseActions": (Method.DELETE, "/session/:sessionId/actions"),
"RemoveLocalStorageItem":
(Method.DELETE, "/session/:sessionId/local_storage/key/:key"),
"RemoveSessionStorageItem":
(Method.DELETE, "/session/:sessionId/session_storage/key/:key"),
"Resume": (Method.POST, "/session/:sessionId/goog/page/resume"),
"Screenshot": (Method.GET, "/session/:sessionId/screenshot"),
"SendCommand": (Method.POST, "/session/:sessionId/chromium/send_command"),
"SendCommandAndGetResult":
(Method.POST, "/session/:sessionId/chromium/send_command_and_get_result"),
"SendCommandFromWebSocket":
(Method.POST, "session/:sessionId/chromium/send_command_from_websocket"),
"SetAlertPrompt": command_executor.Command.SET_ALERT_VALUE,
"SetGeolocation": (Method.POST, "/session/:sessionId/location"),
"SetImplicitWait":
(Method.POST, "/session/:sessionId/timeouts/implicit_wait"),
"SetLocalStorageKeys": (Method.POST, "/session/:sessionId/local_storage"),
"SetNetworkConditions":
(Method.POST, "/session/:sessionId/chromium/network_conditions"),
"SetNetworkConnection":
(Method.POST, "/session/:sessionId/network_connection"),
"SetScriptTimeout":
(Method.POST, "/session/:sessionId/timeouts/async_script"),
"SetSessionStorageItem":
(Method.POST, "/session/:sessionId/session_storage"),
"SetTimeouts": (Method.POST, "/session/:sessionId/timeouts"),
"SetWindowPosition":
(Method.POST, "/session/:sessionId/window/:windowHandle/position"),
"SetWindowRect": (Method.POST, "/session/:sessionId/window/rect"),
"SetWindowSize":
(Method.POST, "/session/:sessionId/window/:windowHandle/size"),
"SubmitElement": (Method.POST, "/session/:sessionId/element/:id/submit"),
"SwitchToFrame": (Method.POST, "/session/:sessionId/frame"),
"SwitchToParentFrame": (Method.POST, "/session/:sessionId/frame/parent"),
"SwitchToWindow": (Method.POST, "/session/:sessionId/window"),
"Tap": (Method.POST, "/session/:sessionId/touch/click"),
"TouchDoubleTap": (Method.POST, "/session/:sessionId/touch/doubleclick"),
"TouchDown": (Method.POST, "/session/:sessionId/touch/down"),
"TouchFlick": (Method.POST, "/session/:sessionId/touch/flick"),
"TouchLongPress": (Method.POST, "/session/:sessionId/touch/longclick"),
"TouchMove": (Method.POST, "/session/:sessionId/touch/move"),
"TouchScroll": (Method.POST, "/session/:sessionId/touch/scroll"),
"TouchUp": (Method.POST, "/session/:sessionId/touch/up"),
"Type": (Method.POST, "/session/:sessionId/keys"),
"TypeElement": (Method.POST, "/session/:sessionId/element/:id/value"),
"UploadFile": (Method.POST, "/session/:sessionId/file")
}
MULTI_SESSION_COMMANDS = ["GetSessions"]
class ReplayException(Exception):
"""Thrown for irrecoverable problems in parsing the log file."""
def _CountChar(line, opening_char, closing_char):
"""Count (number of opening_char) - (number of closing_char) in |line|.
Used to check for the end of JSON parameters. Ignores characters inside of
non-escaped quotes.
Args:
line: line to count characters in
opening_char: "+1" character, { or [
closing_char: "-1" character, ] or }
Returns:
(number of opening_char) - (number of closing_char)
"""
in_quote = False
total = 0
for i, c in enumerate(line):
if not in_quote and c is opening_char:
total += 1
if not in_quote and c is closing_char:
total -= 1
if c == '"' and (i == 0 or line[i-1] != "\\"):
in_quote = not in_quote
return total
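# Quick illustration of _CountChar (a sketch): braces inside quoted strings
# are ignored, so a balanced JSON fragment counts to zero while an
# unterminated one stays positive.
#
#     _CountChar('{"a": "}"}', "{", "}")           # -> 0
#     _CountChar('{"nested": {"x": 1}', "{", "}")  # -> 1 (still open)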
def _GetCommandName(header_line):
"""Return the command name from the logged header line."""
return header_line.split()[3]
def _GetEntryType(header_line):
return header_line.split()[2]
def _GetSessionId(header_line):
"""Return the session ID from the logged header line."""
return header_line.split()[1][1:-1]
# TODO(cwinstanley): Might just want to literally dump these to strings and
# search using regexes. All the ids have distinctive formats
# and this would allow getting even ids returned from scripts.
# TODO(cwinstanley): W3C element compliance
def _GetAnyElementIds(payload):
"""Looks for any element, session, or window IDs, and returns them.
Payload should be passed as a dict or list.
Args:
payload: payload to check for IDs, as a python list or dict.
Returns:
list of ID strings, in order, in this payload
"""
element_tag="element-6066-11e4-a52e-4f735466cecf"
if isinstance(payload, dict):
if element_tag in payload:
return [payload[element_tag]]
elif isinstance(payload, list):
elements = [item[element_tag] for item in payload if element_tag in item]
windows = [item for item in payload if "CDwindow" in item]
if not elements and not windows:
return None
return elements + windows
return None
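# Sketch of what _GetAnyElementIds extracts (the ID values below are made up):
#
#     _GetAnyElementIds(
#         {"element-6066-11e4-a52e-4f735466cecf": "0.123_1"})      # -> ["0.123_1"]
#     _GetAnyElementIds(["CDwindow-ABC123", "CDwindow-DEF456"])
#     # -> ["CDwindow-ABC123", "CDwindow-DEF456"]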
def _ReplaceWindowAndElementIds(payload, id_map):
"""Replace the window, session, and element IDs in |payload| using |id_map|.
Checks |payload| for window, element, and session IDs that are in |id_map|,
and replaces them.
Args:
payload: payload in which to replace IDs. This is edited in-place.
id_map: mapping from old to new IDs that should be replaced.
"""
if isinstance(payload, dict):
for key, value in payload.items():
if isinstance(value, basestring) and value in id_map:
payload[key] = id_map[value]
else:
_ReplaceWindowAndElementIds(payload[key], id_map)
elif isinstance(payload, list):
for i, value in enumerate(payload):
if isinstance(value, basestring) and value in id_map:
payload[i] = id_map[value]
else:
_ReplaceWindowAndElementIds(payload[i], id_map)
def _ReplaceUrl(payload, base_url):
"""Swap out the base URL (starting with protocol) in this payload.
Useful when switching ports or URLs.
Args:
payload: payload in which to do the url replacement
base_url: url to replace any applicable urls in |payload| with.
"""
if base_url and "url" in payload:
payload["url"] = re.sub(r"^https?://((?!/).)*/",
base_url + "/", payload["url"])
def _ReplaceBinary(payload, binary):
"""Replace the binary path in |payload| with the one in |binary|.
If |binary| exists but there is no binary in |payload|, it is added at the
appropriate location. Operates in-place.
Args:
payload: InitSession payload as a dictionary to replace binary in
binary: new binary to replace in payload. If binary is not truthy, but
there is a binary path in |payload|, we remove the binary path, which will
trigger ChromeDriver's mechanism for locating the Chrome binary.
"""
if ("desiredCapabilities" in payload
and "goog:chromeOptions" in payload["desiredCapabilities"]):
if binary:
(payload["desiredCapabilities"]["goog:chromeOptions"]
["binary"]) = binary
elif "binary" in payload["desiredCapabilities"]["goog:chromeOptions"]:
del payload["desiredCapabilities"]["goog:chromeOptions"]["binary"]
elif binary:
if "desiredCapabilities" not in payload:
payload["desiredCapabilities"] = {
"goog:chromeOptions": {
"binary": binary
}
}
elif "goog:chromeOptions" not in payload["desiredCapabilities"]:
payload["desiredCapabilities"]["goog:chromeOptions"] = {
"binary": binary
}
def _ReplaceSessionId(payload, id_map):
"""Update session IDs in this payload to match the current session.
Operates in-place.
Args:
payload: payload in which to replace session IDs.
id_map: mapping from logged IDs to IDs in the current session
"""
if "sessionId" in payload and payload["sessionId"] in id_map:
payload["sessionId"] = id_map[payload["sessionId"]]
class _Payload(object):
"""Object containing a payload, which usually belongs to a LogEntry."""
def __init__(self, payload_string):
"""Initialize the payload object.
Parses the payload, represented as a string, into a Python object.
Payloads appear in the log as a multi-line (usually) JSON string starting
on the header line, like the following, where the payload starts after the
word InitSession:
[1532467931.153][INFO]: [<session_id>] COMMAND InitSession {
"desiredCapabilities": {
"goog:chromeOptions": {
"args": [ "no-sandbox", "disable-gpu" ],
"binary": "<binary_path>"
}
}
}
Payloads can also be "singular" entries, like "1", "false", be an error
string (signified by the payload starting with "ERROR") or be totally
nonexistent for a given command.
Args:
payload_string: payload represented as a string.
"""
self.is_empty = not payload_string
self.is_error = not self.is_empty and payload_string[:5] == "ERROR"
if self.is_error or self.is_empty:
self.payload_raw = payload_string
else:
self.payload_raw = json.loads(payload_string)
def AddSessionId(self, session_id):
"""Adds a session ID into this payload.
Args:
session_id: session ID to add.
"""
self.payload_raw["sessionId"] = session_id
def SubstituteIds(self, id_map, binary, base_url="", init_session=False):
"""Replace old IDs in the given payload with ones for the current session.
Args:
id_map: mapping from logged IDs to current-session ones
binary: binary to add into this command, if |init_session| is True
base_url: base url to replace in the payload for navigation commands
init_session: whether this payload belongs to an InitSession command.
"""
if self.is_error or self.is_empty:
return
_ReplaceWindowAndElementIds(self.payload_raw, id_map)
_ReplaceSessionId(self.payload_raw, id_map)
if init_session:
_ReplaceBinary(self.payload_raw, binary)
_ReplaceUrl(self.payload_raw, base_url)
def GetAnyElementIds(self):
return _GetAnyElementIds(self.payload_raw)
class _GetSessionsResponseEntry(object):
"""Special LogEntry object for GetSessions commands.
We need a separate class for GetSessions because we need to manually build
the payload from separate log entries in CommandSequence._HandleGetSessions.
This means that we cannot use the payload object that we use for other
commands. There is also no canonical session ID for GetSessions.
"""
def __init__(self, payload):
"""Initialize the _GetSessionsResponseEntry.
Args:
payload: python dict of the payload for this GetSessions response
"""
self._payload = payload
self.name = "GetSessions"
self.session_id = ""
def GetPayloadPrimitive(self):
"""Get the payload for this entry."""
return self._payload
class LogEntry(object):
"""A helper class that can store a command or a response.
Public attributes:
name: name of the command, like InitSession.
session_id: session ID for this command, left as "" for GetSessions.
payload: parameters for a command or the payload returned with a response.
"""
_COMMAND = "COMMAND"
_RESPONSE = "RESPONSE"
def __init__(self, header_line, payload_string):
"""Initialize the LogEntry.
Args:
header_line: the line from the log that has the header of this entry.
This also sometimes has part or all of the payload in it.
Header lines look like the following:
[1532467931.153][INFO]: [<session_id>] <COMMAND or RESPONSE> <command>
payload_string: string representing the payload (usually a JSON dict, but
occasionally a string, bool, or int).
"""
self.name = _GetCommandName(header_line)
self._type = _GetEntryType(header_line)
self.session_id = _GetSessionId(header_line)
self.payload = _Payload(payload_string)
def IsResponse(self):
"""Returns whether this instance is a response."""
return self._type == self._RESPONSE
def IsCommand(self):
"""Returns whether this instance is a command."""
return self._type == self._COMMAND
def UpdatePayloadForReplaySession(self,
id_map=None,
binary="",
base_url=None):
"""Processes IDs in the payload to match the current session.
This replaces old window, element, and session IDs in the payload to match
the ones in the current session as defined in |id_map|. It also replaces
the binary and the url if appropriate.
Args:
id_map:
dict matching element, session, and window IDs in the logged session
with the ones from the current (replaying) session.
binary:
Chrome binary to replace if this is an InitSession call. The binary
will be removed if this is not set. This will cause ChromeDriver to
use it's own algorithm to find an appropriate Chrome binary.
base_url:
Url to replace the ones in the log with in Navigate commands.
"""
self.payload.AddSessionId(self.session_id)
self.payload.SubstituteIds(
id_map, binary, base_url, self.name == "InitSession")
def GetPayloadPrimitive(self):
"""Returns the payload associated with this LogEntry as a primitive."""
return self.payload.payload_raw
class _ParserWithUndo(object):
def __init__(self, log_file):
"""Wrapper around _Parser that implements a UndoGetNext function.
Args:
log_file: file that we wish to open as the log. This should be a
Python file object, or something else with readline capability.
"""
self._parser = _Parser(log_file)
self._saved_log_entry = None
def GetNext(self):
"""Get the next client command or response in the log.
Returns:
LogEntry object representing the next command or response in the log.
"""
if self._saved_log_entry is not None:
log_entry = self._saved_log_entry
self._saved_log_entry = None
return log_entry
return self._parser.GetNext()
def UndoGetNext(self, log_entry):
"""Undo the most recent GetNext call that returned |log_entry|.
Simulates going backwards in the log file by storing |log_entry| and
returning that on the next GetNext call.
Args:
      log_entry: the entry returned by the GetNext call that we wish to "undo".
    Raises:
      RuntimeError: if this is called multiple times in a row, which would
        cause the object to lose the previously undone entry.
"""
if self._saved_log_entry is not None:
raise RuntimeError('Cannot undo multiple times in a row.')
self._saved_log_entry = log_entry
class _Parser(object):
"""Class responsible for parsing (and not interpreting) the log file."""
# Matches headers for client commands/responses only (not DevTools events)
_CLIENT_PREAMBLE_REGEX = re.compile(
r"^\[[0-9]{10}\.[0-9]{3}\]\[INFO\]: \[[a-f0-9]*\]")
# Matches headers for client commands/responses when readable-timestamp
  # option is selected. Depending on the OS, the final component may be
  # 3 or 6 digits.
_CLIENT_PREAMBLE_REGEX_READABLE = re.compile(
r"^\[[0-9]{2}-[0-9]{2}-[0-9]{4} "
"[0-9]{2}:[0-9]{2}:[0-9]{2}.([0-9]{3}){1,2}\]\[INFO\]: \[[a-f0-9]*\]")
def __init__(self, log_file):
"""Initialize the _Parser instance.
Args:
log_file: file that we wish to open as the log. This should be a
Python file object, or something else with readline capability.
"""
self._log_file = log_file
def GetNext(self):
"""Get the next client command or response in the log.
Returns:
LogEntry object representing the next command or response in the log.
Returns None if at the end of the log
"""
header = self._GetNextClientHeaderLine()
if not header:
return None
payload_string = self._GetPayloadString(header)
return LogEntry(header, payload_string)
def _GetNextClientHeaderLine(self):
"""Get the next line that is a command or response for the client.
Returns:
String containing the header of the next client command/response, or
      None if we're at the end of the log file.
"""
while True:
next_line = self._log_file.readline()
if not next_line: # empty string indicates end of the log file.
return None
if re.match(self._CLIENT_PREAMBLE_REGEX, next_line):
return next_line
if re.match(self._CLIENT_PREAMBLE_REGEX_READABLE, next_line):
        # Readable timestamp contains a space between date and time,
        # which breaks other parsing of the header. Replace with underscore.
next_line = next_line.replace(" ", "_", 1)
return next_line
def _GetPayloadString(self, header_line):
"""Gets the payload for the current command in self._logfile.
Parses the given header line, along with any additional lines as
applicable, to get a complete JSON payload object from the current
    command in the log file. Note that the payload can be JSON, an error
(just a string), or something else like an int or a boolean.
Args:
header_line: the first line of this command
Raises:
ReplayException: if the JSON appears to be incomplete in the log
Returns:
payload of the command as a string
"""
min_header = 5
header_segments = header_line.split()
if len(header_segments) < min_header:
return None
payload = " ".join(header_segments[min_header-1:])
opening_char = header_segments[min_header-1]
if opening_char == "{":
closing_char = "}"
elif opening_char == "[":
closing_char = "]"
else:
return payload # payload is singular, like "1", "false", or an error
opening_char_count = (payload.count(opening_char)
- payload.count(closing_char))
while opening_char_count > 0:
next_line = self._log_file.readline()
if not next_line:
# It'd be quite surprising that the log is truncated in the middle of
# a JSON; far more likely that the parsing failed for some reason.
raise ReplayException(
"Reached end of file without reaching end of JSON payload")
payload += next_line
opening_char_count += _CountChar(next_line, opening_char,
closing_char)
return payload
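# Illustrative example (payload content is made up): if a header line ends with
# '{ "desiredCapabilities": {' and the following log line is '}}', the brace
# count only balances after that second line, so _GetPayloadString returns the
# two lines joined into a single string.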
class CommandSequence(object):
"""Interface to the sequence of commands in a log file."""
def __init__(self, log_path="", base_url=None, chrome_binary=None):
"""Initialize the CommandSequence.
Args:
      log_path: file object to read logs from (usually opened via a
        "with" statement).
base_url: url to replace the base of logged urls with, if
applicable. Replaces port number as well.
chrome_binary: use this Chrome binary instead of the one in the log,
if not None.
"""
self._base_url = base_url
self._binary = chrome_binary
self._id_map = {}
self._parser = _ParserWithUndo(log_path)
self._staged_logged_ids = None
self._staged_logged_session_id = None
self._last_response = None
def NextCommand(self, previous_response):
"""Get the next command in the log file.
    Gets the next command from the log, with its payload rewritten so that it
    is ready to be executed directly in the new session.
Args:
previous_response: the response payload from running the previous command
        returned by this function; None if this is the first command, or if
element, session, and window ID substitution is not desired (i.e.
use the logged IDs). This provides the IDs that are then mapped
back onto the ones in the log to formulate future commands correctly.
Raises:
ReplayException: there is a problem with the log making it not
parseable.
Returns:
None if there are no remaining logs.
Otherwise, |command|, a LogEntry object with the following fields:
name: command name (e.g. InitSession)
type: either LogEntry.COMMAND or LogEntry.RESPONSE
payload: parameters passed with the command
session_id: intended session ID for the command, or "" if the
command is GetSessions.
"""
if previous_response:
self._IngestRealResponse(previous_response)
command = self._parser.GetNext()
if not command: # Reached end of log file
return None
if not command.IsCommand():
raise ReplayException("Command and Response unexpectedly out of order.")
if command.name == "GetSessions":
return self._HandleGetSessions(command)
command.UpdatePayloadForReplaySession(
self._id_map, self._binary, self._base_url)
response = self._parser.GetNext()
if not response:
return command
if not response.IsResponse():
raise ReplayException("Command and Response unexpectedly out of order.")
self._IngestLoggedResponse(response)
return command
def _IngestRealResponse(self, response):
"""Process the actual response from the previously issued command.
Ingests the given response that came from calling the last command on
the running ChromeDriver replay instance. This is the step where the
session and element IDs are matched between |response| and the logged
response.
Args:
response: Python dict of the real response to be analyzed for IDs.
"""
if "value" in response and self._staged_logged_ids:
real_ids = _GetAnyElementIds(response["value"])
if real_ids and self._staged_logged_ids:
for id_old, id_new in zip(self._staged_logged_ids, real_ids):
self._id_map[id_old] = id_new
self._staged_logged_ids = None
# In W3C format, the http response is a single key dict,
# where the value is None, a single value, or another dictionary
# sessionId is contained in the nested dictionary
if (self._staged_logged_session_id
and "value" in response and response["value"]
and isinstance(response["value"], dict)
and "sessionId" in response["value"]):
self._id_map[self._staged_logged_session_id] = (
response["value"]["sessionId"])
self._staged_logged_session_id = None
def _IngestLoggedResponse(self, response):
"""Reads the response at the current position in the log file.
Also matches IDs between the logged and new sessions.
Args:
response: the response from the log (from _parser.GetNext)
"""
self._last_response = response # store for testing purposes
self._staged_logged_ids = response.payload.GetAnyElementIds()
if response.name == "InitSession":
self._staged_logged_session_id = response.session_id
def _HandleGetSessions(self, first_command):
"""Special case handler for the GetSessions command.
    GetSessions is broadcast to, and logged by, each of the active sessions in
    the ChromeDriver instance, so it does not follow the usual
    command-response-command-response ordering in the log. This handler simply
    consumes all of the log entries resulting from that command until it
    reaches the next unrelated command in the log. The responses are combined
    into a single list, one entry per GetSessions sub-call; this is not the
    same as what is in the log file, but it is what ChromeDriver returns in
    real life.
Args:
first_command: The first GetSessions command from the log
Returns:
first_command: the command that triggered all of the calls absorbed by
this function
"""
command_response_pairs = collections.defaultdict(dict)
command_response_pairs[first_command.session_id] = (
{"command": first_command})
while True:
next_entry = self._parser.GetNext()
if not next_entry:
self._parser.UndoGetNext(next_entry)
break
if next_entry.IsResponse():
command_response_pairs[next_entry.session_id]["response"] = next_entry
elif next_entry.IsCommand():
if (next_entry.name != first_command.name
or next_entry.session_id in command_response_pairs):
self._parser.UndoGetNext(next_entry)
break
command_response_pairs[next_entry.session_id]["command"] = next_entry
response = [
{u"id": key, u"capabilities": val["response"].GetPayloadPrimitive()}
for key, val in command_response_pairs.items()
]
self._last_response = _GetSessionsResponseEntry(response)
return first_command
class Replayer(object):
"""Replays the commands in the log file, using CommandSequence internally.
This class provides the command-line functionality for this file.
"""
def __init__(self, logfile, server, chrome_binary, base_url=None):
"""Initialize the Replayer instance.
Args:
logfile: log file handle object to replay from.
      server: the running ChromeDriver server (a server.Server instance) to
        replay the logged commands against.
      chrome_binary: path to the Chrome binary to use, or None to let
        ChromeDriver find an appropriate one on its own.
      base_url: string, base of the url to replace in the logged urls (useful
        for when ports change).
"""
# TODO(cwinstanley) Add Android support and perhaps support for other
# chromedriver command line options.
self.executor = command_executor.CommandExecutor(server.GetUrl())
self.command_sequence = CommandSequence(logfile, base_url=base_url,
chrome_binary=chrome_binary)
def Run(self):
"""Runs the replay."""
real_response = None
while True:
command = self.command_sequence.NextCommand(real_response)
if not command:
break
real_response = self.executor.Execute(_COMMANDS[command.name],
command.GetPayloadPrimitive())
def StartChromeDriverServer(chromedriver_binary,
output_log_path,
devtools_replay_path="",
replayable=False,
additional_args=None):
chromedriver = util.GetAbsolutePathOfUserPath(chromedriver_binary)
if (not os.path.exists(chromedriver) and
util.GetPlatformName() == "win" and
not chromedriver.lower().endswith(".exe")):
chromedriver = chromedriver + ".exe"
if output_log_path:
output_log_path = util.GetAbsolutePathOfUserPath(output_log_path)
  chromedriver_server = server.Server(chromedriver,
log_path=output_log_path,
devtools_replay_path=devtools_replay_path,
replayable=replayable,
additional_args=additional_args)
return chromedriver_server
def _CommandLineError(parser, message):
parser.error(message + '\nPlease run "%s --help" for help' % __file__)
def _GetCommandLineOptions():
"""Get, parse, and error check command line options for this file."""
usage = "usage: %prog <chromedriver binary> <input log path> [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option(
"", "--output-log-path",
help="Output verbose server logs to this file")
parser.add_option(
"", "--chrome", help="Path to a build of the chrome binary. If not\n"
"specified, uses ChromeDriver's own algorithm to find Chrome.")
parser.add_option(
"", "--base-url", help="Base url to replace logged urls (in "
"navigate, getUrl, and similar commands/responses).")
parser.add_option(
"", "--devtools-replay", help="Replay DevTools actions in addition\n"
"to client-side actions")
parser.add_option(
"", "--replayable", help="Generate logs that do not have truncated\n"
"strings so that they can be replayed again.")
parser.add_option(
'', '--additional-args', action='append',
help='Additional arguments to add on ChromeDriver command line')
options, args = parser.parse_args()
if len(args) < 2:
_CommandLineError(parser,
'ChromeDriver binary and/or input log path missing.')
if len(args) > 2:
_CommandLineError(parser, 'Too many command line arguments.')
if not os.path.exists(args[0]):
_CommandLineError(parser, 'Path given for chromedriver is invalid.')
if options.chrome and not os.path.exists(options.chrome):
_CommandLineError(parser, 'Path given by --chrome is invalid.')
if options.replayable and not options.output_log_path:
_CommandLineError(
parser, 'Replayable log option needs --output-log-path specified.')
return options, args
def main():
options, args = _GetCommandLineOptions()
devtools_replay_path = args[1] if options.devtools_replay else None
server = StartChromeDriverServer(args[0], options.output_log_path,
devtools_replay_path, options.replayable, options.additional_args)
input_log_path = util.GetAbsolutePathOfUserPath(args[1])
chrome_binary = (util.GetAbsolutePathOfUserPath(options.chrome)
if options.chrome else None)
with open(input_log_path) as logfile:
Replayer(logfile, server, chrome_binary, options.base_url).Run()
server.Kill()
if __name__ == "__main__":
main()
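# Example invocation (paths and URL are illustrative):
#   python client_replay.py ./chromedriver ./chromedriver_verbose.log \
#       --base-url=http://localhost:8000 --output-log-path=./replay_output.log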
|
ric2b/Vivaldi-browser
|
chromium/chrome/test/chromedriver/log_replay/client_replay.py
|
Python
|
bsd-3-clause
| 37,637
|
import collections
import os
import unittest
import mpi4py.MPI
import pytest
from chainermn.communicators.mpi_communicator_base import MpiCommunicatorBase
class NodeAwareNaiveCommunicator(MpiCommunicatorBase):
def __init__(self, mpi_comm):
super(NodeAwareNaiveCommunicator, self).__init__(mpi_comm)
def allreduce_grad(self, model):
raise NotImplementedError()
class TestMpiCommunicatorBase(unittest.TestCase):
def setUp(self):
self.mpi_comm = mpi4py.MPI.COMM_WORLD
self.communicator = NodeAwareNaiveCommunicator(self.mpi_comm)
def test_intra_rank_with_env(self):
if 'MV2_COMM_WORLD_LOCAL_RANK' in os.environ: # MVAPICH
expected = int(os.environ['MV2_COMM_WORLD_LOCAL_RANK'])
elif 'OMPI_COMM_WORLD_LOCAL_RANK' in os.environ: # OpenMPI
expected = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
else:
pytest.skip('No MPI specified')
self.assertEqual(self.communicator.intra_rank, expected)
def test_intra_size_with_env(self):
        if 'MV2_COMM_WORLD_LOCAL_SIZE' in os.environ:  # MVAPICH
            expected = int(os.environ['MV2_COMM_WORLD_LOCAL_SIZE'])
        elif 'OMPI_COMM_WORLD_LOCAL_SIZE' in os.environ:  # OpenMPI
            expected = int(os.environ['OMPI_COMM_WORLD_LOCAL_SIZE'])
        else:
            pytest.skip('No MPI specified')
        self.assertEqual(self.communicator.intra_size, expected)
def test_inter_rank_and_size(self):
ranks_and_sizes = self.mpi_comm.gather((
self.communicator.inter_rank, self.communicator.inter_size))
if self.mpi_comm.rank == 0:
for inter_rank, inter_size in ranks_and_sizes:
self.assertTrue(0 <= inter_rank < inter_size)
sizes = list(set(x[1] for x in ranks_and_sizes))
self.assertEqual(len(sizes), 1)
size = sizes[0]
ranks = list(sorted(set(x[0] for x in ranks_and_sizes)))
self.assertEqual(ranks, list(range(size)))
def test_intra_rank_and_size(self):
ranks_and_sizes = self.mpi_comm.gather((
self.communicator.intra_rank, self.communicator.intra_size,
self.communicator.inter_rank, self.communicator.inter_size))
if self.mpi_comm.rank == 0:
for intra_rank, intra_size, _, _ in ranks_and_sizes:
self.assertTrue(0 <= intra_rank < intra_size)
inter_rank_to_intra_ranks = collections.defaultdict(list)
for intra_rank, _, inter_rank, _ in ranks_and_sizes:
inter_rank_to_intra_ranks[inter_rank].append(intra_rank)
for ranks in inter_rank_to_intra_ranks.values():
ranks.sort()
for _, intra_size, inter_rank, _ in ranks_and_sizes:
self.assertEqual(
inter_rank_to_intra_ranks[inter_rank],
list(range(intra_size)))
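# Note (illustrative): these assertions are only meaningful when the test is
# launched under an MPI runner with several processes, e.g.
#   mpiexec -n 4 python -m pytest test_node_aware_communicator_base.py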
|
rezoo/chainer
|
tests/chainermn_tests/communicator_tests/test_node_aware_communicator_base.py
|
Python
|
mit
| 2,933
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import datetime
from cryptography import utils, x509
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.primitives import hashes
@utils.register_interface(x509.Certificate)
class _Certificate(object):
def __init__(self, backend, x509):
self._backend = backend
self._x509 = x509
def fingerprint(self, algorithm):
h = hashes.Hash(algorithm, self._backend)
bio = self._backend._create_mem_bio()
res = self._backend._lib.i2d_X509_bio(
bio, self._x509
)
assert res == 1
der = self._backend._read_mem_bio(bio)
h.update(der)
return h.finalize()
@property
def version(self):
version = self._backend._lib.X509_get_version(self._x509)
if version == 0:
return x509.Version.v1
elif version == 2:
return x509.Version.v3
else:
raise x509.InvalidVersion(
"{0} is not a valid X509 version".format(version), version
)
@property
def serial(self):
asn1_int = self._backend._lib.X509_get_serialNumber(self._x509)
assert asn1_int != self._backend._ffi.NULL
bn = self._backend._lib.ASN1_INTEGER_to_BN(
asn1_int, self._backend._ffi.NULL
)
assert bn != self._backend._ffi.NULL
bn = self._backend._ffi.gc(bn, self._backend._lib.BN_free)
return self._backend._bn_to_int(bn)
def public_key(self):
pkey = self._backend._lib.X509_get_pubkey(self._x509)
assert pkey != self._backend._ffi.NULL
pkey = self._backend._ffi.gc(pkey, self._backend._lib.EVP_PKEY_free)
return self._backend._evp_pkey_to_public_key(pkey)
@property
def not_valid_before(self):
asn1_time = self._backend._lib.X509_get_notBefore(self._x509)
return self._parse_asn1_time(asn1_time)
@property
def not_valid_after(self):
asn1_time = self._backend._lib.X509_get_notAfter(self._x509)
return self._parse_asn1_time(asn1_time)
def _parse_asn1_time(self, asn1_time):
assert asn1_time != self._backend._ffi.NULL
generalized_time = self._backend._lib.ASN1_TIME_to_generalizedtime(
asn1_time, self._backend._ffi.NULL
)
assert generalized_time != self._backend._ffi.NULL
generalized_time = self._backend._ffi.gc(
generalized_time, self._backend._lib.ASN1_GENERALIZEDTIME_free
)
time = self._backend._ffi.string(
self._backend._lib.ASN1_STRING_data(
self._backend._ffi.cast("ASN1_STRING *", generalized_time)
)
).decode("ascii")
return datetime.datetime.strptime(time, "%Y%m%d%H%M%SZ")
@property
def issuer(self):
issuer = self._backend._lib.X509_get_issuer_name(self._x509)
assert issuer != self._backend._ffi.NULL
return self._build_x509_name(issuer)
@property
def subject(self):
subject = self._backend._lib.X509_get_subject_name(self._x509)
assert subject != self._backend._ffi.NULL
return self._build_x509_name(subject)
def _build_x509_name(self, x509_name):
count = self._backend._lib.X509_NAME_entry_count(x509_name)
attributes = []
for x in range(count):
entry = self._backend._lib.X509_NAME_get_entry(x509_name, x)
obj = self._backend._lib.X509_NAME_ENTRY_get_object(entry)
assert obj != self._backend._ffi.NULL
data = self._backend._lib.X509_NAME_ENTRY_get_data(entry)
assert data != self._backend._ffi.NULL
buf = self._backend._ffi.new("unsigned char **")
res = self._backend._lib.ASN1_STRING_to_UTF8(buf, data)
assert res >= 0
assert buf[0] != self._backend._ffi.NULL
buf = self._backend._ffi.gc(
buf, lambda buf: self._backend._lib.OPENSSL_free(buf[0])
)
value = self._backend._ffi.buffer(buf[0], res)[:].decode('utf8')
oid = self._obj2txt(obj)
attributes.append(
x509.NameAttribute(
x509.ObjectIdentifier(oid), value
)
)
return x509.Name(attributes)
def _obj2txt(self, obj):
# Set to 80 on the recommendation of
# https://www.openssl.org/docs/crypto/OBJ_nid2ln.html#return_values
buf_len = 80
buf = self._backend._ffi.new("char[]", buf_len)
res = self._backend._lib.OBJ_obj2txt(buf, buf_len, obj, 1)
assert res > 0
return self._backend._ffi.buffer(buf, res)[:].decode()
@property
def signature_hash_algorithm(self):
oid = self._obj2txt(self._x509.sig_alg.algorithm)
try:
return x509._SIG_OIDS_TO_HASH[oid]
except KeyError:
raise UnsupportedAlgorithm(
"Signature algorithm OID:{0} not recognized".format(oid)
)
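# Illustrative usage (assuming a release of `cryptography` where this backend
# module ships): a certificate loaded through the OpenSSL backend, e.g. via
#   x509.load_pem_x509_certificate(pem_bytes, backend)
# is wrapped in the _Certificate class above, so cert.subject, cert.serial and
# cert.not_valid_after all resolve through these OpenSSL bindings.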
|
deandunbar/html2bwml
|
venv/lib/python2.7/site-packages/cryptography/hazmat/backends/openssl/x509.py
|
Python
|
mit
| 5,608
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now_datetime, cint
def set_new_name(doc):
"""Sets the `name`` property for the document based on various rules.
1. If amened doc, set suffix.
3. If `autoname` method is declared, then call it.
4. If `autoname` property is set in the DocType (`meta`), then build it using the `autoname` property.
2. If `name` is already defined, use that name
5. If no rule defined, use hash.
#### Note:
:param doc: Document to be named."""
doc.run_method("before_naming")
autoname = frappe.get_meta(doc.doctype).autoname
if getattr(doc, "amended_from", None):
_set_amended_name(doc)
return
elif getattr(doc.meta, "issingle", False):
doc.name = doc.doctype
elif hasattr(doc, "autoname"):
doc.run_method("autoname")
elif autoname:
if autoname.startswith('field:'):
fieldname = autoname[6:]
doc.name = (doc.get(fieldname) or "").strip()
if not doc.name:
frappe.throw(_("{0} is required").format(doc.meta.get_label(fieldname)))
raise Exception, 'Name is required'
if autoname.startswith("naming_series:"):
set_name_by_naming_series(doc)
elif "#" in autoname:
doc.name = make_autoname(autoname)
elif autoname=='Prompt':
# set from __newname in save.py
if not doc.name:
frappe.throw(_("Name not set via Prompt"))
if not doc.name:
doc.name = make_autoname('hash', doc.doctype)
doc.name = validate_name(doc.doctype, doc.name)
def set_name_by_naming_series(doc):
"""Sets name by the `naming_series` property"""
if not doc.naming_series:
doc.naming_series = get_default_naming_series(doc.doctype)
if not doc.naming_series:
frappe.throw(frappe._("Naming Series mandatory"))
doc.name = make_autoname(doc.naming_series+'.#####')
def make_autoname(key, doctype=''):
"""
Creates an autoname from the given key:
**Autoname rules:**
* The key is separated by '.'
* '####' represents a series. The string before this part becomes the prefix:
Example: ABC.#### creates a series ABC0001, ABC0002 etc
* 'MM' represents the current month
* 'YY' and 'YYYY' represent the current year
*Example:*
		 * DE/.YY./.MM./.#### will create a series like
		   DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
"""
if key=="hash":
return frappe.generate_hash(doctype, 10)
if not "#" in key:
key = key + ".#####"
elif not "." in key:
frappe.throw(_("Invalid naming series (. missing)") + (_(" for {0}").format(doctype) if doctype else ""))
n = ''
l = key.split('.')
series_set = False
today = now_datetime()
for e in l:
en = ''
if e.startswith('#'):
if not series_set:
digits = len(e)
en = getseries(n, digits, doctype)
series_set = True
elif e=='YY':
en = today.strftime('%y')
elif e=='MM':
en = today.strftime('%m')
elif e=='DD':
en = today.strftime("%d")
elif e=='YYYY':
en = today.strftime('%Y')
else: en = e
n+=en
return n
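# Illustrative examples (series keys are made up and assume fresh counters):
#   make_autoname("SINV-.#####")      -> "SINV-00001", then "SINV-00002", ...
#   make_autoname("PO-.YYYY.-.####")  -> "PO-2015-0001" when the year is 2015
#   make_autoname("hash", "Note")     -> a random 10-character hash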
def getseries(key, digits, doctype=''):
# series created ?
current = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (key,))
if current and current[0][0] is not None:
current = current[0][0]
# yes, update it
frappe.db.sql("update tabSeries set current = current+1 where name=%s", (key,))
current = cint(current) + 1
else:
# no, create it
frappe.db.sql("insert into tabSeries (name, current) values (%s, 1)", (key,))
current = 1
return ('%0'+str(digits)+'d') % current
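# Illustrative example: getseries("SINV-", 5) returns "00001" the first time it
# is called (creating the `tabSeries` row for "SINV-") and "00002" on the next
# call, since the stored counter is incremented on every use.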
def revert_series_if_last(key, name):
if ".#" in key:
prefix, hashes = key.rsplit(".", 1)
if "#" not in hashes:
return
else:
prefix = key
count = cint(name.replace(prefix, ""))
current = frappe.db.sql("select `current` from `tabSeries` where name=%s for update", (prefix,))
if current and current[0][0]==count:
frappe.db.sql("update tabSeries set current=current-1 where name=%s", prefix)
def get_default_naming_series(doctype):
"""get default value for `naming_series` property"""
naming_series = frappe.get_meta(doctype).get_field("naming_series").options or ""
if naming_series:
naming_series = naming_series.split("\n")
return naming_series[0] or naming_series[1]
else:
return None
def validate_name(doctype, name, case=None, merge=False):
if not name: return 'No Name Specified for %s' % doctype
if name.startswith('New '+doctype):
frappe.throw(_('There were some errors setting the name, please contact the administrator'), frappe.NameError)
if case=='Title Case': name = name.title()
if case=='UPPER CASE': name = name.upper()
name = name.strip()
if not frappe.get_meta(doctype).get("issingle") and (doctype == name) and (name!="DocType"):
frappe.throw(_("Name of {0} cannot be {1}").format(doctype, name), frappe.NameError)
return name
def _set_amended_name(doc):
am_id = 1
am_prefix = doc.amended_from
if frappe.db.get_value(doc.doctype, doc.amended_from, "amended_from"):
am_id = cint(doc.amended_from.split('-')[-1]) + 1
am_prefix = '-'.join(doc.amended_from.split('-')[:-1]) # except the last hyphen
doc.name = am_prefix + '-' + str(am_id)
return doc.name
def append_number_if_name_exists(doc):
if frappe.db.exists(doc.doctype, doc.name):
last = frappe.db.sql("""select name from `tab{}`
where name regexp '{}-[[:digit:]]+'
order by length(name) desc, name desc limit 1""".format(doc.doctype, doc.name))
if last:
count = str(cint(last[0][0].rsplit("-", 1)[1]) + 1)
else:
count = "1"
doc.name = "{0}-{1}".format(doc.name, count)
def de_duplicate(doctype, name):
original_name = name
count = 0
while True:
if frappe.db.exists(doctype, name):
count += 1
name = "{0}-{1}".format(original_name, count)
else:
break
return name
|
sbktechnology/trufil-frappe
|
frappe/model/naming.py
|
Python
|
mit
| 5,880
|
from mbuild.lib.surfaces.amorphous_silica import AmorphousSilica
from mbuild.lib.surfaces.betacristobalite import Betacristobalite
|
ctk3b/mbuild
|
mbuild/lib/surfaces/__init__.py
|
Python
|
mit
| 130
|
"""Prepares the views for point scoreboard widget."""
import datetime
from apps.managers.challenge_mgr import challenge_mgr
from apps.managers.player_mgr import player_mgr
from apps.managers.team_mgr import team_mgr
def supply(request, page_name):
"""Supply the view_objects content for this widget, which is all the scoreboard data."""
user = request.user
team = user.get_profile().team
num_results = 10 if page_name != "status" else None
round_standings = {}
current_round = challenge_mgr.get_round_name()
today = datetime.datetime.today()
rounds = challenge_mgr.get_all_round_info()["rounds"]
for key in rounds.keys():
# 1. always display current round
# 2. if not future round
# a. display the round with the "display_scoreboard" flag
# b. display in the status page
if rounds[key]["start"] <= today and \
(rounds[key]["display_scoreboard"] or page_name == "status"):
round_standings[key] = {
"group_standings": team_mgr.group_points_leaders(num_results, key),
"team_standings": team_mgr.team_points_leaders(num_results, key),
"profile_standings": player_mgr.points_leaders(num_results, key),
"group_participation": team_mgr.group_active_participation(num_results, key) if \
page_name == "status" else None,
"team_participation": team_mgr.team_active_participation(num_results, key) if \
page_name == "status" else None,
"user_team_standings": team.points_leaders(num_results, key) if \
team and page_name != "status" else None,
}
# add an overall scoreboard
round_standings["Overall"] = {
"group_standings": team_mgr.group_points_leaders(num_results, "Overall"),
"team_standings": team_mgr.team_points_leaders(num_results, "Overall"),
"profile_standings": player_mgr.points_leaders(num_results, "Overall"),
"group_participation": team_mgr.group_active_participation(num_results, "Overall") if\
page_name == "status" else None,
"team_participation": team_mgr.team_active_participation(num_results, "Overall") if \
page_name == "status" else None,
}
count = len(rounds)
return {
"profile": user.get_profile(),
"team": team,
"current_round": current_round,
"round_standings": round_standings,
"no_carousel": page_name == "status",
"range": count,
"user": user,
}
def remote_supply(request, page_name):
"""Supplies data to remote views."""
return supply(request, page_name)
|
KendyllD/boukenda-project
|
makahiki/apps/widgets/scoreboard/views.py
|
Python
|
mit
| 2,747
|
#! /usr/bin/env python
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 3085 $
# Date: $Date: 2005-03-22 21:38:43 +0100 (Tue, 22 Mar 2005) $
# Copyright: This module has been placed in the public domain.
"""
Tests for docutils.transforms.components.Filter.
"""
from __init__ import DocutilsTestSupport
from docutils.parsers.rst import Parser
def suite():
parser = Parser()
s = DocutilsTestSupport.TransformTestSuite(parser)
s.generateTests(totest)
return s
totest = {}
totest['meta'] = ((), [
["""\
.. meta::
:description: The reStructuredText plaintext markup language
:keywords: plaintext,markup language
""",
"""\
<document source="test data">
<meta content="The reStructuredText plaintext markup language" name="description">
<meta content="plaintext,markup language" name="keywords">
"""],
])
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
|
pombreda/django-hotclub
|
libs/external_libs/docutils-0.4/test/test_transforms/test_filter.py
|
Python
|
mit
| 962
|
#!/usr/bin/env python
from distutils.core import setup
import os, sys
if 'sdist' in sys.argv:
os.system('./admin/epyrun')
# patch distutils if it can't cope with the "classifiers" or
# "download_url" keywords
if sys.version < '2.2.3':
from distutils.dist import DistributionMetadata
DistributionMetadata.classifiers = None
DistributionMetadata.download_url = None
version = '[library version:1.1.0]'[17:-1]
kwargs = {
'name': "python-yadis",
'version': version,
'url': "http://www.openidenabled.com/yadis/libraries/python/",
'download_url': "http://www.openidenabled.com/resources/downloads/python-yadis/python-yadis-%s.tar.gz" % (version,),
'author': "JanRain, Inc.",
'author_email': "openid@janrain.com",
'description': "Yadis service discovery library.",
'long_description': "Yadis is a protocol for discovering services "
"applicable to a URL. This package provides a client implementation "
"of the Yadis protocol.",
'packages': ['yadis',
],
'license': "LGPL",
'classifiers': [
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)",
"Operating System :: POSIX",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Systems Administration :: Authentication/Directory",
]
}
setup(**kwargs)
|
alon/polinax
|
libs/external_libs/python-yadis-1.1.0/setup.py
|
Python
|
gpl-2.0
| 1,649
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Ansible, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_command
short_description: Executes a command on a remote Windows node
version_added: 2.2
description:
- The C(win_command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($env:HOME) and operations
like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell)
module if you need these features).
options:
free_form:
description:
- the win_command module takes a free form command to run. There is no parameter actually named 'free form'.
See the examples!
required: true
creates:
description:
- a path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
removes:
description:
- a path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
chdir:
description:
- set the specified path as the current working directory before executing a command
notes:
- If you want to run a command through a shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(win_shell) module instead. The
C(win_command) module is much more secure as it's not affected by the user's
environment.
- C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not
exist, use this.
author:
- Matt Davis
'''
EXAMPLES = r'''
# Example from Ansible Playbooks.
- win_command: whoami
register: whoami_out
# Run the command only if the specified file does not exist.
- win_command: wbadmin -backupTarget:C:\backup\ creates=C:\backup\
# You can also use the 'args' form to provide the options. This command
# will change the working directory to C:\somedir\\ and will only run when
# C:\backup\ doesn't exist.
- win_command: wbadmin -backupTarget:C:\backup\ creates=C:\backup\
args:
chdir: C:\somedir\
creates: C:\backup\
'''
RETURN = r'''
msg:
description: changed
returned: always
type: boolean
sample: True
start:
description: The command execution start time
returned: always
type: string
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: string
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: string
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: string
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: string
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: string
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
'''
|
dav1x/ansible
|
lib/ansible/modules/windows/win_command.py
|
Python
|
gpl-3.0
| 4,255
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def install(company):
docs = [
{'doctype': 'Salary Component', 'salary_component': 'Professional Tax', 'description': 'Professional Tax', 'type': 'Deduction'},
{'doctype': 'Salary Component', 'salary_component': 'Provident Fund', 'description': 'Provident fund', 'type': 'Deduction'},
{'doctype': 'Salary Component', 'salary_component': 'House Rent Allowance', 'description': 'House Rent Allowance', 'type': 'Earning'},
{'doctype': 'Salary Component', 'salary_component': 'Basic', 'description': 'Basic', 'type': 'Earning'}
]
for d in docs:
try:
doc = frappe.get_doc(d)
doc.flags.ignore_permissions = True
doc.insert()
except frappe.NameError:
pass
|
elba7r/lite-system
|
erpnext/setup/doctype/company/fixtures/india/__init__.py
|
Python
|
gpl-3.0
| 861
|
#!/usr/bin/env python
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This script was used to get white background on the pixmaps.
"""
import os
for fn in os.listdir("feta"):
s = open(os.path.join("feta", fn), "r").read()
s = s.replace("\"a c #FFF\"", "\"a c #FF\"")
f = open(os.path.join("feta", fn), "w")
f.write(s)
f.close()
|
RannyeriDev/Solfege
|
tools/fix_feta.py
|
Python
|
gpl-3.0
| 926
|
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF). It generates
5-tuples with these members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators
Older entry points
tokenize_loop(readline, tokeneater)
tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""
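# Illustrative example: tokenizing the single line "x = 1\n" with
# generate_tokens() yields roughly
#   (NAME, 'x', (1, 0), (1, 1), 'x = 1\n')
#   (OP, '=', (1, 2), (1, 3), 'x = 1\n')
#   (NUMBER, '1', (1, 4), (1, 5), 'x = 1\n')
#   (NEWLINE, '\n', (1, 5), (1, 6), 'x = 1\n')
# followed by an ENDMARKER token.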
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *
from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
try:
bytes
except NameError:
# Support bytes type in Python <= 2.5, so 2to3 turns itself into
# valid Python 3 code.
bytes = str
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
r"//=?", r"->",
r"[+\-*/%&@|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
tokenprog, pseudoprog, single3prog, double3prog = list(map(
re.compile, (Token, PseudoToken, Single3, Double3)))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
"'''": single3prog, '"""': double3prog,
"r'''": single3prog, 'r"""': double3prog,
"u'''": single3prog, 'u"""': double3prog,
"b'''": single3prog, 'b"""': double3prog,
"ur'''": single3prog, 'ur"""': double3prog,
"br'''": single3prog, 'br"""': double3prog,
"R'''": single3prog, 'R"""': double3prog,
"U'''": single3prog, 'U"""': double3prog,
"B'''": single3prog, 'B"""': double3prog,
"uR'''": single3prog, 'uR"""': double3prog,
"Ur'''": single3prog, 'Ur"""': double3prog,
"UR'''": single3prog, 'UR"""': double3prog,
"bR'''": single3prog, 'bR"""': double3prog,
"Br'''": single3prog, 'Br"""': double3prog,
"BR'''": single3prog, 'BR"""': double3prog,
'r': None, 'R': None,
'u': None, 'U': None,
'b': None, 'B': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"b'''", 'b"""', "B'''", 'B"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"u'", 'u"', "U'", 'U"',
"b'", 'b"', "B'", 'B"',
"ur'", 'ur"', "Ur'", 'Ur"',
"uR'", 'uR"', "UR'", 'UR"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"', ):
single_quoted[t] = t
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing
(srow, scol) = xxx_todo_changeme
(erow, ecol) = xxx_todo_changeme1
print("%d,%d-%d,%d:\t%s\t%s" % \
(srow, scol, erow, ecol, tok_name[type], repr(token)))
def tokenize(readline, tokeneater=printtoken):
"""
The tokenize() function accepts two parameters: one representing the
input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides
the same interface as the readline() method of built-in file objects.
Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is
called once for each token, with five arguments, corresponding to the
tuples generated by generate_tokens().
"""
try:
tokenize_loop(readline, tokeneater)
except StopTokenizing:
pass
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
for token_info in generate_tokens(readline):
tokeneater(*token_info)
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
def add_whitespace(self, start):
row, col = start
assert row <= self.prev_row
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
for t in iterable:
if len(t) == 2:
self.compat(t, iterable)
break
tok_type, token, start, end, line = t
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
startline = False
indents = []
toks_append = self.tokens.append
toknum, tokval = token
if toknum in (NAME, NUMBER):
tokval += ' '
if toknum in (NEWLINE, NL):
startline = True
for tok in iterable:
toknum, tokval = tok[:2]
if toknum in (NAME, NUMBER, ASYNC, AWAIT):
tokval += ' '
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return bytes()
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
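# Illustrative example: for a byte stream whose first line is
#   # -*- coding: iso-8859-1 -*-
# detect_encoding() returns ('iso-8859-1', [b'# -*- coding: iso-8859-1 -*-\n']).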
def untokenize(iterable):
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
t1 = [tok[:2] for tok in generate_tokens(f.readline)]
newcode = untokenize(t1)
readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
assert t1 == t2
"""
ut = Untokenizer()
return ut.untokenize(iterable)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
# 'stashed' and 'async_*' are used for async/await parsing
stashed = None
async_def = False
async_def_indent = 0
async_def_nl = False
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if stashed:
yield stashed
stashed = None
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
if async_def and async_def_indent >= indents[-1]:
async_def = False
async_def_nl = False
async_def_indent = 0
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
if async_def and async_def_nl and async_def_indent >= indents[-1]:
async_def = False
async_def_nl = False
async_def_indent = 0
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
elif async_def:
async_def_nl = True
if stashed:
yield stashed
stashed = None
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
if stashed:
yield stashed
stashed = None
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
if stashed:
yield stashed
stashed = None
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
if stashed:
yield stashed
stashed = None
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
if token in ('async', 'await'):
if async_def:
yield (ASYNC if token == 'async' else AWAIT,
token, spos, epos, line)
continue
tok = (NAME, token, spos, epos, line)
if token == 'async' and not stashed:
stashed = tok
continue
if token == 'def':
if (stashed
and stashed[0] == NAME
and stashed[1] == 'async'):
async_def = True
async_def_indent = indents[-1]
yield (ASYNC, stashed[1],
stashed[2], stashed[3],
stashed[4])
stashed = None
if stashed:
yield stashed
stashed = None
yield tok
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
if stashed:
yield stashed
stashed = None
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
if stashed:
yield stashed
stashed = None
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
if stashed:
yield stashed
stashed = None
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
else: tokenize(sys.stdin.readline)
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/python/lib/lib2to3/pgen2/tokenize.py
|
Python
|
gpl-3.0
| 21,803
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class XcbUtilCursor(AutotoolsPackage):
"""The XCB util modules provides a number of libraries which sit on top
of libxcb, the core X protocol library, and some of the extension
libraries. These experimental libraries provide convenience functions
and interfaces which make the raw X protocol more usable. Some of the
libraries also provide client-side code which is not strictly part of
the X protocol but which have traditionally been provided by Xlib."""
homepage = "https://xcb.freedesktop.org/"
url = "https://xcb.freedesktop.org/dist/xcb-util-cursor-0.1.3.tar.gz"
version('0.1.3', '4b0768fa497127131a47f07e5c8cf745')
depends_on('libxcb@1.4:')
depends_on('xcb-util-renderutil')
depends_on('xcb-util-image')
depends_on('pkgconfig', type='build')
|
krafczyk/spack
|
var/spack/repos/builtin/packages/xcb-util-cursor/package.py
|
Python
|
lgpl-2.1
| 2,067
|
# -*- encoding: utf-8 -*-
# Pilas engine - A video game framework.
#
# Copyright 2010 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from pilasengine.actores.actor import Actor
class EstrellaNinja(Actor):
""" Representa una estrella ninja. """
def pre_iniciar(self, x=0, y=0):
self.x = x
self.y = y
self.imagen = self.pilas.imagenes.cargar('disparos/estrella.png')
self.rotacion = 0
self.escala = 0.5
self.radio_de_colision = 20
        ## TODO: find a way to change the movement speed and angle
        ## from this class.
self.hacer(self.pilas.comportamientos.Proyectil, velocidad_maxima=1,
aceleracion=1,
angulo_de_movimiento=0,
gravedad=0)
def actualizar(self):
self.rotacion += 10
|
apehua/pilas
|
pilasengine/actores/estrella_ninja.py
|
Python
|
lgpl-3.0
| 1,023
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import math_ops
class ReducedShapeTest(tf.test.TestCase):
def _check(self, shape, axes, result):
output = math_ops.reduced_shape(shape, axes=axes)
self.assertAllEqual(output.eval(), result)
def testSimple(self):
with self.test_session():
self._check([3], [], [3])
self._check([3], [0], [1])
self._check([5, 3], [], [5, 3])
self._check([5, 3], [0], [1, 3])
self._check([5, 3], [1], [5, 1])
self._check([5, 3], [0, 1], [1, 1])
def testZeros(self):
"""Check that reduced_shape does the right thing with zero dimensions."""
with self.test_session():
self._check([0], [], [0])
self._check([0], [0], [1])
self._check([0, 3], [], [0, 3])
self._check([0, 3], [0], [1, 3])
self._check([0, 3], [1], [0, 1])
self._check([0, 3], [0, 1], [1, 1])
self._check([3, 0], [], [3, 0])
self._check([3, 0], [0], [1, 0])
self._check([3, 0], [1], [3, 1])
self._check([3, 0], [0, 1], [1, 1])
def testNegAxes(self):
with self.test_session():
self._check([10, 10, 10], [-1], [10, 10, 1])
self._check([10, 10, 10], [-1, 2], [10, 10, 1])
self._check([10, 10, 10], [-1, -1], [10, 10, 1])
self._check([10, 10, 10], [-1, 0], [1, 10, 1])
self._check([10, 10, 10], [-3], [1, 10, 10])
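# For reference, a standalone sketch of what reduced_shape computes (derived
# directly from the cases above): given an input shape and reduction axes, it
# returns the shape the reduction would have with keep_dims=True, e.g.
#
#   math_ops.reduced_shape([5, 3], axes=[0])         # -> [1, 3]
#   math_ops.reduced_shape([10, 10, 10], axes=[-1])  # -> [10, 10, 1]
#
# Negative axes are interpreted modulo the rank, which testNegAxes exercises.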
class SumReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False,
feed_dict=None):
np_ans = x
if reduction_axes is None:
np_ans = np.sum(np_ans, keepdims=keep_dims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu) as sess:
tf_ans = tf.reduce_sum(x, reduction_axes, keep_dims)
out = sess.run(tf_ans, feed_dict)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes, feed_dict=None):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareAll(x, reduction_axes[0])
self._compare(x, reduction_axes, False, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, False, use_gpu=False, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=True, feed_dict=feed_dict)
self._compare(x, reduction_axes, True, use_gpu=False, feed_dict=feed_dict)
def testFloatReduce1D(self):
# Create a 1D array of floats
np_arr = np.arange(1, 6).reshape([5]).astype(np.float32)
self._compareAll(np_arr, [0])
def testFloatReduce2D(self):
# Create a 2D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [0, 1])
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [-1])
self._compareAll(np_arr, [-1, -3])
self._compareAll(np_arr, [-1, 1])
def testFloatReduce4D(self):
# Create a 4D array of floats and reduce across some
# dimensions
np_arr = np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
# Need specialization for reduce(4D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
def testFloatReduce5D(self):
# Create a 5D array of floats and reduce across some dimensions
np_arr = np.arange(0, 840).reshape([2, 3, 5, 7, 4]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
    # Need specialization for reduce(5D, [0, 2])
# self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
self._compareAll(np_arr, [1, 2, 3])
self._compareAll(np_arr, [0, 1, 2, 3])
self._compareAll(np_arr, [1, 2, 3, 4])
self._compareAll(np_arr, [0, 1, 2, 3, 4])
# Simple tests for various types.
def testDoubleReduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
def testInt32Reduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.int32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
def testComplex64Reduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.complex64)
self._compare(np_arr, [], False)
self._compare(np_arr, [0], False)
def testComplex128Reduce1D(self):
np_arr = np.arange(1, 6).reshape([5]).astype(np.complex128)
self._compare(np_arr, [], False)
self._compare(np_arr, [0], False)
def testInvalidIndex(self):
np_arr = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = tf.convert_to_tensor(np_arr)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [-3])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [2])
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Invalid reduction dimension" in str(e)):
tf.reduce_sum(input_tensor, [0, 2])
def testPartialShapes(self):
np.random.seed(1618)
# Input shape is unknown.
reduction_axes = [1, 2]
c_unknown = tf.placeholder(tf.float32)
s_unknown = tf.reduce_sum(c_unknown, reduction_axes)
self.assertEqual(tensor_shape.unknown_shape(), s_unknown.get_shape())
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_unknown: np_input})
# Input shape only has known rank.
c_known_rank = tf.placeholder(tf.float32)
c_known_rank.set_shape(tensor_shape.unknown_shape(ndims=3))
s_known_rank = tf.reduce_sum(c_known_rank, reduction_axes, keep_dims=True)
self.assertEqual(3, s_known_rank.get_shape().ndims)
np_input = np.random.randn(3, 3, 3)
self._compareAll(np_input, reduction_axes, {c_known_rank: np_input})
# Reduction indices are unknown.
unknown_indices = tf.placeholder(tf.int32)
c_unknown_indices = tf.constant([[10.0], [20.0]])
s_unknown_indices = tf.reduce_sum(c_unknown_indices, unknown_indices,
keep_dims=False)
self.assertEqual(tensor_shape.unknown_shape(),
s_unknown_indices.get_shape())
s_unknown_indices_keep = tf.reduce_sum(c_unknown_indices, unknown_indices,
keep_dims=True)
self.assertEqual(2, s_unknown_indices_keep.get_shape().ndims)
# Int64??
def _compareGradient(self, shape, sum_shape, reduction_axes):
if reduction_axes is not None and np.shape(reduction_axes) == (1,):
# Test scalar reduction_axes argument
self._compareGradient(shape, sum_shape, reduction_axes[0])
x = np.arange(1.0, 49.0).reshape(shape).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_sum(t, reduction_axes)
jacob_t, jacob_n = tf.test.compute_gradient(t,
shape,
su,
sum_shape,
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient(self):
self._compareGradient([2, 3, 4, 2], [2, 2], [1, 2])
def testGradient2(self):
self._compareGradient([2, 3, 4, 2], [2, 4, 2], [1])
def testGradient3(self):
self._compareGradient([2, 3, 4, 2], [2, 3, 2], [2])
def testGradient4(self):
self._compareGradient([2, 3, 4, 2], [], None)
def testHighRank(self):
# Do a bunch of random high dimensional reductions
np.random.seed(42)
for _ in range(20):
rank = np.random.randint(4, 10 + 1)
axes, = np.nonzero(np.random.randint(2, size=rank))
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
self._compareAll(data, axes)
# Check some particular axis patterns
for rank in 4, 7, 10:
shape = tuple(np.random.randint(1, 3 + 1, size=rank))
data = np.random.randint(1024, size=shape)
for axes in ([], np.arange(rank), np.arange(0, rank, 2),
np.arange(1, rank, 2)):
self._compareAll(data, axes)
def testExpand(self):
# Reduce an empty tensor to a nonempty tensor
x = np.zeros((5, 0))
self._compareAll(x, [1])
def testEmptyGradients(self):
with self.test_session():
x = tf.zeros([0, 3])
y = tf.reduce_sum(x, [1])
error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
def testDegenerate(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
for dtype in (tf.float16, tf.float32, tf.float64, tf.complex64,
tf.complex128):
# A large number is needed to get Eigen to die
x = tf.zeros((0, 9938), dtype=dtype)
y = tf.reduce_sum(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
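# The _compare helpers in these test classes all follow the same pattern:
# compute a reference result with numpy, compute the candidate with the
# TensorFlow op, then compare values and shapes. A minimal standalone sketch
# of that idea (hedged; assumes the TF 0.x/1.x-style session API used in this
# file):
#
#   x = np.arange(6, dtype=np.float32).reshape(2, 3)
#   with tf.Session():
#     tf_out = tf.reduce_sum(x, [0], keep_dims=True).eval()  # shape (1, 3)
#   np_out = np.sum(x, axis=0, keepdims=True)                # shape (1, 3)
#   np.testing.assert_allclose(tf_out, np_out)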
class MeanReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.mean(np_ans, keepdims=keep_dims)
else:
reduction_axes = np.array(reduction_axes).astype(np.int32)
count = 1
for ra in reduction_axes.ravel()[::-1]:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
count *= x.shape[ra]
np_ans /= count
with self.test_session(use_gpu=use_gpu):
tf_ans = tf.reduce_mean(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float32)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_mean(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_mean(t, [0, 1, 2, 3])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_mean(t, [])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 3, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testEmptyGradients(self):
with self.test_session():
x = tf.zeros([0, 3])
y = tf.reduce_mean(x, [1])
error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
def testDegenerate(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
for dtype in (tf.float16, tf.float32, tf.float64):
# A large number is needed to get Eigen to die
x = tf.zeros((0, 9938), dtype=dtype)
y = tf.reduce_mean(x, [0]).eval()
self.assertEqual(y.shape, (9938,))
self.assertTrue(np.all(np.isnan(y)))
class ProdReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims):
np_ans = x
if reduction_axes is None:
np_ans = np.prod(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.prod(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session():
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_prod(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False)
self._compare(x, reduction_axes, True)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def _compareGradient(self, x):
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_prod(t, [])
jacob_t, jacob_n = tf.test.compute_gradient(t,
x.shape,
su,
[2, 3, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_prod(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
x.shape,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
su = tf.reduce_prod(t, [0, 1, 2, 3])
jacob_t, jacob_n = tf.test.compute_gradient(t,
x.shape,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testGradientWithZeros(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float32) / 20.
# No zeros in input
self._compareGradient(x)
# Zero at beginning
x1 = x.copy()
x1[:,:,0,:] = 0
self._compareGradient(x1)
# Zero at end
x2 = x.copy()
x2[:,:,-1,:] = 0
self._compareGradient(x2)
# Zero in middle
x3 = x.copy()
x3[:,:,2,:] = 0
self._compareGradient(x3)
# All zeros
x4 = x.copy()
x4[:,:,:,:] = 0
self._compareGradient(x4)
def testEmptyGradients(self):
with self.test_session():
x = tf.zeros([0, 3])
y = tf.reduce_prod(x, [1])
error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
def testDegenerate(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
for dtype in (tf.float16, tf.float32, tf.float64):
# A large number is needed to get Eigen to die
x = tf.zeros((0, 9938), dtype=dtype)
y = tf.reduce_prod(x, [0])
self.assertAllEqual(y.eval(), np.ones(9938))
class MinReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amin(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amin(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_min(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [1])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t, [2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 3, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_min(t)
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testEmptyGradients(self):
with self.test_session():
x = tf.zeros([0, 3])
y = tf.reduce_min(x, [1])
error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class MaxReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.amax(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.amax(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_max(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testFloatReduce3D(self):
# Create a 3D array of floats and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float32)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testDoubleReduce3D(self):
# Create a 3D array of doubles and reduce across all possible
# dimensions
np_arr = np.arange(0, 30).reshape([2, 3, 5]).astype(np.float64)
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testGradient(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [1, 2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient2(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [1])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 4, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient3(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t, [2])
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[2, 3, 2],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient4(self):
s = [2, 3, 4, 2]
x = np.arange(1.0, 49.0).reshape(s).astype(np.float64)
with self.test_session():
t = tf.convert_to_tensor(x)
su = tf.reduce_max(t)
jacob_t, jacob_n = tf.test.compute_gradient(t,
s,
su,
[1],
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testEmptyGradients(self):
with self.test_session():
x = tf.zeros([0, 3])
y = tf.reduce_max(x, [1])
error = tf.test.compute_gradient_error(x, [0, 3], y, [0])
self.assertEqual(error, 0)
class AllReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.all(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.all(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_all(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.1).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
class AnyReductionTest(tf.test.TestCase):
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
np_ans = x
if reduction_axes is None:
np_ans = np.any(np_ans, keepdims=keep_dims)
else:
for ra in reduction_axes[::-1]:
np_ans = np.any(np_ans, axis=ra, keepdims=keep_dims)
with self.test_session(use_gpu=use_gpu):
if reduction_axes is not None:
reduction_axes = np.array(reduction_axes).astype(np.int32)
tf_ans = tf.reduce_any(x, reduction_axes, keep_dims)
out = tf_ans.eval()
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def _compareAll(self, x, reduction_axes):
self._compare(x, reduction_axes, False, use_gpu=True)
self._compare(x, reduction_axes, False, use_gpu=False)
self._compare(x, reduction_axes, True, use_gpu=True)
self._compare(x, reduction_axes, True, use_gpu=False)
def testAll3D(self):
# Create a 3D array of bools and reduce across all possible
# dimensions
np_arr = (np.random.uniform(0, 1, 30) > 0.9).reshape([2, 3, 5])
self._compareAll(np_arr, None)
self._compareAll(np_arr, [])
self._compareAll(np_arr, [0])
self._compareAll(np_arr, [1])
self._compareAll(np_arr, [2])
self._compareAll(np_arr, [0, 1])
self._compareAll(np_arr, [1, 2])
self._compareAll(np_arr, [0, 2])
self._compareAll(np_arr, [0, 1, 2])
def testEmpty(self):
self._compareAll([], [0])
if __name__ == "__main__":
tf.test.main()
|
HaebinShin/tensorflow
|
tensorflow/python/kernel_tests/reduction_ops_test.py
|
Python
|
apache-2.0
| 31,377
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fashion-MNIST dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import numpy as np
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
def load_data():
"""Loads the Fashion-MNIST dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
dirname = os.path.join('datasets', 'fashion-mnist')
base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
files = [
'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'
]
paths = []
for fname in files:
paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))
with gzip.open(paths[0], 'rb') as lbpath:
y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[1], 'rb') as imgpath:
x_train = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
with gzip.open(paths[2], 'rb') as lbpath:
y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)
with gzip.open(paths[3], 'rb') as imgpath:
x_test = np.frombuffer(
imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)
return (x_train, y_train), (x_test, y_test)
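# A minimal usage sketch (the 60000/10000 split below is the standard
# Fashion-MNIST split; the shapes follow from the reshape calls above):
#
#   (x_train, y_train), (x_test, y_test) = load_data()
#   # x_train.shape == (60000, 28, 28), dtype uint8
#   # y_train.shape == (60000,), integer labels 0-9
#   # x_test.shape  == (10000, 28, 28)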
|
rabipanda/tensorflow
|
tensorflow/python/keras/_impl/keras/datasets/fashion_mnist.py
|
Python
|
apache-2.0
| 2,033
|
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import warnings
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import types
# pylint: disable=unused-import
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_grad
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import linalg_grad
from tensorflow.python.ops import math_grad
# pylint: enable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.platform import logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
    ValueError: If the IndexedSlices does not have the requested dtype, or
      has no known dense_shape.
"""
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s"
% (dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
dense_shape_value = tensor_util.ConstantValue(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d elements. "
"This may consume a large amount of memory." % num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0], name=name)
ops.register_tensor_conversion_function(ops.IndexedSlices, _IndexedSlicesToTensor)
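# With the conversion function registered above, an IndexedSlices value can be
# passed anywhere the framework expects a Tensor; a hedged sketch (the name
# `sparse_grad` is hypothetical):
#
#   dense = ops.convert_to_tensor(sparse_grad)  # densified via
#                                               # _IndexedSlicesToTensor
#
# The warnings above fire only when the resulting dense Tensor would be large
# or of unknown shape.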
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _GetGradsDevice(op, colocate_gradients_with_ops):
"""Gets the device to which to assign gradients of "op".
Args:
op: an Operation.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
Returns:
A device string.
"""
if colocate_gradients_with_ops and op.device:
return op.device
else:
return op.graph.get_default_device()
def _PendingCount(graph, to_ops, from_ops):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op._id]' indicates the number of backprop inputs
to this operation.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
A tuple containing: (1) a list of integers indexed by operation id,
indicating the number of backprop inputs to this operation, and (2)
a boolean which is True if any of the ops in between from_ops and to_ops
contain control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = [False] * (graph._last_id + 1)
for op in to_ops:
reached_ops[op._id] = True
_MarkReachedOps(from_ops, reached_ops)
# Mark between ops.
between_ops = [False] * (graph._last_id + 1)
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
between_ops[op._id] = True
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
# Initialize pending count for between ops.
pending_count = [0] * (graph._last_id + 1)
has_control_flow = False
for op in between_op_list:
for x in op.inputs:
if between_ops[x.op._id]:
pending_count[x.op._id] += 1
for x in op.control_inputs:
if between_ops[x._id]:
pending_count[x._id] += 1
if op.type == "Exit":
has_control_flow = True
return pending_count, has_control_flow
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If one of the grad_ys is invalid.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
for i in xrange(len(grad_ys)):
grad_y = grad_ys[i]
y = ys[i]
if grad_y is None:
with ops.device(_GetGradsDevice(y.op, colocate_gradients_with_ops)):
grad_ys[i] = array_ops.fill(array_ops.shape(y),
constant_op.constant(1, dtype=y.dtype))
else:
if grad_y.dtype != y.dtype:
raise ValueError("Y and ys_grad must be of the same type, "
"not y: %s, ys_grad: %s " %
(types.as_dtype(y.dtype).name,
types.as_dtype(grad_y.dtype).name))
return grad_ys
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
    op: Operation for which the gradients were generated.
Raises:
ValueError: if the gradients are invalid.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
for i in xrange(len(grads)):
grad = grads[i]
inp = op.inputs[i]
if grad is not None:
if not grad.dtype.is_compatible_with(inp.dtype):
raise ValueError(
"Gradient type %s generated for op %s does "
"not match input type %s" %
(types.as_dtype(grad.dtype).name, op.node_def,
types.as_dtype(inp.dtype).name))
def _StopOps(from_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
Args:
from_ops: list of Operations.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
return stop_ops
def gradients(ys, xs, grad_ys=None, name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None):
"""Constructs symbolic partial derivatives of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the partial
derivatives of `ys` with respect to `xs`. It returns a list of
`Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
  user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'gradients'.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gate_gradients: If True, add a tuple around the gradients returned
      for an operation. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
"""
ys = _AsList(ys)
xs = _AsList(xs)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.op_scope(ys + xs + grad_ys, name, "gradients"):
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = ops.convert_n_to_tensor_or_indexed_slices(xs, name="x")
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
pending_count, has_control_flow = _PendingCount(
ops.get_default_graph(), to_ops, from_ops)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
# aggregate the list of received gradients into a Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
if op._id not in to_ops_set:
to_ops_set.add(op._id)
queue.append(op)
# The set of 'from_ops'.
stop_ops = _StopOps(from_ops, pending_count)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
with ops.device(_GetGradsDevice(op, colocate_gradients_with_ops)):
if has_control_flow:
control_flow_ops.EnterGradWhileContext(op)
out_grads = _AggregatedGrads(grads, op, has_control_flow,
aggregation_method)
grad_fn = None
if any(out_grads) and op._id not in stop_ops:
# A grad_fn must be defined, either as a function or as None
# for ops that do not have gradients.
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
raise LookupError(
"No gradient defined for operation '%s' (op type: %s)" %
(op.name, op.type))
if grad_fn and any(out_grads):
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not out_grad
and types.as_dtype(op.outputs[i].dtype).base_dtype in (
types.float32, types.float64)):
# Only floating-point outputs get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
out_grads[i] = array_ops.zeros_like(op.outputs[i])
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with ops.get_default_graph()._original_op(op):
# pylint: enable=protected-access
op_wrapper = op
if has_control_flow:
op_wrapper = control_flow_ops.MakeWrapper(op)
in_grads = _AsList(grad_fn(op_wrapper, *out_grads))
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len(in_grads) > 1:
in_grads = control_flow_ops.tuple(in_grads)
logging.vlog(1, "Gradient for '" + op.name + "'")
logging.vlog(1, " in --> %s",
", ".join([x.name for x in out_grads if x]))
logging.vlog(1, " out --> %s",
", ".join([x.name for x in in_grads if x]))
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagates a list of None backwards.
in_grads = [None] * len(op.inputs)
for t_in, in_grad in zip(op.inputs, in_grads):
if in_grad:
_SetGrad(grads, t_in, in_grad)
if has_control_flow:
control_flow_ops.ExitGradWhileContext(op)
# update pending count for the inputs of op.
for x in op.inputs:
pending_count[x.op._id] -= 1
ready = (pending_count[x.op._id] == 0)
if has_control_flow and not ready:
ready = (pending_count[x.op._id] > 0 and
control_flow_ops.IsLoopSwitch(x.op))
if ready:
queue.append(x.op)
for x in op.control_inputs:
pending_count[x._id] -= 1
        if pending_count[x._id] == 0:
queue.append(x)
return [_GetGrad(grads, x) for x in xs]
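# A minimal usage sketch for gradients() (hedged; it uses only the public
# signature documented above, with the constant_op/math_ops modules already
# imported in this file):
#
#   x = constant_op.constant(3.0)
#   y = math_ops.square(x)
#   dy_dx, = gradients(y, [x])  # symbolic tensor for dy/dx == 2 * x
#
# Evaluated in a Session, dy_dx yields 6.0. Passing grad_ys seeds the backward
# pass with something other than ones.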
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert op.type == "Switch"
op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads: return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = _HandleNestedIndexedSlices(grad.values)
return ops.IndexedSlices(
g.values, array_ops.gather(grad.indices, g.indices), g.dense_shape)
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op. It has the property that all
gradients must be ready before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
"""
ADD_N = 0
DEFAULT = ADD_N
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
EXPERIMENTAL_ACCUMULATE_N = 2
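# Sketch of how callers select an aggregation method (hedged; the constants
# above are the only documented values):
#
#   grads = gradients(ys, xs,
#                     aggregation_method=AggregationMethod.EXPERIMENTAL_TREE)
#
# DEFAULT aliases ADD_N, so omitting the argument sums all gradient terms for
# an endpoint with a single AddN op.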
def _AggregatedGrads(grads, op, has_control_flow, aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
has_control_flow: True iff the graph contains control flow ops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
    A list of gradients, one for each output of `op`. If the gradients
    for a particular output are a list, this function aggregates them
    before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [AggregationMethod.ADD_N,
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N]:
raise ValueError("Invalid aggregation_method specified.")
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if has_control_flow:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert op.type == "Switch"
continue
# Grads have to be Tensors or IndexedSlices
if not all([isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in out_grad if g]):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if all([isinstance(g, ops.Tensor) for g in out_grad if g]):
tensor_shape = _AccumulatorShape(out_grad)
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
# The benefit of using AccumulateN is that its inputs can be combined
# in any order and this can allow the expression to be evaluated with
# a smaller memory footprint. When used with gpu_allocator_retry,
# it is possible to compute a sum of terms which are much larger than
# total GPU memory.
# AccumulateN can currently only be used if we know the shape for
# an accumulator variable. If this is not known, or if we only have
# 2 grads then we fall through to the "tree" case below.
used = "accumulate_n"
out_grads[i] = math_ops.accumulate_n(out_grad)
elif aggregation_method in [AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
else:
used = "add_n"
out_grads[i] = math_ops.add_n(out_grad)
logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad),
tensor_shape, used)
else:
out_grad = math_ops._as_indexed_slices_list([g for g in out_grad if g])
out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
# Form IndexedSlices out of the concatenated values and
# indices.
out_grads[i] = ops.IndexedSlices(
array_ops.concat(0, [x.values for x in out_grad]),
array_ops.concat(0, [x.indices for x in out_grad]),
out_grad[0].dense_shape)
else:
out_grads[i] = []
return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
  as 1/2 (A + A.T) `v`.
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
    ValueError: `xs` and `v` have different lengths.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [math_ops.mul(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v)
if grad_elem is not None]
# Second backprop
return gradients(elemwise_products, xs)
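# A usage sketch (hedged; this helper is module-private, so it is illustrative
# only):
#
#   x = constant_op.constant([1.0, 2.0, 3.0])
#   v = constant_op.constant([1.0, 0.0, 0.0])
#   y = math_ops.reduce_sum(x * x)               # y = x^T x, Hessian = 2 * I
#   hvp, = _hessian_vector_product(y, [x], [v])  # evaluates to 2 * v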
|
arunhotra/tensorflow
|
tensorflow/python/ops/gradients.py
|
Python
|
apache-2.0
| 24,433
|
"""Tests for the Z-Wave init."""
import asyncio
from collections import OrderedDict
from datetime import datetime
from unittest.mock import MagicMock, patch
import pytest
import voluptuous as vol
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import zwave
from homeassistant.components.zwave import (
CONF_DEVICE_CONFIG_GLOB,
CONFIG_SCHEMA,
DATA_NETWORK,
const,
)
from homeassistant.const import ATTR_NAME
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.util import dt as dt_util
from tests.common import async_fire_time_changed, mock_registry
from tests.mock.zwave import MockEntityValues, MockNetwork, MockNode, MockValue
@pytest.fixture(autouse=True)
def mock_storage(hass_storage):
"""Autouse hass_storage for the TestCase tests."""
@pytest.fixture
async def zwave_setup(hass):
"""Zwave setup."""
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
@pytest.fixture
async def zwave_setup_ready(hass, zwave_setup):
"""Zwave setup and set network to ready."""
zwave_network = hass.data[DATA_NETWORK]
zwave_network.state = MockNetwork.STATE_READY
await hass.async_start()
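# Sketch of how a test opts into the ready-network fixture above (pytest
# injects fixtures by parameter name):
#
#   async def test_something(hass, mock_openzwave, zwave_setup_ready):
#       network = hass.data[DATA_NETWORK]
#       assert network.state == MockNetwork.STATE_READY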
async def test_valid_device_config(hass, mock_openzwave):
"""Test valid device config."""
device_config = {"light.kitchen": {"ignored": "true"}}
result = await async_setup_component(
hass, "zwave", {"zwave": {"device_config": device_config}}
)
await hass.async_block_till_done()
assert result
async def test_invalid_device_config(hass, mock_openzwave):
"""Test invalid device config."""
device_config = {"light.kitchen": {"some_ignored": "true"}}
result = await async_setup_component(
hass, "zwave", {"zwave": {"device_config": device_config}}
)
await hass.async_block_till_done()
assert not result
def test_config_access_error():
"""Test threading error accessing config values."""
node = MagicMock()
def side_effect():
raise RuntimeError
node.values.values.side_effect = side_effect
result = zwave.get_config_value(node, 1)
assert result is None
async def test_network_options(hass, mock_openzwave):
"""Test network options."""
result = await async_setup_component(
hass,
"zwave",
{"zwave": {"usb_path": "mock_usb_path", "config_path": "mock_config_path"}},
)
await hass.async_block_till_done()
assert result
network = hass.data[zwave.DATA_NETWORK]
assert network.options.device == "mock_usb_path"
assert network.options.config_path == "mock_config_path"
async def test_network_key_validation(hass, mock_openzwave):
"""Test network key validation."""
test_values = [
(
"0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, "
"0x0C, 0x0D, 0x0E, 0x0F, 0x10"
),
(
"0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,"
"0x0E,0x0F,0x10"
),
]
for value in test_values:
result = zwave.CONFIG_SCHEMA({"zwave": {"network_key": value}})
assert result["zwave"]["network_key"] == value
async def test_erronous_network_key_fails_validation(hass, mock_openzwave):
"""Test failing erroneous network key validation."""
test_values = [
(
"0x 01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, "
"0x0C, 0x0D, 0x0E, 0x0F, 0x10"
),
(
"0X01,0X02,0X03,0X04,0X05,0X06,0X07,0X08,0X09,0X0A,0X0B,0X0C,0X0D,"
"0X0E,0X0F,0X10"
),
"invalid",
"1234567",
1234567,
]
for value in test_values:
with pytest.raises(vol.Invalid):
zwave.CONFIG_SCHEMA({"zwave": {"network_key": value}})
async def test_auto_heal_midnight(hass, mock_openzwave, legacy_patchable_time):
"""Test network auto-heal at midnight."""
await async_setup_component(hass, "zwave", {"zwave": {"autoheal": True}})
await hass.async_block_till_done()
network = hass.data[zwave.DATA_NETWORK]
assert not network.heal.called
time = datetime(2017, 5, 6, 0, 0, 0, tzinfo=dt_util.UTC)
async_fire_time_changed(hass, time)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert network.heal.called
assert len(network.heal.mock_calls) == 1
async def test_auto_heal_disabled(hass, mock_openzwave):
"""Test network auto-heal disabled."""
await async_setup_component(hass, "zwave", {"zwave": {"autoheal": False}})
await hass.async_block_till_done()
network = hass.data[zwave.DATA_NETWORK]
assert not network.heal.called
time = datetime(2017, 5, 6, 0, 0, 0, tzinfo=dt_util.UTC)
async_fire_time_changed(hass, time)
await hass.async_block_till_done()
assert not network.heal.called
async def test_setup_platform(hass, mock_openzwave):
"""Test invalid device config."""
mock_device = MagicMock()
hass.data[DATA_NETWORK] = MagicMock()
hass.data[zwave.DATA_DEVICES] = {456: mock_device}
async_add_entities = MagicMock()
result = await zwave.async_setup_platform(hass, None, async_add_entities, None)
assert not result
assert not async_add_entities.called
result = await zwave.async_setup_platform(
hass, None, async_add_entities, {const.DISCOVERY_DEVICE: 123}
)
assert not result
assert not async_add_entities.called
result = await zwave.async_setup_platform(
hass, None, async_add_entities, {const.DISCOVERY_DEVICE: 456}
)
assert result
assert async_add_entities.called
assert len(async_add_entities.mock_calls) == 1
assert async_add_entities.mock_calls[0][1][0] == [mock_device]
async def test_zwave_ready_wait(hass, mock_openzwave, zwave_setup):
"""Test that zwave continues after waiting for network ready."""
sleeps = []
def utcnow():
return datetime.fromtimestamp(len(sleeps))
asyncio_sleep = asyncio.sleep
async def sleep(duration, loop=None):
if duration > 0:
sleeps.append(duration)
await asyncio_sleep(0)
with patch("homeassistant.components.zwave.dt_util.utcnow", new=utcnow), patch(
"asyncio.sleep", new=sleep
), patch.object(zwave, "_LOGGER") as mock_logger:
hass.data[DATA_NETWORK].state = MockNetwork.STATE_STARTED
await hass.async_start()
assert len(sleeps) == const.NETWORK_READY_WAIT_SECS
assert mock_logger.warning.called
assert len(mock_logger.warning.mock_calls) == 1
assert mock_logger.warning.mock_calls[0][1][1] == const.NETWORK_READY_WAIT_SECS
async def test_device_entity(hass, mock_openzwave):
"""Test device entity base class."""
node = MockNode(node_id="10", name="Mock Node")
value = MockValue(
data=False,
node=node,
instance=2,
object_id="11",
label="Sensor",
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
)
power_value = MockValue(
data=50.123456, node=node, precision=3, command_class=const.COMMAND_CLASS_METER
)
values = MockEntityValues(primary=value, power=power_value)
device = zwave.ZWaveDeviceEntity(values, "zwave")
device.hass = hass
device.value_added()
device.update_properties()
await hass.async_block_till_done()
assert not device.should_poll
assert device.unique_id == "10-11"
assert device.name == "Mock Node Sensor"
assert device.extra_state_attributes[zwave.ATTR_POWER] == 50.123
async def test_node_removed(hass, mock_openzwave):
"""Test node removed in base class."""
# Create a mock node & node entity
node = MockNode(node_id="10", name="Mock Node")
value = MockValue(
data=False,
node=node,
instance=2,
object_id="11",
label="Sensor",
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
)
power_value = MockValue(
data=50.123456, node=node, precision=3, command_class=const.COMMAND_CLASS_METER
)
values = MockEntityValues(primary=value, power=power_value)
device = zwave.ZWaveDeviceEntity(values, "zwave")
device.hass = hass
device.entity_id = "zwave.mock_node"
device.value_added()
device.update_properties()
await hass.async_block_till_done()
# Save it to the entity registry
registry = mock_registry(hass)
registry.async_get_or_create("zwave", "zwave", device.unique_id)
device.entity_id = registry.async_get_entity_id("zwave", "zwave", device.unique_id)
# Create dummy entity registry entries for other integrations
hue_entity = registry.async_get_or_create("light", "hue", 1234)
zha_entity = registry.async_get_or_create("sensor", "zha", 5678)
# Verify our Z-Wave entity is registered
assert registry.async_is_registered(device.entity_id)
# Remove it
entity_id = device.entity_id
await device.node_removed()
# Verify registry entry for our Z-Wave node is gone
assert not registry.async_is_registered(entity_id)
# Verify registry entries for our other entities remain
assert registry.async_is_registered(hue_entity.entity_id)
assert registry.async_is_registered(zha_entity.entity_id)
async def test_node_discovery(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_NODE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=14)
await hass.async_add_executor_job(mock_receivers[0], node)
await hass.async_block_till_done()
assert hass.states.get("zwave.mock_node").state == "unknown"
async def test_unparsed_node_discovery(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_NODE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=14, manufacturer_name=None, name=None, is_ready=False)
sleeps = []
def utcnow():
return datetime.fromtimestamp(len(sleeps))
asyncio_sleep = asyncio.sleep
async def sleep(duration, loop=None):
if duration > 0:
sleeps.append(duration)
await asyncio_sleep(0)
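    # Fast-forward the node-ready wait loop: each patched sleep records its
    # duration and yields immediately instead of actually waiting.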
with patch("homeassistant.components.zwave.dt_util.utcnow", new=utcnow), patch(
"asyncio.sleep", new=sleep
), patch.object(zwave, "_LOGGER") as mock_logger:
await hass.async_add_executor_job(mock_receivers[0], node)
await hass.async_block_till_done()
assert len(sleeps) == const.NODE_READY_WAIT_SECS
assert mock_logger.warning.called
assert len(mock_logger.warning.mock_calls) == 1
assert mock_logger.warning.mock_calls[0][1][1:] == (
14,
const.NODE_READY_WAIT_SECS,
)
assert hass.states.get("zwave.unknown_node_14").state == "unknown"
async def test_node_ignored(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_NODE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(
hass,
"zwave",
{"zwave": {"device_config": {"zwave.mock_node": {"ignored": True}}}},
)
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=14)
await hass.async_add_executor_job(mock_receivers[0], node)
await hass.async_block_till_done()
assert hass.states.get("zwave.mock_node") is None
async def test_value_discovery(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SENSOR_BINARY)
value = MockValue(
data=False,
node=node,
index=12,
instance=13,
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
type=const.TYPE_BOOL,
genre=const.GENRE_USER,
)
await hass.async_add_executor_job(mock_receivers[0], node, value)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.mock_node_mock_value").state == "off"
async def test_value_entities(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = {}
def mock_connect(receiver, signal, *args, **kwargs):
mock_receivers[signal] = receiver
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
zwave_network = hass.data[DATA_NETWORK]
zwave_network.state = MockNetwork.STATE_READY
await hass.async_start()
assert mock_receivers
await hass.async_add_executor_job(
mock_receivers[MockNetwork.SIGNAL_ALL_NODES_QUERIED]
)
node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SENSOR_BINARY)
zwave_network.nodes = {node.node_id: node}
value = MockValue(
data=False,
node=node,
index=12,
instance=1,
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
type=const.TYPE_BOOL,
genre=const.GENRE_USER,
)
node.values = {"primary": value, value.value_id: value}
value2 = MockValue(
data=False,
node=node,
index=12,
instance=2,
label="Mock Value B",
command_class=const.COMMAND_CLASS_SENSOR_BINARY,
type=const.TYPE_BOOL,
genre=const.GENRE_USER,
)
node.values[value2.value_id] = value2
await hass.async_add_executor_job(
mock_receivers[MockNetwork.SIGNAL_NODE_ADDED], node
)
await hass.async_add_executor_job(
mock_receivers[MockNetwork.SIGNAL_VALUE_ADDED], node, value
)
await hass.async_add_executor_job(
mock_receivers[MockNetwork.SIGNAL_VALUE_ADDED], node, value2
)
await hass.async_block_till_done()
assert hass.states.get("binary_sensor.mock_node_mock_value").state == "off"
assert hass.states.get("binary_sensor.mock_node_mock_value_b").state == "off"
ent_reg = er.async_get(hass)
dev_reg = dr.async_get(hass)
entry = ent_reg.async_get("zwave.mock_node")
assert entry is not None
assert entry.unique_id == f"node-{node.node_id}"
node_dev_id = entry.device_id
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value")
assert entry is not None
assert entry.unique_id == f"{node.node_id}-{value.object_id}"
assert entry.name is None
assert entry.device_id == node_dev_id
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value_b")
assert entry is not None
assert entry.unique_id == f"{node.node_id}-{value2.object_id}"
assert entry.name is None
assert entry.device_id != node_dev_id
device_id_b = entry.device_id
device = dev_reg.async_get(node_dev_id)
assert device is not None
assert device.name == node.name
old_device = device
device = dev_reg.async_get(device_id_b)
assert device is not None
assert device.name == f"{node.name} ({value2.instance})"
# test renaming without updating
await hass.services.async_call(
"zwave",
"rename_node",
{const.ATTR_NODE_ID: node.node_id, ATTR_NAME: "Demo Node"},
)
await hass.async_block_till_done()
assert node.name == "Demo Node"
entry = ent_reg.async_get("zwave.mock_node")
assert entry is not None
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value")
assert entry is not None
entry = ent_reg.async_get("binary_sensor.mock_node_mock_value_b")
assert entry is not None
device = dev_reg.async_get(node_dev_id)
assert device is not None
assert device.id == old_device.id
assert device.name == node.name
device = dev_reg.async_get(device_id_b)
assert device is not None
assert device.name == f"{node.name} ({value2.instance})"
# test renaming
await hass.services.async_call(
"zwave",
"rename_node",
{
const.ATTR_NODE_ID: node.node_id,
const.ATTR_UPDATE_IDS: True,
ATTR_NAME: "New Node",
},
)
await hass.async_block_till_done()
assert node.name == "New Node"
entry = ent_reg.async_get("zwave.new_node")
assert entry is not None
assert entry.unique_id == f"node-{node.node_id}"
entry = ent_reg.async_get("binary_sensor.new_node_mock_value")
assert entry is not None
assert entry.unique_id == f"{node.node_id}-{value.object_id}"
device = dev_reg.async_get(node_dev_id)
assert device is not None
assert device.id == old_device.id
assert device.name == node.name
device = dev_reg.async_get(device_id_b)
assert device is not None
assert device.name == f"{node.name} ({value2.instance})"
await hass.services.async_call(
"zwave",
"rename_value",
{
const.ATTR_NODE_ID: node.node_id,
const.ATTR_VALUE_ID: value.object_id,
const.ATTR_UPDATE_IDS: True,
ATTR_NAME: "New Label",
},
)
await hass.async_block_till_done()
entry = ent_reg.async_get("binary_sensor.new_node_new_label")
assert entry is not None
assert entry.unique_id == f"{node.node_id}-{value.object_id}"
async def test_value_discovery_existing_entity(hass, mock_openzwave):
"""Test discovery of a node."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(
node_id=11,
generic=const.GENERIC_TYPE_THERMOSTAT,
specific=const.SPECIFIC_TYPE_THERMOSTAT_GENERAL_V2,
)
thermostat_mode = MockValue(
data="Heat",
data_items=["Off", "Heat"],
node=node,
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
genre=const.GENRE_USER,
)
setpoint_heating = MockValue(
data=22.0,
node=node,
command_class=const.COMMAND_CLASS_THERMOSTAT_SETPOINT,
index=1,
genre=const.GENRE_USER,
)
await hass.async_add_executor_job(mock_receivers[0], node, thermostat_mode)
await hass.async_block_till_done()
def mock_update(self):
self.hass.add_job(self.async_update_ha_state)
with patch.object(
zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=mock_update
):
await hass.async_add_executor_job(mock_receivers[0], node, setpoint_heating)
await hass.async_block_till_done()
assert (
hass.states.get("climate.mock_node_mock_value").attributes["temperature"]
== 22.0
)
assert (
hass.states.get("climate.mock_node_mock_value").attributes[
"current_temperature"
]
is None
)
with patch.object(
zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=mock_update
):
temperature = MockValue(
data=23.5,
node=node,
index=1,
command_class=const.COMMAND_CLASS_SENSOR_MULTILEVEL,
genre=const.GENRE_USER,
units="C",
)
await hass.async_add_executor_job(mock_receivers[0], node, temperature)
await hass.async_block_till_done()
assert (
hass.states.get("climate.mock_node_mock_value").attributes["temperature"]
== 22.0
)
assert (
hass.states.get("climate.mock_node_mock_value").attributes[
"current_temperature"
]
== 23.5
)
async def test_value_discovery_legacy_thermostat(hass, mock_openzwave):
"""Test discovery of a node. Special case for legacy thermostats."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(
node_id=11,
generic=const.GENERIC_TYPE_THERMOSTAT,
specific=const.SPECIFIC_TYPE_SETPOINT_THERMOSTAT,
)
setpoint_heating = MockValue(
data=22.0,
node=node,
command_class=const.COMMAND_CLASS_THERMOSTAT_SETPOINT,
index=1,
genre=const.GENRE_USER,
)
await hass.async_add_executor_job(mock_receivers[0], node, setpoint_heating)
await hass.async_block_till_done()
assert (
hass.states.get("climate.mock_node_mock_value").attributes["temperature"]
== 22.0
)
async def test_power_schemes(hass, mock_openzwave):
"""Test power attribute."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_VALUE_ADDED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
node = MockNode(node_id=11, generic=const.GENERIC_TYPE_SWITCH_BINARY)
switch = MockValue(
data=True,
node=node,
index=12,
instance=13,
command_class=const.COMMAND_CLASS_SWITCH_BINARY,
genre=const.GENRE_USER,
type=const.TYPE_BOOL,
)
await hass.async_add_executor_job(mock_receivers[0], node, switch)
await hass.async_block_till_done()
assert hass.states.get("switch.mock_node_mock_value").state == "on"
assert (
"power_consumption"
not in hass.states.get("switch.mock_node_mock_value").attributes
)
def mock_update(self):
self.hass.add_job(self.async_update_ha_state)
with patch.object(
zwave.node_entity.ZWaveBaseEntity, "maybe_schedule_update", new=mock_update
):
power = MockValue(
data=23.5,
node=node,
index=const.INDEX_SENSOR_MULTILEVEL_POWER,
instance=13,
command_class=const.COMMAND_CLASS_SENSOR_MULTILEVEL,
genre=const.GENRE_USER, # to avoid exception
)
await hass.async_add_executor_job(mock_receivers[0], node, power)
await hass.async_block_till_done()
assert (
hass.states.get("switch.mock_node_mock_value").attributes["power_consumption"]
== 23.5
)
async def test_network_ready(hass, mock_openzwave):
"""Test Node network ready event."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_ALL_NODES_QUERIED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
events = []
def listener(event):
events.append(event)
hass.bus.async_listen(const.EVENT_NETWORK_COMPLETE, listener)
await hass.async_add_executor_job(mock_receivers[0])
await hass.async_block_till_done()
assert len(events) == 1
async def test_network_complete(hass, mock_openzwave):
"""Test Node network complete event."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_AWAKE_NODES_QUERIED:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
events = []
def listener(event):
events.append(event)
hass.bus.async_listen(const.EVENT_NETWORK_READY, listener)
await hass.async_add_executor_job(mock_receivers[0])
await hass.async_block_till_done()
assert len(events) == 1
async def test_network_complete_some_dead(hass, mock_openzwave):
"""Test Node network complete some dead event."""
mock_receivers = []
def mock_connect(receiver, signal, *args, **kwargs):
if signal == MockNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD:
mock_receivers.append(receiver)
with patch("pydispatch.dispatcher.connect", new=mock_connect):
await async_setup_component(hass, "zwave", {"zwave": {}})
await hass.async_block_till_done()
assert len(mock_receivers) == 1
events = []
def listener(event):
events.append(event)
hass.bus.async_listen(const.EVENT_NETWORK_COMPLETE_SOME_DEAD, listener)
await hass.async_add_executor_job(mock_receivers[0])
await hass.async_block_till_done()
assert len(events) == 1
async def test_entity_discovery(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave, zwave_setup
):
"""Test the creation of a new entity."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
assert not mock_discovery.async_load_platform.called
assert values.primary is value_class.primary
assert len(list(values)) == 3
assert sorted(values, key=lambda a: id(a)) == sorted(
[value_class.primary, None, None], key=lambda a: id(a)
)
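    # Only the primary value has been matched so far; the secondary and
    # optional slots stay None until check_value() fills them below.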
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
values.check_value(value_class.secondary)
await hass.async_block_till_done()
assert mock_discovery.async_load_platform.called
assert len(mock_discovery.async_load_platform.mock_calls) == 1
args = mock_discovery.async_load_platform.mock_calls[0][1]
assert args[0] == hass
assert args[1] == "mock_component"
assert args[2] == "zwave"
assert args[3] == {
const.DISCOVERY_DEVICE: mock_import_module().get_device().unique_id
}
assert args[4] == zwave_config
assert values.secondary is value_class.secondary
assert len(list(values)) == 3
assert sorted(values, key=lambda a: id(a)) == sorted(
[value_class.primary, value_class.secondary, None], key=lambda a: id(a)
)
mock_discovery.async_load_platform.reset_mock()
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
values.check_value(value_class.optional)
values.check_value(value_class.duplicate_secondary)
values.check_value(value_class.no_match_value)
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
assert values.optional is value_class.optional
assert len(list(values)) == 3
assert sorted(values, key=lambda a: id(a)) == sorted(
[value_class.primary, value_class.secondary, value_class.optional],
key=lambda a: id(a),
)
assert values._entity.value_added.called
assert len(values._entity.value_added.mock_calls) == 1
assert values._entity.value_changed.called
assert len(values._entity.value_changed.mock_calls) == 1
async def test_entity_existing_values(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave, zwave_setup
):
"""Test the loading of already discovered values."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
value_class.optional.value_id: value_class.optional,
value_class.no_match_value.value_id: value_class.no_match_value,
}
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
await hass.async_block_till_done()
assert mock_discovery.async_load_platform.called
assert len(mock_discovery.async_load_platform.mock_calls) == 1
args = mock_discovery.async_load_platform.mock_calls[0][1]
assert args[0] == hass
assert args[1] == "mock_component"
assert args[2] == "zwave"
assert args[3] == {
const.DISCOVERY_DEVICE: mock_import_module().get_device().unique_id
}
assert args[4] == zwave_config
assert not value_class.primary.enable_poll.called
assert values.primary is value_class.primary
assert values.secondary is value_class.secondary
assert values.optional is value_class.optional
assert len(list(values)) == 3
assert sorted(values, key=lambda a: id(a)) == sorted(
[value_class.primary, value_class.secondary, value_class.optional],
key=lambda a: id(a),
)
async def test_node_schema_mismatch(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave, zwave_setup
):
"""Test node schema mismatch."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.generic = "no_match"
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
}
mock_schema[const.DISC_GENERIC_DEVICE_CLASS] = ["generic_match"]
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
values._check_entity_ready()
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
async def test_entity_workaround_component(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave, zwave_setup
):
"""Test component workaround."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
node.manufacturer_id = "010f"
node.product_type = "0b00"
value_class.primary.command_class = const.COMMAND_CLASS_SENSOR_ALARM
entity_id = "binary_sensor.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
mock_schema = {
const.DISC_COMPONENT: "mock_component",
const.DISC_VALUES: {
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY]
}
},
}
with patch.object(
zwave, "async_dispatcher_send"
) as mock_dispatch_send, patch.object(
zwave, "discovery", mock_discovery
), patch.object(
zwave, "import_module", mock_import_module
):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
values._check_entity_ready()
await hass.async_block_till_done()
assert mock_dispatch_send.called
assert len(mock_dispatch_send.mock_calls) == 1
args = mock_dispatch_send.mock_calls[0][1]
assert args[1] == "zwave_new_binary_sensor"
async def test_entity_workaround_ignore(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave, zwave_setup
):
"""Test ignore workaround."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.manufacturer_id = "010f"
node.product_type = "0301"
value_class.primary.command_class = const.COMMAND_CLASS_SWITCH_BINARY
mock_schema = {
const.DISC_COMPONENT: "mock_component",
const.DISC_VALUES: {
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY]
}
},
}
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
values._check_entity_ready()
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
async def test_entity_config_ignore(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave, zwave_setup
):
"""Test ignore config."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
}
device_config = {entity_id: {zwave.CONF_IGNORED: True}}
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
values._check_entity_ready()
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
async def test_entity_config_ignore_with_registry(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave, zwave_setup
):
"""Test ignore config.
The case when the device is in entity registry.
"""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
}
device_config = {"mock_component.registry_id": {zwave.CONF_IGNORED: True}}
with patch.object(registry, "async_schedule_save"):
registry.async_get_or_create(
"mock_component",
zwave.DOMAIN,
"567-1000",
suggested_object_id="registry_id",
)
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
async def test_entity_platform_ignore(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave, zwave_setup
):
"""Test platform ignore device."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
}
import_module = MagicMock()
platform = MagicMock()
import_module.return_value = platform
platform.get_device.return_value = None
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", import_module
):
zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
await hass.async_block_till_done()
assert not mock_discovery.async_load_platform.called
async def test_config_polling_intensity(
hass, mock_discovery, mock_import_module, mock_values, mock_openzwave, zwave_setup
):
"""Test polling intensity."""
(node, value_class, mock_schema) = mock_values
registry = mock_registry(hass)
entity_id = "mock_component.mock_node_mock_value"
zwave_config = {"zwave": {}}
device_config = {entity_id: {}}
node.values = {
value_class.primary.value_id: value_class.primary,
value_class.secondary.value_id: value_class.secondary,
}
device_config = {entity_id: {zwave.CONF_POLLING_INTENSITY: 123}}
with patch.object(zwave, "discovery", mock_discovery), patch.object(
zwave, "import_module", mock_import_module
):
values = zwave.ZWaveDeviceEntityValues(
hass=hass,
schema=mock_schema,
primary_value=value_class.primary,
zwave_config=zwave_config,
device_config=device_config,
registry=registry,
)
values._check_entity_ready()
await hass.async_block_till_done()
assert mock_discovery.async_load_platform.called
assert value_class.primary.enable_poll.called
assert len(value_class.primary.enable_poll.mock_calls) == 1
assert value_class.primary.enable_poll.mock_calls[0][1][0] == 123
async def test_device_config_glob_is_ordered():
"""Test that device_config_glob preserves order."""
conf = CONFIG_SCHEMA({"zwave": {CONF_DEVICE_CONFIG_GLOB: OrderedDict()}})
assert isinstance(conf["zwave"][CONF_DEVICE_CONFIG_GLOB], OrderedDict)
async def test_add_node(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave add_node service."""
zwave_network = hass.data[DATA_NETWORK]
await hass.services.async_call("zwave", "add_node", {})
await hass.async_block_till_done()
assert zwave_network.controller.add_node.called
assert len(zwave_network.controller.add_node.mock_calls) == 1
assert len(zwave_network.controller.add_node.mock_calls[0][1]) == 0
async def test_add_node_secure(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave add_node_secure service."""
zwave_network = hass.data[DATA_NETWORK]
await hass.services.async_call("zwave", "add_node_secure", {})
await hass.async_block_till_done()
assert zwave_network.controller.add_node.called
assert len(zwave_network.controller.add_node.mock_calls) == 1
assert zwave_network.controller.add_node.mock_calls[0][1][0] is True
async def test_remove_node(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave remove_node service."""
zwave_network = hass.data[DATA_NETWORK]
await hass.services.async_call("zwave", "remove_node", {})
await hass.async_block_till_done()
assert zwave_network.controller.remove_node.called
assert len(zwave_network.controller.remove_node.mock_calls) == 1
async def test_cancel_command(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave cancel_command service."""
zwave_network = hass.data[DATA_NETWORK]
await hass.services.async_call("zwave", "cancel_command", {})
await hass.async_block_till_done()
assert zwave_network.controller.cancel_command.called
assert len(zwave_network.controller.cancel_command.mock_calls) == 1
async def test_heal_network(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave heal_network service."""
zwave_network = hass.data[DATA_NETWORK]
await hass.services.async_call("zwave", "heal_network", {})
await hass.async_block_till_done()
assert zwave_network.heal.called
assert len(zwave_network.heal.mock_calls) == 1
async def test_soft_reset(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave soft_reset service."""
zwave_network = hass.data[DATA_NETWORK]
await hass.services.async_call("zwave", "soft_reset", {})
await hass.async_block_till_done()
assert zwave_network.controller.soft_reset.called
assert len(zwave_network.controller.soft_reset.mock_calls) == 1
async def test_test_network(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave test_network service."""
zwave_network = hass.data[DATA_NETWORK]
await hass.services.async_call("zwave", "test_network", {})
await hass.async_block_till_done()
assert zwave_network.test.called
assert len(zwave_network.test.mock_calls) == 1
async def test_stop_network(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave stop_network service."""
zwave_network = hass.data[DATA_NETWORK]
with patch.object(hass.bus, "fire") as mock_fire:
await hass.services.async_call("zwave", "stop_network", {})
await hass.async_block_till_done()
assert zwave_network.stop.called
assert len(zwave_network.stop.mock_calls) == 1
assert mock_fire.called
assert len(mock_fire.mock_calls) == 1
assert mock_fire.mock_calls[0][1][0] == const.EVENT_NETWORK_STOP
async def test_rename_node(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave rename_node service."""
zwave_network = hass.data[DATA_NETWORK]
zwave_network.nodes = {11: MagicMock()}
await hass.services.async_call(
"zwave",
"rename_node",
{const.ATTR_NODE_ID: 11, ATTR_NAME: "test_name"},
)
await hass.async_block_till_done()
assert zwave_network.nodes[11].name == "test_name"
async def test_rename_value(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave rename_value service."""
zwave_network = hass.data[DATA_NETWORK]
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, label="Old Label")
node.values = {123456: value}
zwave_network.nodes = {11: node}
assert value.label == "Old Label"
await hass.services.async_call(
"zwave",
"rename_value",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
ATTR_NAME: "New Label",
},
)
await hass.async_block_till_done()
assert value.label == "New Label"
async def test_set_poll_intensity_enable(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave set_poll_intensity service, successful set."""
zwave_network = hass.data[DATA_NETWORK]
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=0)
node.values = {123456: value}
zwave_network.nodes = {11: node}
assert value.poll_intensity == 0
await hass.services.async_call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 4,
},
)
await hass.async_block_till_done()
enable_poll = value.enable_poll
assert value.enable_poll.called
assert len(enable_poll.mock_calls) == 2
assert enable_poll.mock_calls[0][1][0] == 4
async def test_set_poll_intensity_enable_failed(
hass, mock_openzwave, zwave_setup_ready
):
"""Test zwave set_poll_intensity service, failed set."""
zwave_network = hass.data[DATA_NETWORK]
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=0)
value.enable_poll.return_value = False
node.values = {123456: value}
zwave_network.nodes = {11: node}
assert value.poll_intensity == 0
await hass.services.async_call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 4,
},
)
await hass.async_block_till_done()
enable_poll = value.enable_poll
assert value.enable_poll.called
assert len(enable_poll.mock_calls) == 1
async def test_set_poll_intensity_disable(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave set_poll_intensity service, successful disable."""
zwave_network = hass.data[DATA_NETWORK]
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=4)
node.values = {123456: value}
zwave_network.nodes = {11: node}
assert value.poll_intensity == 4
await hass.services.async_call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 0,
},
)
await hass.async_block_till_done()
disable_poll = value.disable_poll
assert value.disable_poll.called
assert len(disable_poll.mock_calls) == 2
async def test_set_poll_intensity_disable_failed(
hass, mock_openzwave, zwave_setup_ready
):
"""Test zwave set_poll_intensity service, failed disable."""
zwave_network = hass.data[DATA_NETWORK]
node = MockNode(node_id=14)
value = MockValue(index=12, value_id=123456, poll_intensity=4)
value.disable_poll.return_value = False
node.values = {123456: value}
zwave_network.nodes = {11: node}
assert value.poll_intensity == 4
await hass.services.async_call(
"zwave",
"set_poll_intensity",
{
const.ATTR_NODE_ID: 11,
const.ATTR_VALUE_ID: 123456,
const.ATTR_POLL_INTENSITY: 0,
},
)
await hass.async_block_till_done()
disable_poll = value.disable_poll
assert value.disable_poll.called
assert len(disable_poll.mock_calls) == 1
async def test_remove_failed_node(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave remove_failed_node service."""
zwave_network = hass.data[DATA_NETWORK]
await hass.services.async_call(
"zwave", "remove_failed_node", {const.ATTR_NODE_ID: 12}
)
await hass.async_block_till_done()
remove_failed_node = zwave_network.controller.remove_failed_node
assert remove_failed_node.called
assert len(remove_failed_node.mock_calls) == 1
assert remove_failed_node.mock_calls[0][1][0] == 12
async def test_replace_failed_node(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave replace_failed_node service."""
zwave_network = hass.data[DATA_NETWORK]
await hass.services.async_call(
"zwave", "replace_failed_node", {const.ATTR_NODE_ID: 13}
)
await hass.async_block_till_done()
replace_failed_node = zwave_network.controller.replace_failed_node
assert replace_failed_node.called
assert len(replace_failed_node.mock_calls) == 1
assert replace_failed_node.mock_calls[0][1][0] == 13
async def test_set_config_parameter(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave set_config_parameter service."""
zwave_network = hass.data[DATA_NETWORK]
value_byte = MockValue(
index=12,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_BYTE,
)
value_list = MockValue(
index=13,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_LIST,
data_items=["item1", "item2", "item3"],
)
value_button = MockValue(
index=14,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_BUTTON,
)
value_list_int = MockValue(
index=15,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_LIST,
data_items=["1", "2", "3"],
)
value_bool = MockValue(
index=16,
command_class=const.COMMAND_CLASS_CONFIGURATION,
type=const.TYPE_BOOL,
)
node = MockNode(node_id=14)
node.get_values.return_value = {
12: value_byte,
13: value_list,
14: value_button,
15: value_list_int,
16: value_bool,
}
zwave_network.nodes = {14: node}
# Byte
await hass.services.async_call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 12,
const.ATTR_CONFIG_VALUE: 7,
},
)
await hass.async_block_till_done()
assert value_byte.data == 7
# List
await hass.services.async_call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 13,
const.ATTR_CONFIG_VALUE: "item3",
},
)
await hass.async_block_till_done()
assert value_list.data == "item3"
# Button
await hass.services.async_call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 14,
const.ATTR_CONFIG_VALUE: True,
},
)
await hass.async_block_till_done()
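    # Button type parameters carry no data; setting one presses and releases
    # the virtual button through the openzwave manager.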
assert zwave_network.manager.pressButton.called
assert zwave_network.manager.releaseButton.called
# List of Ints
await hass.services.async_call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 15,
const.ATTR_CONFIG_VALUE: 3,
},
)
await hass.async_block_till_done()
assert value_list_int.data == "3"
# Boolean Truthy
await hass.services.async_call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 16,
const.ATTR_CONFIG_VALUE: "True",
},
)
await hass.async_block_till_done()
assert value_bool.data == 1
# Boolean Falsy
await hass.services.async_call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 16,
const.ATTR_CONFIG_VALUE: "False",
},
)
await hass.async_block_till_done()
assert value_bool.data == 0
# Different Parameter Size
await hass.services.async_call(
"zwave",
"set_config_parameter",
{
const.ATTR_NODE_ID: 14,
const.ATTR_CONFIG_PARAMETER: 19,
const.ATTR_CONFIG_VALUE: 0x01020304,
const.ATTR_CONFIG_SIZE: 4,
},
)
await hass.async_block_till_done()
assert node.set_config_param.called
assert len(node.set_config_param.mock_calls) == 1
assert node.set_config_param.mock_calls[0][1][0] == 19
assert node.set_config_param.mock_calls[0][1][1] == 0x01020304
assert node.set_config_param.mock_calls[0][1][2] == 4
node.set_config_param.reset_mock()
async def test_print_config_parameter(hass, mock_openzwave, zwave_setup_ready, caplog):
"""Test zwave print_config_parameter service."""
zwave_network = hass.data[DATA_NETWORK]
value1 = MockValue(
index=12, command_class=const.COMMAND_CLASS_CONFIGURATION, data=1234
)
value2 = MockValue(
index=13, command_class=const.COMMAND_CLASS_CONFIGURATION, data=2345
)
node = MockNode(node_id=14)
node.values = {12: value1, 13: value2}
zwave_network.nodes = {14: node}
caplog.clear()
await hass.services.async_call(
"zwave",
"print_config_parameter",
{const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_PARAMETER: 13},
)
await hass.async_block_till_done()
assert "Config parameter 13 on Node 14: 2345" in caplog.text
async def test_print_node(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave print_node_parameter service."""
zwave_network = hass.data[DATA_NETWORK]
node = MockNode(node_id=14)
zwave_network.nodes = {14: node}
with patch.object(zwave, "_LOGGER") as mock_logger:
await hass.services.async_call("zwave", "print_node", {const.ATTR_NODE_ID: 14})
await hass.async_block_till_done()
assert "FOUND NODE " in mock_logger.info.mock_calls[0][1][0]
async def test_set_wakeup(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave set_wakeup service."""
zwave_network = hass.data[DATA_NETWORK]
value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
node = MockNode(node_id=14)
node.values = {12: value}
node.get_values.return_value = node.values
zwave_network.nodes = {14: node}
await hass.services.async_call(
"zwave", "set_wakeup", {const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_VALUE: 15}
)
await hass.async_block_till_done()
assert value.data == 15
node.can_wake_up_value = False
await hass.services.async_call(
"zwave", "set_wakeup", {const.ATTR_NODE_ID: 14, const.ATTR_CONFIG_VALUE: 20}
)
await hass.async_block_till_done()
assert value.data == 15
async def test_reset_node_meters(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave reset_node_meters service."""
zwave_network = hass.data[DATA_NETWORK]
value = MockValue(
instance=1, index=8, data=99.5, command_class=const.COMMAND_CLASS_METER
)
reset_value = MockValue(
instance=1, index=33, command_class=const.COMMAND_CLASS_METER
)
node = MockNode(node_id=14)
node.values = {8: value, 33: reset_value}
node.get_values.return_value = node.values
zwave_network.nodes = {14: node}
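    # First request a reset on instance 2: the reset button lives on
    # instance 1, so nothing should be pressed.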
await hass.services.async_call(
"zwave",
"reset_node_meters",
{const.ATTR_NODE_ID: 14, const.ATTR_INSTANCE: 2},
)
await hass.async_block_till_done()
assert not zwave_network.manager.pressButton.called
assert not zwave_network.manager.releaseButton.called
await hass.services.async_call(
"zwave", "reset_node_meters", {const.ATTR_NODE_ID: 14}
)
await hass.async_block_till_done()
assert zwave_network.manager.pressButton.called
(value_id,) = zwave_network.manager.pressButton.mock_calls.pop(0)[1]
assert value_id == reset_value.value_id
assert zwave_network.manager.releaseButton.called
(value_id,) = zwave_network.manager.releaseButton.mock_calls.pop(0)[1]
assert value_id == reset_value.value_id
async def test_add_association(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave change_association service."""
zwave_network = hass.data[DATA_NETWORK]
ZWaveGroup = mock_openzwave.group.ZWaveGroup
group = MagicMock()
ZWaveGroup.return_value = group
value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
node = MockNode(node_id=14)
node.values = {12: value}
node.get_values.return_value = node.values
zwave_network.nodes = {14: node}
await hass.services.async_call(
"zwave",
"change_association",
{
const.ATTR_ASSOCIATION: "add",
const.ATTR_NODE_ID: 14,
const.ATTR_TARGET_NODE_ID: 24,
const.ATTR_GROUP: 3,
const.ATTR_INSTANCE: 5,
},
)
await hass.async_block_till_done()
assert ZWaveGroup.called
assert len(ZWaveGroup.mock_calls) == 2
assert ZWaveGroup.mock_calls[0][1][0] == 3
assert ZWaveGroup.mock_calls[0][1][2] == 14
assert group.add_association.called
assert len(group.add_association.mock_calls) == 1
assert group.add_association.mock_calls[0][1][0] == 24
assert group.add_association.mock_calls[0][1][1] == 5
async def test_remove_association(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave change_association service."""
zwave_network = hass.data[DATA_NETWORK]
ZWaveGroup = mock_openzwave.group.ZWaveGroup
group = MagicMock()
ZWaveGroup.return_value = group
value = MockValue(index=12, command_class=const.COMMAND_CLASS_WAKE_UP)
node = MockNode(node_id=14)
node.values = {12: value}
node.get_values.return_value = node.values
zwave_network.nodes = {14: node}
await hass.services.async_call(
"zwave",
"change_association",
{
const.ATTR_ASSOCIATION: "remove",
const.ATTR_NODE_ID: 14,
const.ATTR_TARGET_NODE_ID: 24,
const.ATTR_GROUP: 3,
const.ATTR_INSTANCE: 5,
},
)
await hass.async_block_till_done()
assert ZWaveGroup.called
assert len(ZWaveGroup.mock_calls) == 2
assert ZWaveGroup.mock_calls[0][1][0] == 3
assert ZWaveGroup.mock_calls[0][1][2] == 14
assert group.remove_association.called
assert len(group.remove_association.mock_calls) == 1
assert group.remove_association.mock_calls[0][1][0] == 24
assert group.remove_association.mock_calls[0][1][1] == 5
async def test_refresh_node(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave refresh_node service."""
zwave_network = hass.data[DATA_NETWORK]
node = MockNode(node_id=14)
zwave_network.nodes = {14: node}
await hass.services.async_call("zwave", "refresh_node", {const.ATTR_NODE_ID: 14})
await hass.async_block_till_done()
assert node.refresh_info.called
assert len(node.refresh_info.mock_calls) == 1
async def test_set_node_value(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave set_node_value service."""
zwave_network = hass.data[DATA_NETWORK]
value = MockValue(index=12, command_class=const.COMMAND_CLASS_INDICATOR, data=4)
node = MockNode(node_id=14, command_classes=[const.COMMAND_CLASS_INDICATOR])
node.values = {12: value}
node.get_values.return_value = node.values
zwave_network.nodes = {14: node}
await hass.services.async_call(
"zwave",
"set_node_value",
{
const.ATTR_NODE_ID: 14,
const.ATTR_VALUE_ID: 12,
const.ATTR_CONFIG_VALUE: 2,
},
)
await hass.async_block_till_done()
assert zwave_network.nodes[14].values[12].data == 2
async def test_set_node_value_with_long_id_and_text_value(
hass, mock_openzwave, zwave_setup_ready
):
"""Test zwave set_node_value service."""
zwave_network = hass.data[DATA_NETWORK]
value = MockValue(
index=87512398541236578,
command_class=const.COMMAND_CLASS_SWITCH_COLOR,
data="#ff0000",
)
node = MockNode(node_id=14, command_classes=[const.COMMAND_CLASS_SWITCH_COLOR])
node.values = {87512398541236578: value}
node.get_values.return_value = node.values
zwave_network.nodes = {14: node}
await hass.services.async_call(
"zwave",
"set_node_value",
{
const.ATTR_NODE_ID: 14,
const.ATTR_VALUE_ID: "87512398541236578",
const.ATTR_CONFIG_VALUE: "#00ff00",
},
)
await hass.async_block_till_done()
assert zwave_network.nodes[14].values[87512398541236578].data == "#00ff00"
async def test_refresh_node_value(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave refresh_node_value service."""
zwave_network = hass.data[DATA_NETWORK]
node = MockNode(
node_id=14,
command_classes=[const.COMMAND_CLASS_INDICATOR],
network=zwave_network,
)
value = MockValue(
node=node, index=12, command_class=const.COMMAND_CLASS_INDICATOR, data=2
)
value.refresh = MagicMock()
node.values = {12: value}
node.get_values.return_value = node.values
zwave_network.nodes = {14: node}
await hass.services.async_call(
"zwave",
"refresh_node_value",
{const.ATTR_NODE_ID: 14, const.ATTR_VALUE_ID: 12},
)
await hass.async_block_till_done()
assert value.refresh.called
async def test_heal_node(hass, mock_openzwave, zwave_setup_ready):
"""Test zwave heal_node service."""
zwave_network = hass.data[DATA_NETWORK]
node = MockNode(node_id=19)
zwave_network.nodes = {19: node}
await hass.services.async_call("zwave", "heal_node", {const.ATTR_NODE_ID: 19})
await hass.async_block_till_done()
assert node.heal.called
assert len(node.heal.mock_calls) == 1
async def test_test_node(hass, mock_openzwave, zwave_setup_ready):
"""Test the zwave test_node service."""
zwave_network = hass.data[DATA_NETWORK]
node = MockNode(node_id=19)
zwave_network.nodes = {19: node}
await hass.services.async_call("zwave", "test_node", {const.ATTR_NODE_ID: 19})
await hass.async_block_till_done()
assert node.test.called
assert len(node.test.mock_calls) == 1
|
jawilson/home-assistant
|
tests/components/zwave/test_init.py
|
Python
|
apache-2.0
| 62,662
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using queue
"""
import ddt
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from cinder import context
from cinder import db
from cinder import exception
from cinder import manager
from cinder import objects
from cinder import rpc
from cinder import service
from cinder import test
test_service_opts = [
cfg.StrOpt("fake_manager",
default="cinder.tests.unit.test_service.FakeManager",
help="Manager for testing"),
cfg.StrOpt("test_service_listen",
help="Host to bind test service to"),
cfg.IntOpt("test_service_listen_port",
default=0,
help="Port number to bind test service to"), ]
CONF = cfg.CONF
CONF.register_opts(test_service_opts)
class FakeManager(manager.Manager):
"""Fake manager for tests."""
def __init__(self, host=None,
db_driver=None, service_name=None, cluster=None):
super(FakeManager, self).__init__(host=host,
db_driver=db_driver,
cluster=cluster)
def test_method(self):
return 'manager'
class ExtendedService(service.Service):
def test_method(self):
return 'service'
class ServiceManagerTestCase(test.TestCase):
"""Test cases for Services."""
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
def test_message_gets_to_manager(self, is_upgrading_mock):
serv = service.Service('test',
'test',
'test',
'cinder.tests.unit.test_service.FakeManager')
serv.start()
self.assertEqual('manager', serv.test_method())
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
def test_override_manager_method(self, is_upgrading_mock):
serv = ExtendedService('test',
'test',
'test',
'cinder.tests.unit.test_service.FakeManager')
serv.start()
self.assertEqual('service', serv.test_method())
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
@mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'test': '1.5'})
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'test': '1.3'})
def test_reset(self, is_upgrading_mock):
serv = service.Service('test',
'test',
'test',
'cinder.tests.unit.test_service.FakeManager')
serv.start()
serv.reset()
self.assertEqual({}, rpc.LAST_OBJ_VERSIONS)
self.assertEqual({}, rpc.LAST_RPC_VERSIONS)
class ServiceFlagsTestCase(test.TestCase):
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
def test_service_enabled_on_create_based_on_flag(self,
is_upgrading_mock=False):
ctxt = context.get_admin_context()
self.flags(enable_new_services=True)
host = 'foo'
binary = 'cinder-fake'
cluster = 'cluster'
app = service.Service.create(host=host, binary=binary, cluster=cluster)
ref = db.service_get(ctxt, app.service_id)
db.service_destroy(ctxt, app.service_id)
self.assertFalse(ref.disabled)
# Check that the cluster is also enabled
db_cluster = objects.ClusterList.get_all(ctxt)[0]
self.assertFalse(db_cluster.disabled)
db.cluster_destroy(ctxt, db_cluster.id)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
def test_service_disabled_on_create_based_on_flag(self, is_upgrading_mock):
ctxt = context.get_admin_context()
self.flags(enable_new_services=False)
host = 'foo'
binary = 'cinder-fake'
cluster = 'cluster'
app = service.Service.create(host=host, binary=binary, cluster=cluster)
ref = db.service_get(ctxt, app.service_id)
db.service_destroy(ctxt, app.service_id)
self.assertTrue(ref.disabled)
        # Check that the cluster is also disabled
db_cluster = objects.ClusterList.get_all(ctxt)[0]
self.assertTrue(db_cluster.disabled)
db.cluster_destroy(ctxt, db_cluster.id)
@ddt.ddt
class ServiceTestCase(test.TestCase):
"""Test cases for Services."""
def setUp(self):
super(ServiceTestCase, self).setUp()
self.host = 'foo'
self.binary = 'cinder-fake'
self.topic = 'fake'
self.service_ref = {'host': self.host,
'binary': self.binary,
'topic': self.topic,
'report_count': 0,
'availability_zone': 'nova',
'id': 1}
self.ctxt = context.get_admin_context()
def _check_app(self, app, cluster=None, cluster_exists=None,
is_upgrading=False, svc_id=None, added_to_cluster=None):
"""Check that Service instance and DB service and cluster are ok."""
self.assertIsNotNone(app)
# Check that we have the service ID
self.assertTrue(hasattr(app, 'service_id'))
if svc_id:
self.assertEqual(svc_id, app.service_id)
# Check that cluster has been properly set
self.assertEqual(cluster, app.cluster)
# Check that the entry has been really created in the DB
svc = objects.Service.get_by_id(self.ctxt, app.service_id)
cluster_name = cluster if cluster_exists is not False else None
# Check that cluster name matches
self.assertEqual(cluster_name, svc.cluster_name)
clusters = objects.ClusterList.get_all(self.ctxt)
if added_to_cluster is None:
added_to_cluster = not is_upgrading
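            # While an upgrade is in progress the service is not yet added to
            # its cluster, so the expected default is the inverse of is_upgrading.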
if cluster_name:
# Make sure we have created the cluster in the DB
self.assertEqual(1, len(clusters))
cluster = clusters[0]
self.assertEqual(cluster_name, cluster.name)
self.assertEqual(self.binary, cluster.binary)
else:
# Make sure we haven't created any cluster in the DB
self.assertListEqual([], clusters.objects)
self.assertEqual(added_to_cluster, app.added_to_cluster)
@ddt.data(False, True)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n')
def test_create(self, is_upgrading, is_upgrading_mock):
"""Test non clustered service creation."""
is_upgrading_mock.return_value = is_upgrading
# NOTE(vish): Create was moved out of mock replay to make sure that
# the looping calls are created in StartService.
app = service.Service.create(host=self.host,
binary=self.binary,
topic=self.topic)
self._check_app(app, is_upgrading=is_upgrading)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
def test_create_with_cluster_not_upgrading(self, is_upgrading_mock):
"""Test DB cluster creation when service is created."""
cluster_name = 'cluster'
app = service.Service.create(host=self.host, binary=self.binary,
cluster=cluster_name, topic=self.topic)
self._check_app(app, cluster_name)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=True)
def test_create_with_cluster_upgrading(self, is_upgrading_mock):
"""Test that we don't create the cluster while we are upgrading."""
cluster_name = 'cluster'
app = service.Service.create(host=self.host, binary=self.binary,
cluster=cluster_name, topic=self.topic)
self._check_app(app, cluster_name, cluster_exists=False,
is_upgrading=True)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
def test_create_svc_exists_upgrade_cluster(self, is_upgrading_mock):
"""Test that we update cluster_name field when cfg has changed."""
# Create the service in the DB
db_svc = db.service_create(context.get_admin_context(),
{'host': self.host, 'binary': self.binary,
'topic': self.topic,
'cluster_name': None})
cluster_name = 'cluster'
app = service.Service.create(host=self.host, binary=self.binary,
cluster=cluster_name, topic=self.topic)
self._check_app(app, cluster_name, svc_id=db_svc.id)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=True)
def test_create_svc_exists_not_upgrade_cluster(self, is_upgrading_mock):
"""Test we don't update cluster_name on cfg change when upgrading."""
# Create the service in the DB
db_svc = db.service_create(context.get_admin_context(),
{'host': self.host, 'binary': self.binary,
'topic': self.topic,
'cluster': None})
cluster_name = 'cluster'
app = service.Service.create(host=self.host, binary=self.binary,
cluster=cluster_name, topic=self.topic)
self._check_app(app, cluster_name, cluster_exists=False,
is_upgrading=True, svc_id=db_svc.id)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
@mock.patch.object(objects.service.Service, 'get_by_args')
@mock.patch.object(objects.service.Service, 'get_by_id')
def test_report_state_newly_disconnected(self, get_by_id, get_by_args,
is_upgrading_mock):
get_by_args.side_effect = exception.NotFound()
get_by_id.side_effect = db_exc.DBConnectionError()
with mock.patch.object(objects.service, 'db') as mock_db:
mock_db.service_create.return_value = self.service_ref
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager'
)
serv.start()
serv.report_state()
self.assertTrue(serv.model_disconnected)
self.assertFalse(mock_db.service_update.called)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
@mock.patch.object(objects.service.Service, 'get_by_args')
@mock.patch.object(objects.service.Service, 'get_by_id')
def test_report_state_disconnected_DBError(self, get_by_id, get_by_args,
is_upgrading_mock):
get_by_args.side_effect = exception.NotFound()
get_by_id.side_effect = db_exc.DBError()
with mock.patch.object(objects.service, 'db') as mock_db:
mock_db.service_create.return_value = self.service_ref
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager'
)
serv.start()
serv.report_state()
self.assertTrue(serv.model_disconnected)
self.assertFalse(mock_db.service_update.called)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
@mock.patch('cinder.db.sqlalchemy.api.service_update')
@mock.patch('cinder.db.sqlalchemy.api.service_get')
def test_report_state_newly_connected(self, get_by_id, service_update,
is_upgrading_mock):
get_by_id.return_value = self.service_ref
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager'
)
serv.start()
serv.model_disconnected = True
serv.report_state()
self.assertFalse(serv.model_disconnected)
self.assertTrue(service_update.called)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
def test_report_state_manager_not_working(self, is_upgrading_mock):
with mock.patch('cinder.db') as mock_db:
mock_db.service_get.return_value = self.service_ref
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager'
)
serv.manager.is_working = mock.Mock(return_value=False)
serv.start()
serv.report_state()
serv.manager.is_working.assert_called_once_with()
self.assertFalse(mock_db.service_update.called)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
def test_service_with_long_report_interval(self, is_upgrading_mock):
self.override_config('service_down_time', 10)
self.override_config('report_interval', 10)
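        # With report_interval >= service_down_time, Service.create is expected
        # to raise service_down_time to 2.5x the report interval (10 -> 25).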
service.Service.create(
binary="test_service",
manager="cinder.tests.unit.test_service.FakeManager")
self.assertEqual(25, CONF.service_down_time)
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
@mock.patch.object(rpc, 'get_server')
@mock.patch('cinder.db')
def test_service_stop_waits_for_rpcserver(self, mock_db, mock_rpc,
is_upgrading_mock):
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager'
)
serv.start()
serv.stop()
serv.wait()
serv.rpcserver.start.assert_called_once_with()
serv.rpcserver.stop.assert_called_once_with()
serv.rpcserver.wait.assert_called_once_with()
@mock.patch('cinder.service.Service.is_svc_upgrading_to_n',
return_value=False)
@mock.patch('cinder.service.Service.report_state')
@mock.patch('cinder.service.Service.periodic_tasks')
@mock.patch.object(service.loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(rpc, 'get_server')
@mock.patch('cinder.db')
def test_service_stop_waits_for_timers(self, mock_db, mock_rpc,
mock_loopcall, mock_periodic,
mock_report, is_upgrading_mock):
"""Test that we wait for loopcalls only if stop succeeds."""
serv = service.Service(
self.host,
self.binary,
self.topic,
'cinder.tests.unit.test_service.FakeManager',
report_interval=5,
periodic_interval=10,
)
# One of the loopcalls will raise an exception on stop
mock_loopcall.side_effect = (
mock.Mock(**{'stop.side_effect': Exception}),
mock.Mock())
serv.start()
serv.stop()
serv.wait()
serv.rpcserver.start.assert_called_once_with()
serv.rpcserver.stop.assert_called_once_with()
serv.rpcserver.wait.assert_called_once_with()
# The first loopcall will have failed on the stop call, so we will not
# have waited for it to stop
self.assertEqual(1, serv.timers[0].start.call_count)
self.assertEqual(1, serv.timers[0].stop.call_count)
self.assertFalse(serv.timers[0].wait.called)
# We will wait for the second loopcall
self.assertEqual(1, serv.timers[1].start.call_count)
self.assertEqual(1, serv.timers[1].stop.call_count)
self.assertEqual(1, serv.timers[1].wait.call_count)
@mock.patch('cinder.manager.Manager.init_host')
@mock.patch.object(service.loopingcall, 'FixedIntervalLoopingCall')
@mock.patch('oslo_messaging.Target')
@mock.patch.object(rpc, 'get_server')
def _check_rpc_servers_and_init_host(self, app, added_to_cluster, cluster,
rpc_mock, target_mock, loop_mock,
init_host_mock):
app.start()
# Since we have created the service entry we call init_host with
# added_to_cluster=True
init_host_mock.assert_called_once_with(
added_to_cluster=added_to_cluster)
expected_target_calls = [mock.call(topic=self.topic, server=self.host)]
expected_rpc_calls = [mock.call(target_mock.return_value, mock.ANY,
mock.ANY),
mock.call().start()]
if cluster and added_to_cluster:
self.assertIsNotNone(app.cluster_rpcserver)
expected_target_calls.append(mock.call(topic=self.topic,
server=cluster))
expected_rpc_calls.extend(expected_rpc_calls[:])
# Check that we create message targets for host and cluster
target_mock.assert_has_calls(expected_target_calls)
# Check we get and start rpc services for host and cluster
rpc_mock.assert_has_calls(expected_rpc_calls)
self.assertIsNotNone(app.rpcserver)
app.stop()
@mock.patch('cinder.objects.Service.get_minimum_obj_version',
return_value='1.6')
def test_start_rpc_and_init_host_no_cluster(self, is_upgrading_mock):
"""Test that without cluster we don't create rpc service."""
app = service.Service.create(host=self.host, binary='cinder-volume',
cluster=None, topic=self.topic)
self._check_rpc_servers_and_init_host(app, True, None)
@ddt.data('1.3', '1.7')
@mock.patch('cinder.objects.Service.get_minimum_obj_version')
def test_start_rpc_and_init_host_cluster(self, obj_version,
get_min_obj_mock):
"""Test that with cluster we create the rpc service."""
get_min_obj_mock.return_value = obj_version
cluster = 'cluster'
app = service.Service.create(host=self.host, binary='cinder-volume',
cluster=cluster, topic=self.topic)
self._check_rpc_servers_and_init_host(app, obj_version != '1.3',
cluster)
class TestWSGIService(test.TestCase):
def setUp(self):
super(TestWSGIService, self).setUp()
@mock.patch('oslo_service.wsgi.Loader')
def test_service_random_port(self, mock_loader):
test_service = service.WSGIService("test_service")
self.assertEqual(0, test_service.port)
test_service.start()
self.assertNotEqual(0, test_service.port)
test_service.stop()
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Loader')
def test_reset_pool_size_to_default(self, mock_loader):
test_service = service.WSGIService("test_service")
test_service.start()
# Stopping the service, which in turn sets pool size to 0
test_service.stop()
self.assertEqual(0, test_service.server._pool.size)
# Resetting pool size to default
test_service.reset()
test_service.start()
self.assertEqual(cfg.CONF.wsgi_default_pool_size,
test_service.server._pool.size)
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Loader')
def test_workers_set_default(self, mock_loader):
self.override_config('osapi_volume_listen_port',
CONF.test_service_listen_port)
test_service = service.WSGIService("osapi_volume")
self.assertEqual(processutils.get_worker_count(),
test_service.workers)
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Loader')
def test_workers_set_good_user_setting(self, mock_loader):
self.override_config('osapi_volume_listen_port',
CONF.test_service_listen_port)
self.override_config('osapi_volume_workers', 8)
test_service = service.WSGIService("osapi_volume")
self.assertEqual(8, test_service.workers)
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Loader')
def test_workers_set_zero_user_setting(self, mock_loader):
self.override_config('osapi_volume_listen_port',
CONF.test_service_listen_port)
self.override_config('osapi_volume_workers', 0)
test_service = service.WSGIService("osapi_volume")
# If a value less than 1 is used, defaults to number of procs
# available
self.assertEqual(processutils.get_worker_count(),
test_service.workers)
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Loader')
def test_workers_set_negative_user_setting(self, mock_loader):
self.override_config('osapi_volume_workers', -1)
self.assertRaises(exception.InvalidInput,
service.WSGIService, "osapi_volume")
self.assertTrue(mock_loader.called)
@mock.patch('oslo_service.wsgi.Server')
@mock.patch('oslo_service.wsgi.Loader')
def test_ssl_enabled(self, mock_loader, mock_server):
self.override_config('osapi_volume_use_ssl', True)
service.WSGIService("osapi_volume")
mock_server.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
port=mock.ANY, host=mock.ANY,
use_ssl=True)
self.assertTrue(mock_loader.called)
class OSCompatibilityTestCase(test.TestCase):
def _test_service_launcher(self, fake_os):
# Note(lpetrut): The cinder-volume service needs to be spawned
# differently on Windows due to an eventlet bug. For this reason,
# we must check the process launcher used.
fake_process_launcher = mock.MagicMock()
with mock.patch('os.name', fake_os):
with mock.patch('cinder.service.process_launcher',
fake_process_launcher):
launcher = service.get_launcher()
if fake_os == 'nt':
self.assertEqual(service.Launcher, type(launcher))
else:
self.assertEqual(fake_process_launcher(), launcher)
def test_process_launcher_on_windows(self):
self._test_service_launcher('nt')
def test_process_launcher_on_linux(self):
self._test_service_launcher('posix')
|
Hybrid-Cloud/cinder
|
cinder/tests/unit/test_service.py
|
Python
|
apache-2.0
| 23,882
|
#
# Copyright 2006 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import pywraps2 as s2
class PyWrapS2TestCase(unittest.TestCase):
def testContainsIsWrappedCorrectly(self):
london = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.3368602, 0.4931979),
s2.S2LatLng.FromDegrees(51.7323965, 0.1495211))
e14lj = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.5213527, -0.0476026),
s2.S2LatLng.FromDegrees(51.5213527, -0.0476026))
self.assertTrue(london.Contains(e14lj))
def testS2CellIdEqualsIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
cell = s2.S2CellId(london)
same_cell = s2.S2CellId(london)
self.assertEqual(cell, same_cell)
  def testS2CellIdComparisonIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
cell = s2.S2CellId(london)
self.assertLess(cell, cell.next())
self.assertGreater(cell.next(), cell)
def testS2CellIdGetEdgeNeighborsIsWrappedCorrectly(self):
cell = s2.S2CellId(0x466d319000000000)
expected_neighbors = [s2.S2CellId(0x466d31b000000000),
s2.S2CellId(0x466d317000000000),
s2.S2CellId(0x466d323000000000),
s2.S2CellId(0x466d31f000000000)]
neighbors = cell.GetEdgeNeighbors()
self.assertEqual(neighbors, expected_neighbors)
def testS2CellIdIntersectsIsTrueForOverlap(self):
cell1 = s2.S2CellId(0x89c259c000000000)
cell2 = s2.S2CellId(0x89c2590000000000)
self.assertTrue(cell1.intersects(cell2))
def testS2CellIdIntersectsIsFalseForNonOverlap(self):
cell1 = s2.S2CellId(0x89c259c000000000)
cell2 = s2.S2CellId(0x89e83d0000000000)
self.assertFalse(cell1.intersects(cell2))
def testS2HashingIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
cell = s2.S2CellId(london)
same_cell = s2.S2CellId(london)
self.assertEqual(hash(cell), hash(same_cell))
def testCovererIsWrappedCorrectly(self):
london = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.3368602, 0.4931979),
s2.S2LatLng.FromDegrees(51.7323965, 0.1495211))
e14lj = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.5213527, -0.0476026),
s2.S2LatLng.FromDegrees(51.5213527, -0.0476026))
coverer = s2.S2RegionCoverer()
coverer.set_max_cells(6)
self.assertEqual(6, coverer.max_cells())
covering = coverer.GetCovering(e14lj)
self.assertLessEqual(len(covering), 6)
for cellid in covering:
self.assertTrue(london.Contains(s2.S2Cell(cellid)))
interior = coverer.GetInteriorCovering(e14lj)
for cellid in interior:
self.assertTrue(london.Contains(s2.S2Cell(cellid)))
def testS2CellUnionIsWrappedCorrectly(self):
cell_union = s2.S2CellUnion()
cell_union.Init([0x466d319000000000, 0x466d31b000000000])
self.assertEqual(cell_union.num_cells(), 2)
trondheim = s2.S2LatLng.FromDegrees(63.431052, 10.395083)
self.assertTrue(cell_union.Contains(s2.S2CellId(trondheim)))
def testS2PolygonIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london)))
self.assertEqual(polygon.num_loops(), 1)
point = london.ToPoint()
self.assertTrue(polygon.Contains(point))
def testS2LoopIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london)))
loop = polygon.loop(0)
self.assertTrue(loop.IsValid())
self.assertEqual(0, loop.depth())
self.assertFalse(loop.is_hole())
self.assertEqual(4, loop.num_vertices())
point = london.ToPoint()
self.assertTrue(loop.Contains(point))
def testS2PolygonCopiesLoopInConstructorBecauseItTakesOwnership(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london)))
s2.S2Polygon(loop)
def testS2PolygonInitNestedIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
small_loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london)))
big_loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london).parent(1)))
polygon = s2.S2Polygon()
polygon.InitNested([big_loop, small_loop])
def testS2PolygonInitNestedWithIncorrectTypeIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london)))
polygon = s2.S2Polygon()
with self.assertRaises(TypeError):
polygon.InitNested([loop, s2.S2CellId()])
def testS2PolygonGetAreaIsWrappedCorrectly(self):
# Cell at level 10 containing central London.
london_level_10 = s2.S2CellId(
s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)).parent(10)
polygon = s2.S2Polygon(s2.S2Cell(london_level_10))
# Because S2Cell.ExactArea() isn't swigged, compare S2Polygon.GetArea() with
# S2CellUnion.ExactArea().
cell_union = s2.S2CellUnion()
cell_union.Init([london_level_10.id()])
self.assertAlmostEqual(cell_union.ExactArea(), polygon.GetArea(), places=10)
def testGetS2LatLngVertexIsWrappedCorrectly(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london)))
loop = polygon.loop(0)
first_vertex = loop.GetS2LatLngVertex(0)
self.assertIsInstance(first_vertex, s2.S2LatLng)
self.assertEqual("51.500152,-0.126235", first_vertex.ToStringInDegrees())
second_vertex = loop.GetS2LatLngVertex(1)
self.assertIsInstance(second_vertex, s2.S2LatLng)
self.assertEqual("51.500153,-0.126235", second_vertex.ToStringInDegrees())
def testS2PolylineInitFromS2LatLngs(self):
e7_10deg = 0x5f5e100
list_ll = []
for lat, lng in [(0, 0), (0, e7_10deg), (e7_10deg, e7_10deg)]:
list_ll.append(s2.S2LatLng.FromE7(lat, lng))
line = s2.S2Polyline()
line.InitFromS2LatLngs(list_ll)
self.assertAlmostEqual(20.0, line.GetLength().degrees())
def testS2PolylineInitFromS2Points(self):
e7_10deg = 0x5f5e100
list_points = []
for lat, lng in [(0, 0), (0, e7_10deg), (e7_10deg, e7_10deg)]:
list_points.append(s2.S2LatLng.FromE7(lat, lng).ToPoint())
line = s2.S2Polyline()
line.InitFromS2Points(list_points)
self.assertAlmostEqual(20.0, line.GetLength().degrees())
def testS2PointsCanBeNormalized(self):
line = s2.S2Polyline()
line.InitFromS2LatLngs([s2.S2LatLng.FromDegrees(37.794484, -122.394871),
s2.S2LatLng.FromDegrees(37.762699, -122.435158)])
self.assertNotAlmostEqual(line.GetCentroid().Norm(), 1.0)
self.assertAlmostEqual(line.GetCentroid().Normalize().Norm(), 1.0)
  def testS1AngleComparisonIsWrappedCorrectly(self):
ten_degrees = s2.S1Angle.Degrees(10)
one_hundred_degrees = s2.S1Angle.Degrees(100)
self.assertLess(ten_degrees, one_hundred_degrees)
self.assertGreater(one_hundred_degrees, ten_degrees)
def testS2PolygonIntersectsWithPolyline(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london).parent(15)))
line = s2.S2Polyline()
line.InitFromS2LatLngs([s2.S2LatLng.FromDegrees(51.5, -0.128),
s2.S2LatLng.FromDegrees(51.5, -0.125)])
intersections = polygon.IntersectWithPolyline(line)
self.assertEqual(1, len(intersections))
def testCrossingSign(self):
a = s2.S2LatLng.FromDegrees(-1, 0).ToPoint()
b = s2.S2LatLng.FromDegrees(1, 0).ToPoint()
c = s2.S2LatLng.FromDegrees(0, -1).ToPoint()
d = s2.S2LatLng.FromDegrees(0, 1).ToPoint()
# SWIG flattens namespaces, so this is just s2.CrossingSign,
# not s2.S2.CrossingSign.
self.assertEqual(1, s2.CrossingSign(a, b, c, d))
def testGetIntersection(self):
a = s2.S2LatLng.FromDegrees(-1, 0).ToPoint()
b = s2.S2LatLng.FromDegrees(1, 0).ToPoint()
c = s2.S2LatLng.FromDegrees(0, -1).ToPoint()
d = s2.S2LatLng.FromDegrees(0, 1).ToPoint()
# SWIG namespace flattening as above.
intersection = s2.GetIntersection(a, b, c, d)
self.assertEqual(
"0.000000,0.000000", s2.S2LatLng(intersection).ToStringInDegrees())
def testS2CellDistance(self):
# Level-0 cell (i.e. face) centered at (0, 0)
cell = s2.S2Cell(s2.S2CellId(0x1000000000000000))
p1 = s2.S2LatLng.FromDegrees(0, 0).ToPoint()
self.assertTrue(cell.Contains(p1))
d1 = cell.GetDistance(p1).ToAngle().degrees()
# Inside, so distance is 0, but boundary distance is not.
self.assertEqual(0.0, d1)
bd1 = cell.GetBoundaryDistance(p1).ToAngle().degrees()
self.assertEqual(45.0, bd1)
p2 = s2.S2LatLng.FromDegrees(0, 90).ToPoint()
self.assertFalse(cell.Contains(p2))
d2 = cell.GetDistance(p2).ToAngle().degrees()
self.assertAlmostEqual(45.0, d2)
bd2 = cell.GetBoundaryDistance(p2).ToAngle().degrees()
# Outside, so distance and boundary distance are the same.
self.assertAlmostEqual(45.0, bd2)
def testS2Rotate(self):
mtv_a = s2.S2LatLng.FromDegrees(37.4402777, -121.9638888).ToPoint()
mtv_b = s2.S2LatLng.FromDegrees(37.3613888, -121.9283333).ToPoint()
angle = s2.S1Angle.Radians(0.039678)
point = s2.Rotate(mtv_a, mtv_b, angle)
self.assertEqual("37.439095,-121.967802",
s2.S2LatLng(point).ToStringInDegrees())
def testS2TurnAngle(self):
mtv_a = s2.S2LatLng.FromDegrees(37.4402777, -121.9638888).ToPoint()
mtv_b = s2.S2LatLng.FromDegrees(37.3613888, -121.9283333).ToPoint()
mtv_c = s2.S2LatLng.FromDegrees(37.3447222, -122.0308333).ToPoint()
angle = s2.TurnAngle(mtv_a, mtv_b, mtv_c)
self.assertAlmostEqual(-1.7132025, angle)
def testEncodeDecode(self):
london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)
polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london).parent(15)))
self.assertEqual(polygon.num_loops(), 1)
encoder = s2.Encoder()
polygon.Encode(encoder)
encoded = encoder.buffer()
decoder = s2.Decoder(encoded)
decoded_polygon = s2.S2Polygon()
self.assertTrue(decoded_polygon.Decode(decoder))
self.assertEqual(decoded_polygon.num_loops(), 1)
self.assertTrue(decoded_polygon.Equals(polygon))
def testS2CapRegion(self):
center = s2.S2LatLng.FromDegrees(2.0, 3.0).ToPoint()
cap = s2.S2Cap(center, s2.S1Angle.Degrees(1.0))
inside = s2.S2LatLng.FromDegrees(2.1, 2.9).ToPoint()
outside = s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint()
self.assertTrue(cap.Contains(inside))
self.assertFalse(cap.Contains(outside))
self.assertTrue(cap.Contains(s2.S2Cell(inside)))
self.assertFalse(cap.Contains(s2.S2Cell(outside)))
self.assertTrue(cap.MayIntersect(s2.S2Cell(inside)))
self.assertFalse(cap.MayIntersect(s2.S2Cell(outside)))
self.assertTrue(cap.ApproxEquals(cap.GetCapBound()))
rect_bound = cap.GetRectBound()
self.assertTrue(rect_bound.Contains(inside))
self.assertFalse(rect_bound.Contains(outside))
def testS2LatLngRectRegion(self):
rect = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(1.0, 2.0),
s2.S2LatLng.FromDegrees(3.0, 4.0))
inside = s2.S2LatLng.FromDegrees(2.0, 3.0).ToPoint()
outside = s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint()
self.assertTrue(rect.Contains(inside))
self.assertFalse(rect.Contains(outside))
self.assertTrue(rect.Contains(s2.S2Cell(inside)))
self.assertFalse(rect.Contains(s2.S2Cell(outside)))
self.assertTrue(rect.MayIntersect(s2.S2Cell(inside)))
self.assertFalse(rect.MayIntersect(s2.S2Cell(outside)))
cap_bound = rect.GetCapBound()
self.assertTrue(cap_bound.Contains(inside))
self.assertFalse(cap_bound.Contains(outside))
self.assertTrue(rect.ApproxEquals(rect.GetRectBound()))
def testS2CellRegion(self):
cell = s2.S2Cell(s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8))
inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint()
outside = s2.S2LatLng.FromDegrees(30.0, 40.0).ToPoint()
self.assertTrue(cell.Contains(inside))
self.assertFalse(cell.Contains(outside))
self.assertTrue(cell.Contains(s2.S2Cell(inside)))
self.assertFalse(cell.Contains(s2.S2Cell(outside)))
self.assertTrue(cell.MayIntersect(s2.S2Cell(inside)))
self.assertFalse(cell.MayIntersect(s2.S2Cell(outside)))
cap_bound = cell.GetCapBound()
self.assertTrue(cap_bound.Contains(inside))
self.assertFalse(cap_bound.Contains(outside))
rect_bound = cell.GetRectBound()
self.assertTrue(rect_bound.Contains(inside))
self.assertFalse(rect_bound.Contains(outside))
def testS2CellUnionRegion(self):
cell_id = s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8)
cell_union = s2.S2CellUnion()
cell_union.Init([cell_id.id()])
inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint()
outside = s2.S2LatLng.FromDegrees(30.0, 40.0).ToPoint()
self.assertTrue(cell_union.Contains(inside))
self.assertFalse(cell_union.Contains(outside))
self.assertTrue(cell_union.Contains(s2.S2Cell(inside)))
self.assertFalse(cell_union.Contains(s2.S2Cell(outside)))
self.assertTrue(cell_union.MayIntersect(s2.S2Cell(inside)))
self.assertFalse(cell_union.MayIntersect(s2.S2Cell(outside)))
cap_bound = cell_union.GetCapBound()
self.assertTrue(cap_bound.Contains(inside))
self.assertFalse(cap_bound.Contains(outside))
rect_bound = cell_union.GetRectBound()
self.assertTrue(rect_bound.Contains(inside))
self.assertFalse(rect_bound.Contains(outside))
def testS2LoopRegion(self):
cell = s2.S2Cell(s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8))
loop = s2.S2Loop(cell)
inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint()
outside = s2.S2LatLng.FromDegrees(30.0, 40.0).ToPoint()
self.assertTrue(loop.Contains(inside))
self.assertFalse(loop.Contains(outside))
self.assertTrue(loop.Contains(s2.S2Cell(inside)))
self.assertFalse(loop.Contains(s2.S2Cell(outside)))
self.assertTrue(loop.MayIntersect(s2.S2Cell(inside)))
self.assertFalse(loop.MayIntersect(s2.S2Cell(outside)))
cap_bound = loop.GetCapBound()
self.assertTrue(cap_bound.Contains(inside))
self.assertFalse(cap_bound.Contains(outside))
rect_bound = loop.GetRectBound()
self.assertTrue(rect_bound.Contains(inside))
self.assertFalse(rect_bound.Contains(outside))
def testS2PolygonRegion(self):
cell = s2.S2Cell(s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8))
polygon = s2.S2Polygon(cell)
inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint()
outside = s2.S2LatLng.FromDegrees(30.0, 40.0).ToPoint()
self.assertTrue(polygon.Contains(inside))
self.assertFalse(polygon.Contains(outside))
self.assertTrue(polygon.Contains(s2.S2Cell(inside)))
self.assertFalse(polygon.Contains(s2.S2Cell(outside)))
self.assertTrue(polygon.MayIntersect(s2.S2Cell(inside)))
self.assertFalse(polygon.MayIntersect(s2.S2Cell(outside)))
cap_bound = polygon.GetCapBound()
self.assertTrue(cap_bound.Contains(inside))
self.assertFalse(cap_bound.Contains(outside))
rect_bound = polygon.GetRectBound()
self.assertTrue(rect_bound.Contains(inside))
self.assertFalse(rect_bound.Contains(outside))
def testS2PolylineRegion(self):
polyline = s2.S2Polyline()
polyline.InitFromS2LatLngs([s2.S2LatLng.FromDegrees(0.0, 0.0),
s2.S2LatLng.FromDegrees(1.0, 1.0)])
    # Contains(S2Point) always returns false.
self.assertFalse(
polyline.Contains(s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint()))
self.assertFalse(
polyline.Contains(s2.S2Cell(s2.S2LatLng.FromDegrees(0.0, 0.0))))
self.assertTrue(
polyline.MayIntersect(s2.S2Cell(s2.S2LatLng.FromDegrees(0.0, 0.0))))
self.assertFalse(
polyline.MayIntersect(s2.S2Cell(s2.S2LatLng.FromDegrees(3.0, 4.0))))
cap_bound = polyline.GetCapBound()
self.assertTrue(
cap_bound.Contains(s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint()))
self.assertFalse(
cap_bound.Contains(s2.S2LatLng.FromDegrees(2.0, 2.0).ToPoint()))
rect_bound = polyline.GetRectBound()
self.assertTrue(
rect_bound.Contains(s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint()))
self.assertFalse(
rect_bound.Contains(s2.S2LatLng.FromDegrees(2.0, 2.0).ToPoint()))
def testS2CellIdCenterSiTi(self):
cell = s2.S2CellId.FromFacePosLevel(3, 0x12345678, s2.S2CellId.kMaxLevel)
# Check that the (si, ti) coordinates of the center end in a
# 1 followed by (30 - level) 0s.
# Leaf level, 30.
face, si, ti = cell.GetCenterSiTi()
self.assertEqual(3, face)
self.assertEqual(1 << 0, si & 1)
self.assertEqual(1 << 0, ti & 1)
# Level 29.
face, si, ti = cell.parent(s2.S2CellId.kMaxLevel - 1).GetCenterSiTi()
self.assertEqual(3, face)
self.assertEqual(1 << 1, si & 3)
self.assertEqual(1 << 1, ti & 3)
# Level 28.
face, si, ti = cell.parent(s2.S2CellId.kMaxLevel - 2).GetCenterSiTi()
self.assertEqual(3, face)
self.assertEqual(1 << 2, si & 7)
self.assertEqual(1 << 2, ti & 7)
# Level 20.
face, si, ti = cell.parent(s2.S2CellId.kMaxLevel - 10).GetCenterSiTi()
self.assertEqual(3, face)
self.assertEqual(1 << 10, si & ((1 << 11) - 1))
self.assertEqual(1 << 10, ti & ((1 << 11) - 1))
# Level 10.
face, si, ti = cell.parent(s2.S2CellId.kMaxLevel - 20).GetCenterSiTi()
self.assertEqual(3, face)
self.assertEqual(1 << 20, si & ((1 << 21) - 1))
self.assertEqual(1 << 20, ti & ((1 << 21) - 1))
# Level 0.
face, si, ti = cell.parent(0).GetCenterSiTi()
self.assertEqual(3, face)
self.assertEqual(1 << 30, si & ((1 << 31) - 1))
self.assertEqual(1 << 30, ti & ((1 << 31) - 1))
def testS2CellIdToFromFaceIJ(self):
cell = s2.S2CellId.FromFaceIJ(3, 1234, 5678)
face, i, j, _ = cell.ToFaceIJOrientation()
self.assertEqual(3, face)
self.assertEqual(1234, i)
self.assertEqual(5678, j)
if __name__ == "__main__":
unittest.main()
|
wiltonlazary/arangodb
|
3rdParty/s2geometry/dfefe0c/src/python/pywraps2_test.py
|
Python
|
apache-2.0
| 18,635
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.logs`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.hooks.logs`.",
DeprecationWarning, stacklevel=2
)
|
mtagle/airflow
|
airflow/contrib/hooks/aws_logs_hook.py
|
Python
|
apache-2.0
| 1,139
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Edgar Magana, Cisco Systems
"""
Logistic components for Service Insertion utility
"""
import logging
import re
import subprocess
import time
from quantum.openstack.common import importutils
from quantum.plugins.cisco.common import cisco_constants as const
from quantum.plugins.cisco.db import services_db as sdb
from quantum.plugins.cisco import l2network_plugin_configuration as conf
from quantum.plugins.cisco.services import services_constants as servconts
LOG = logging.getLogger(__name__)
class ServicesLogistics():
"""
Services Logistics Modules
"""
def __init__(self):
pass
def image_shutdown_verification(self, image_name):
"""
Verifies that the VM has been properly shutdown
"""
try:
service_args = []
service_args.append(servconts.DESCRIBE_VM_CMD)
service_args.append(image_name)
counter = 0
flag = False
while not flag and counter <= 5:
counter = counter + 1
time.sleep(2.5)
process = subprocess.Popen(service_args,
stdout=subprocess.PIPE)
result = process.stdout.readlines()
if not result:
flag = True
        except Exception as exc:
print exc
def image_status(self, image_name):
"""
Checks the status of the image
"""
try:
service_args = []
service_args.append(servconts.DESCRIBE_VM_CMD)
service_args.append(image_name)
counter = 0
flag = False
while not flag and counter <= 10:
counter = counter + 1
time.sleep(2.5)
process = subprocess.Popen(service_args,
stdout=subprocess.PIPE)
result = process.stdout.readlines()
if result:
tokens = re.search("running", str(result[1]))
if tokens:
service_status = tokens.group(0)
if service_status == "running":
flag = True
except Exception as exc:
print exc
def image_exist(self, image_name):
"""
Verifies that the image id is available
"""
try:
service_vm = sdb.get_service_bindings(image_name)
if service_vm:
return True
else:
return False
except Exception as exc:
print exc
def verify_plugin(self, plugin_key):
"""
        Verifies that the plugin is available
"""
_plugins = {}
for key in conf.PLUGINS[const.PLUGINS].keys():
plugin_obj = conf.PLUGINS[const.PLUGINS][key]
_plugins[key] = importutils.import_object(plugin_obj)
if not plugin_key in _plugins.keys():
LOG.debug("No %s Plugin loaded" % plugin_key)
return False
else:
LOG.debug("Plugin %s founded" % const.UCS_PLUGIN)
return True
def press_key(self):
"""
        Waits for an external input
"""
key = raw_input("Press any key to continue")
return key
|
FreescaleSemiconductor/quantum
|
quantum/plugins/cisco/services/services_logistics.py
|
Python
|
apache-2.0
| 4,003
|
# ***** BEGIN LICENSE BLOCK *****
#
# For copyright and licensing please refer to COPYING.
#
# ***** END LICENSE BLOCK *****
from __future__ import nested_scopes
import os
import sys
RABBITMQ_PUBLIC_UMBRELLA = '../../rabbitmq-public-umbrella'
RABBITMQ_CODEGEN = 'rabbitmq-codegen'
PIKA_SPEC = '../pika/spec.py'
CODEGEN_PATH = os.path.realpath('%s/%s' % (RABBITMQ_PUBLIC_UMBRELLA,
RABBITMQ_CODEGEN))
print('codegen-path: %s' % CODEGEN_PATH)
sys.path.append(CODEGEN_PATH)
import amqp_codegen
import re
DRIVER_METHODS = {
"Exchange.Bind": ["Exchange.BindOk"],
"Exchange.Unbind": ["Exchange.UnbindOk"],
"Exchange.Declare": ["Exchange.DeclareOk"],
"Exchange.Delete": ["Exchange.DeleteOk"],
"Queue.Declare": ["Queue.DeclareOk"],
"Queue.Bind": ["Queue.BindOk"],
"Queue.Purge": ["Queue.PurgeOk"],
"Queue.Delete": ["Queue.DeleteOk"],
"Queue.Unbind": ["Queue.UnbindOk"],
"Basic.Qos": ["Basic.QosOk"],
"Basic.Get": ["Basic.GetOk", "Basic.GetEmpty"],
"Basic.Ack": [],
"Basic.Reject": [],
"Basic.Recover": ["Basic.RecoverOk"],
"Basic.RecoverAsync": [],
"Tx.Select": ["Tx.SelectOk"],
"Tx.Commit": ["Tx.CommitOk"],
"Tx.Rollback": ["Tx.RollbackOk"]
}
def fieldvalue(v):
if isinstance(v, unicode):
return repr(v.encode('ascii'))
else:
return repr(v)
def normalize_separators(s):
s = s.replace('-', '_')
s = s.replace(' ', '_')
return s
def pyize(s):
s = normalize_separators(s)
if s in ('global', 'class'):
s += '_'
return s
def camel(s):
return normalize_separators(s).title().replace('_', '')
amqp_codegen.AmqpMethod.structName = lambda m: camel(
m.klass.name) + '.' + camel(m.name)
amqp_codegen.AmqpClass.structName = lambda c: camel(c.name) + "Properties"
def constantName(s):
return '_'.join(re.split('[- ]', s.upper()))
def flagName(c, f):
if c:
return c.structName() + '.' + constantName('flag_' + f.name)
else:
return constantName('flag_' + f.name)
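# Illustrative examples for the name helpers above (explanatory only, assuming the
# definitions as written; the generator does not execute these comments):
#   pyize('no-wait')          -> 'no_wait'
#   pyize('global')           -> 'global_'   (reserved words get a trailing underscore)
#   camel('basic-ack')        -> 'BasicAck'
#   constantName('frame-end') -> 'FRAME_END'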
def generate(specPath):
spec = amqp_codegen.AmqpSpec(specPath)
def genSingleDecode(prefix, cLvalue, unresolved_domain):
type = spec.resolveDomain(unresolved_domain)
if type == 'shortstr':
print(prefix + "%s, offset = data.decode_short_string(encoded, offset)" % cLvalue)
elif type == 'longstr':
print(prefix +
"length = struct.unpack_from('>I', encoded, offset)[0]")
print(prefix + "offset += 4")
print(prefix + "%s = encoded[offset:offset + length]" % cLvalue)
print(prefix + "try:")
print(prefix + " %s = str(%s)" % (cLvalue, cLvalue))
print(prefix + "except UnicodeEncodeError:")
print(prefix + " pass")
print(prefix + "offset += length")
elif type == 'octet':
print(prefix + "%s = struct.unpack_from('B', encoded, offset)[0]" %
cLvalue)
print(prefix + "offset += 1")
elif type == 'short':
print(prefix + "%s = struct.unpack_from('>H', encoded, offset)[0]" %
cLvalue)
print(prefix + "offset += 2")
elif type == 'long':
print(prefix + "%s = struct.unpack_from('>I', encoded, offset)[0]" %
cLvalue)
print(prefix + "offset += 4")
elif type == 'longlong':
print(prefix + "%s = struct.unpack_from('>Q', encoded, offset)[0]" %
cLvalue)
print(prefix + "offset += 8")
elif type == 'timestamp':
print(prefix + "%s = struct.unpack_from('>Q', encoded, offset)[0]" %
cLvalue)
print(prefix + "offset += 8")
elif type == 'bit':
raise Exception("Can't decode bit in genSingleDecode")
elif type == 'table':
print(Exception(prefix + "(%s, offset) = data.decode_table(encoded, offset)" % \
cLvalue))
else:
raise Exception("Illegal domain in genSingleDecode", type)
def genSingleEncode(prefix, cValue, unresolved_domain):
type = spec.resolveDomain(unresolved_domain)
if type == 'shortstr':
print(prefix + \
"assert isinstance(%s, str_or_bytes),\\\n%s 'A non-string value was supplied for %s'" \
% (cValue, prefix, cValue))
print(prefix + "data.encode_short_string(pieces, %s)" % cValue)
elif type == 'longstr':
print(prefix + \
"assert isinstance(%s, str_or_bytes),\\\n%s 'A non-string value was supplied for %s'" \
% (cValue, prefix, cValue))
print(
prefix +
"value = %s.encode('utf-8') if isinstance(%s, unicode_type) else %s"
% (cValue, cValue, cValue))
print(prefix + "pieces.append(struct.pack('>I', len(value)))")
print(prefix + "pieces.append(value)")
elif type == 'octet':
print(prefix + "pieces.append(struct.pack('B', %s))" % cValue)
elif type == 'short':
print(prefix + "pieces.append(struct.pack('>H', %s))" % cValue)
elif type == 'long':
print(prefix + "pieces.append(struct.pack('>I', %s))" % cValue)
elif type == 'longlong':
print(prefix + "pieces.append(struct.pack('>Q', %s))" % cValue)
elif type == 'timestamp':
print(prefix + "pieces.append(struct.pack('>Q', %s))" % cValue)
elif type == 'bit':
raise Exception("Can't encode bit in genSingleEncode")
elif type == 'table':
print(Exception(prefix + "data.encode_table(pieces, %s)" % cValue))
else:
raise Exception("Illegal domain in genSingleEncode", type)
def genDecodeMethodFields(m):
print(" def decode(self, encoded, offset=0):")
bitindex = None
for f in m.arguments:
if spec.resolveDomain(f.domain) == 'bit':
if bitindex is None:
bitindex = 0
if bitindex >= 8:
bitindex = 0
if not bitindex:
print(
" bit_buffer = struct.unpack_from('B', encoded, offset)[0]")
print(" offset += 1")
print(" self.%s = (bit_buffer & (1 << %d)) != 0" % \
(pyize(f.name), bitindex))
bitindex += 1
else:
bitindex = None
genSingleDecode(" ", "self.%s" % (pyize(f.name),),
f.domain)
print(" return self")
print('')
def genDecodeProperties(c):
print(" def decode(self, encoded, offset=0):")
print(" flags = 0")
print(" flagword_index = 0")
print(" while True:")
print(
" partial_flags = struct.unpack_from('>H', encoded, offset)[0]")
print(" offset += 2")
print(
" flags = flags | (partial_flags << (flagword_index * 16))")
print(" if not (partial_flags & 1):")
print(" break")
print(" flagword_index += 1")
for f in c.fields:
if spec.resolveDomain(f.domain) == 'bit':
print(" self.%s = (flags & %s) != 0" %
(pyize(f.name), flagName(c, f)))
else:
print(" if flags & %s:" % (flagName(c, f),))
genSingleDecode(" ", "self.%s" % (pyize(f.name),),
f.domain)
print(" else:")
print(" self.%s = None" % (pyize(f.name),))
print(" return self")
print('')
def genEncodeMethodFields(m):
print(" def encode(self):")
print(" pieces = list()")
bitindex = None
def finishBits():
if bitindex is not None:
print(" pieces.append(struct.pack('B', bit_buffer))")
for f in m.arguments:
if spec.resolveDomain(f.domain) == 'bit':
if bitindex is None:
bitindex = 0
print(" bit_buffer = 0")
if bitindex >= 8:
finishBits()
print(" bit_buffer = 0")
bitindex = 0
print(" if self.%s:" % pyize(f.name))
print(" bit_buffer = bit_buffer | (1 << %d)" % \
bitindex)
bitindex += 1
else:
finishBits()
bitindex = None
genSingleEncode(" ", "self.%s" % (pyize(f.name),),
f.domain)
finishBits()
print(" return pieces")
print('')
def genEncodeProperties(c):
print(" def encode(self):")
print(" pieces = list()")
print(" flags = 0")
for f in c.fields:
if spec.resolveDomain(f.domain) == 'bit':
print(" if self.%s: flags = flags | %s" %
(pyize(f.name), flagName(c, f)))
else:
print(" if self.%s is not None:" % (pyize(f.name),))
print(" flags = flags | %s" % (flagName(c, f),))
genSingleEncode(" ", "self.%s" % (pyize(f.name),),
f.domain)
print(" flag_pieces = list()")
print(" while True:")
print(" remainder = flags >> 16")
print(" partial_flags = flags & 0xFFFE")
print(" if remainder != 0:")
print(" partial_flags |= 1")
print(
" flag_pieces.append(struct.pack('>H', partial_flags))")
print(" flags = remainder")
print(" if not flags:")
print(" break")
print(" return flag_pieces + pieces")
print('')
def fieldDeclList(fields):
return ''.join([", %s=%s" % (pyize(f.name), fieldvalue(f.defaultvalue))
for f in fields])
def fieldInitList(prefix, fields):
if fields:
return ''.join(["%sself.%s = %s\n" % (prefix, pyize(f.name), pyize(f.name)) \
for f in fields])
else:
return '%spass\n' % (prefix,)
print("""# ***** BEGIN LICENSE BLOCK *****
#
# For copyright and licensing please refer to COPYING.
#
# ***** END LICENSE BLOCK *****
# NOTE: Autogenerated code by codegen.py, do not edit
import struct
from pika import amqp_object
from pika import data
from pika.compat import str_or_bytes, unicode_type
str = bytes
""")
print("PROTOCOL_VERSION = (%d, %d, %d)" % (spec.major, spec.minor,
spec.revision))
print("PORT = %d" % spec.port)
print('')
    # Append some constants that aren't in the spec json file
spec.constants.append(('FRAME_MAX_SIZE', 131072, ''))
spec.constants.append(('FRAME_HEADER_SIZE', 7, ''))
spec.constants.append(('FRAME_END_SIZE', 1, ''))
constants = {}
for c, v, cls in spec.constants:
constants[constantName(c)] = v
for key in sorted(constants.keys()):
print("%s = %s" % (key, constants[key]))
print('')
for c in spec.allClasses():
print('')
print('class %s(amqp_object.Class):' % (camel(c.name),))
print('')
print(" INDEX = 0x%.04X # %d" % (c.index, c.index))
print(" NAME = %s" % (fieldvalue(camel(c.name)),))
print('')
for m in c.allMethods():
print(' class %s(amqp_object.Method):' % (camel(m.name),))
print('')
methodid = m.klass.index << 16 | m.index
print(" INDEX = 0x%.08X # %d, %d; %d" % \
(methodid,
m.klass.index,
m.index,
methodid))
print(" NAME = %s" % (fieldvalue(m.structName(),)))
print('')
print(" def __init__(self%s):" %
(fieldDeclList(m.arguments),))
print(fieldInitList(' ', m.arguments))
print(" @property")
print(" def synchronous(self):")
print(" return %s" % m.isSynchronous)
print('')
genDecodeMethodFields(m)
genEncodeMethodFields(m)
for c in spec.allClasses():
if c.fields:
print('')
print('class %s(amqp_object.Properties):' % (c.structName(),))
print('')
print(" CLASS = %s" % (camel(c.name),))
print(" INDEX = 0x%.04X # %d" % (c.index, c.index))
print(" NAME = %s" % (fieldvalue(c.structName(),)))
print('')
index = 0
if c.fields:
for f in c.fields:
if index % 16 == 15:
index += 1
shortnum = index / 16
partialindex = 15 - (index % 16)
bitindex = shortnum * 16 + partialindex
print(' %s = (1 << %d)' % (flagName(None, f), bitindex))
index += 1
print('')
print(" def __init__(self%s):" % (fieldDeclList(c.fields),))
print(fieldInitList(' ', c.fields))
genDecodeProperties(c)
genEncodeProperties(c)
print("methods = {")
print(',\n'.join([" 0x%08X: %s" % (m.klass.index << 16 | m.index, m.structName()) \
for m in spec.allMethods()]))
print("}")
print('')
print("props = {")
print(',\n'.join([" 0x%04X: %s" % (c.index, c.structName()) \
for c in spec.allClasses() \
if c.fields]))
print("}")
print('')
print('')
print("def has_content(methodNumber):")
print(' return methodNumber in (')
for m in spec.allMethods():
if m.hasContent:
print(' %s.INDEX,' % m.structName())
print(' )')
if __name__ == "__main__":
with open(PIKA_SPEC, 'w') as handle:
sys.stdout = handle
generate(['%s/amqp-rabbitmq-0.9.1.json' % CODEGEN_PATH])
|
vrtsystems/pika
|
utils/codegen.py
|
Python
|
bsd-3-clause
| 14,858
|
import operator
import re
import xapian
from django.db import models
from django.utils.functional import curry
class X(models.Q):
pass
i = lambda f: lambda a, b: f(a.lower(), b.lower())
startswith = lambda a, b: a.startswith(b)
endswith = lambda a, b: a.endswith(b)
regex = lambda a, b: re.match(b, a) is not None
iregex = lambda a, b: re.match(b, a, re.I) is not None
class CompositeDecider(xapian.MatchDecider):
# operators map
op_map = {
'exact': operator.eq,
'iexact': i(operator.eq),
'startswith': startswith,
'istartswith': i(startswith),
'endswith': endswith,
'iendswith': i(endswith),
'contains': operator.contains,
'icontains': i(operator.contains),
'regex': regex,
'iregex': iregex,
'in': lambda a, b: operator.contains(b, a),
'gt': operator.gt,
'gte': operator.ge,
'lt': operator.lt,
'lte': operator.le,
}
def __init__(self, model, tags, filter, exclude):
xapian.MatchDecider.__init__(self)
self._model = model
self._tags = tags
self._values_map = dict([(t.prefix, t.number) for t in tags])
self._filter = filter
self._exclude = exclude
def __call__(self, document):
if self._filter and not self._do_x(self._filter, document):
return False
if self._exclude and self._do_x(self._exclude, document):
return False
return True
def get_tag(self, index):
for tag in self._tags:
if tag.number == index:
return tag
raise ValueError("No tag with number '%s'" % index)
def _do_x(self, field, document):
for child in field.children:
if isinstance(child, X):
result = self._do_x(child, document)
else:
result = self._do_field(child[0], child[1], document)
if (result and field.connector == 'OR')\
or (not result and field.connector == 'AND'):
break
if field.negated:
return not result
else:
return result
def _do_field(self, lookup, value, document):
if '__' in lookup:
field, op = lookup.split('__', 1)
else:
field, op = lookup, 'exact'
if op not in self.op_map:
raise ValueError("Unknown lookup operator '%s'" % op)
op = self.op_map[op]
doc_value = document.get_value(self._values_map[field])
convert = self.get_tag(self._values_map[field]).convert
if isinstance(value, (list, tuple)):
value = map(convert, value)
else:
value = convert(value)
operands = [
doc_value,
value,
]
return reduce(op, operands)
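# Illustrative usage (hypothetical field names, not part of the original module):
# X subclasses django's Q, so lookups combine the same way, e.g.
#   flt = X(title__istartswith='django') & ~X(title__icontains='draft')
# The result can then be passed as the `filter` or `exclude` argument of
# CompositeDecider together with the model and its indexed tags.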
|
pombreda/djapian
|
src/djapian/decider.py
|
Python
|
bsd-3-clause
| 2,849
|
# encoding: utf-8
"""
coroutine.py
Created by Thomas Mangin on 2013-07-01.
Copyright (c) 2009-2012 Exa Networks. All rights reserved.
"""
from functools import wraps
def each(function):
@wraps(function)
def start(*args, **kwargs):
generator = function(*args, **kwargs)
return lambda: generator.next()
return start
def join (function):
@wraps(function)
def start (*args, **kwargs):
return ''.join(function(*args, **kwargs))
return start
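# Illustrative usage (explanatory only, not part of the original module):
# `each` turns a generator function into a factory of step functions, while
# `join` concatenates everything a generator function yields.
#   @each
#   def counter():
#       yield 1
#       yield 2
#   step = counter()
#   step()  # -> 1
#   step()  # -> 2 (the next call would raise StopIteration)
#   @join
#   def letters():
#       yield 'a'
#       yield 'b'
#   letters()  # -> 'ab'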
|
mshahbaz/exabgp
|
lib/exabgp/util/coroutine.py
|
Python
|
bsd-3-clause
| 452
|
import functools
from django.shortcuts import redirect
def submit_step(outer_step):
"""Wraps the function with a decorator that bounces to the right step."""
def decorator(f):
@functools.wraps(f)
def wrapper(request, *args, **kw):
from mkt.submit.views import _resume
from mkt.submit.models import AppSubmissionChecklist
addon = kw.get('addon', False)
if addon:
try:
step = addon.appsubmissionchecklist.get_next()
except AppSubmissionChecklist.DoesNotExist:
step = None
if step and step != outer_step:
return _resume(addon, step)
return f(request, *args, **kw)
wrapper.submitting = True
return wrapper
return decorator
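# Illustrative usage sketch (step name and view are hypothetical, not taken from
# mkt.submit.views): wrapping a submission view ties it to a checklist step, e.g.
#   @submit_step('terms')
#   def terms(request, addon=None):
#       ...
# If the checklist reports a different pending step, the request is redirected
# there via _resume() instead of running the wrapped view.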
def read_dev_agreement_required(f):
"""
Decorator that checks if the user has read the dev agreement, redirecting
if not.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(request, *args, **kw):
if not request.amo_user.read_dev_agreement:
return redirect('submit.app')
return f(request, *args, **kw)
return wrapper
return decorator(f)
|
wagnerand/zamboni
|
mkt/submit/decorators.py
|
Python
|
bsd-3-clause
| 1,263
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
def get_notification_config():
return {
"for_doctype": {
"Error Log": {"seen": 0},
"Communication": {"status": "Open", "communication_type": "Communication"},
"ToDo": "frappe.core.notifications.get_things_todo",
"Event": "frappe.core.notifications.get_todays_events",
"Error Snapshot": {"seen": 0, "parent_error_snapshot": None},
},
"for_other": {
"Likes": "frappe.core.notifications.get_unseen_likes",
"Chat": "frappe.core.notifications.get_unread_messages",
"Email": "frappe.core.notifications.get_unread_emails",
}
}
def get_things_todo(as_list=False):
"""Returns a count of incomplete todos"""
data = frappe.get_list("ToDo",
fields=["name", "description"] if as_list else "count(*)",
filters=[["ToDo", "status", "=", "Open"]],
or_filters=[["ToDo", "owner", "=", frappe.session.user],
["ToDo", "assigned_by", "=", frappe.session.user]],
as_list=True)
if as_list:
return data
else:
return data[0][0]
def get_todays_events(as_list=False):
"""Returns a count of todays events in calendar"""
from frappe.desk.doctype.event.event import get_events
from frappe.utils import nowdate
today = nowdate()
events = get_events(today, today)
return events if as_list else len(events)
def get_unread_messages():
"returns unread (docstatus-0 messages for a user)"
return frappe.db.sql("""\
SELECT count(*)
FROM `tabCommunication`
WHERE communication_type in ('Chat', 'Notification')
AND reference_doctype = 'User'
AND reference_name = %s
and modified >= DATE_SUB(NOW(),INTERVAL 1 YEAR)
AND seen=0
""", (frappe.session.user,))[0][0]
def get_unseen_likes():
"""Returns count of unseen likes"""
return frappe.db.sql("""select count(*) from `tabCommunication`
where
communication_type='Comment'
and modified >= DATE_SUB(NOW(),INTERVAL 1 YEAR)
and comment_type='Like'
and owner is not null and owner!=%(user)s
and reference_owner=%(user)s
and seen=0""", {"user": frappe.session.user})[0][0]
def get_unread_emails():
"returns unread emails for a user"
return frappe.db.sql("""\
SELECT count(*)
FROM `tabCommunication`
WHERE communication_type='Communication'
AND communication_medium="Email"
AND email_status not in ("Spam", "Trash")
AND email_account in (
SELECT distinct email_account from `tabUser Email` WHERE parent=%(user)s
)
AND modified >= DATE_SUB(NOW(),INTERVAL 1 YEAR)
AND seen=0
""", {"user": frappe.session.user})[0][0]
|
bcornwellmott/frappe
|
frappe/core/notifications.py
|
Python
|
mit
| 2,601
|
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, HttpResponseRedirect, redirect
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
# Create your views here.
from billing.models import Transaction
from notifications.models import Notification
from .forms import LoginForm, RegisterForm
from .models import MyUser
@login_required
def account_home(request):
notifications = Notification.objects.get_recent_for_user(request.user, 6)
transactions = Transaction.objects.get_recent_for_user(request.user, 3)
context = {
"notifications": notifications,
"transactions": transactions
}
return render(request, "accounts/account_home.html", context)
def auth_logout(request):
logout(request)
return HttpResponseRedirect('/')
def auth_login(request):
form = LoginForm(request.POST or None)
next_url = request.GET.get('next')
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
if next_url is not None:
return HttpResponseRedirect(next_url)
return HttpResponseRedirect("/")
action_url = reverse("login")
title = "Login"
submit_btn = title
submit_btn_class = "btn-success btn-block"
extra_form_link = "Upgrade your account today <a href='%s'>here</a>!" %(reverse("account_upgrade"))
context = {
"form": form,
"action_url": action_url,
"title": title,
"submit_btn": submit_btn,
"submit_btn_class": submit_btn_class,
"extra_form_link":extra_form_link
}
return render(request, "accounts/account_login_register.html", context)
def auth_register(request):
form = RegisterForm(request.POST or None)
if form.is_valid():
username = form.cleaned_data['username']
email = form.cleaned_data['email']
password = form.cleaned_data['password2']
#MyUser.objects.create_user(username=username, email=email, password=password)
new_user = MyUser()
new_user.username = username
new_user.email = email
#new_user.password = password #WRONG
new_user.set_password(password) #RIGHT
new_user.save()
action_url = reverse("register")
title = "Register"
submit_btn = "Create free account"
context = {
"form": form,
"action_url": action_url,
"title": title,
"submit_btn": submit_btn
}
return render(request, "accounts/account_login_register.html", context)
|
climberwb/video-api
|
src/accounts/views.py
|
Python
|
mit
| 2,552
|
from math import sqrt
from ase import Atom, Atoms
from ase.neb import NEB
from ase.constraints import FixAtoms
from ase.vibrations import Vibrations
from ase.visualize import view
from ase.calculators.emt import EMT
from ase.optimize import QuasiNewton, BFGS
# Distance between Cu atoms on a (100) surface:
d = 3.6 / sqrt(2)
initial = Atoms('Cu',
positions=[(0, 0, 0)],
cell=(d, d, 1.0),
pbc=(True, True, False))
initial *= (2, 2, 1) # 2x2 (100) surface-cell
# Approximate height of Ag atom on Cu(100) surface:
h0 = 2.0
initial += Atom('Ag', (d / 2, d / 2, h0))
if 0:
view(initial)
# Make band:
images = [initial.copy() for i in range(6)]
neb = NEB(images, climb=True)
# Set constraints and calculator:
constraint = FixAtoms(range(len(initial) - 1))
for image in images:
image.set_calculator(EMT())
image.set_constraint(constraint)
# Displace last image:
images[-1].positions[-1] += (d, 0, 0)
#images[-1].positions[-1] += (d, d, 0)
# Relax height of Ag atom for initial and final states:
dyn1 = QuasiNewton(images[0])
dyn1.run(fmax=0.01)
dyn2 = QuasiNewton(images[-1])
dyn2.run(fmax=0.01)
# Interpolate positions between initial and final states:
neb.interpolate()
for image in images:
print image.positions[-1], image.get_potential_energy()
#dyn = MDMin(neb, dt=0.4)
#dyn = FIRE(neb, dt=0.4)
dyn = BFGS(neb, trajectory='mep.traj')
dyn.run(fmax=0.05)
for image in images:
print image.positions[-1], image.get_potential_energy()
a = images[0]
vib = Vibrations(a, [4])
vib.run()
print vib.get_frequencies()
vib.summary()
print vib.get_mode(-1)
vib.write_mode(-1, nimages=20)
|
grhawk/ASE
|
tools/ase/test/Ag-Cu100.py
|
Python
|
gpl-2.0
| 1,654
|
import operator
import re
from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
class VersionInterval(object):
"""
A class for a Version Interval object.
    An interval is a string representation of a mathematical interval.
    e.g.: "(3,4]", "[3.10.0,)"
    A version is a version string.
    Examples::
        >>> version_interval = VersionInterval("[2.10.1, 2.11.0)")
        >>> version = "2.10.16"
        >>> version in version_interval
        True
        >>> version = "2.13.0"
        >>> version in version_interval
        False
"""
def __init__(self, interval):
interval_rex = r"^(\[|\()(.*?)\s*,\s*(.*?)(\]|\))$"
match = re.search(interval_rex, interval)
if match is None:
raise ValueError("Invaild string representation of an interval")
self.opening, lower, upper, self.closing = match.groups()
self.lower_bound = LooseVersion(lower) if lower else None
self.upper_bound = LooseVersion(upper) if upper else None
self._check_interval()
def _check_interval(self):
if not (self.upper_bound and self.lower_bound):
return
if self.lower_bound < self.upper_bound:
return
if (self.lower_bound == self.upper_bound and self.opening == '[' and
self.closing == ']'):
return
raise ValueError("Invaild interval")
def __repr__(self):
return '<version interval %s%s, %s%s>' % (self.opening,
self.lower_bound,
self.upper_bound,
self.closing)
def __contains__(self, version):
op_mapping = {"(": operator.lt, "[": operator.le,
")": operator.gt, "]": operator.ge}
in_interval = True
version = LooseVersion(version)
if self.lower_bound:
opt = op_mapping.get(self.opening)
in_interval = opt(self.lower_bound, version)
if in_interval and self.upper_bound:
opt = op_mapping.get(self.closing)
in_interval = opt(self.upper_bound, version)
return in_interval
|
clebergnu/avocado-vt
|
virttest/utils_version.py
|
Python
|
gpl-2.0
| 2,256
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 - Wolter Hellmund <wolterh6@gmail.com>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from GTG.plugins.urgency_color.urgency_color import pluginUrgencyColor
# suppress pyflakes warning (given by make lint)
if False is True:
pluginUrgencyColor()
|
jakubbrindza/gtg
|
GTG/plugins/urgency_color/__init__.py
|
Python
|
gpl-3.0
| 884
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
try:
import json
except ImportError:
from django.utils import simplejson as json
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from django.shortcuts import get_object_or_404
from django.conf import settings
from geonode.utils import resolve_object
from geonode.base.models import ResourceBase
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
def _perms_info(obj):
info = obj.get_all_level_info()
return info
def _perms_info_json(obj):
info = _perms_info(obj)
info['users'] = dict([(u.username, perms)
for u, perms in info['users'].items()])
info['groups'] = dict([(g.name, perms)
for g, perms in info['groups'].items()])
return json.dumps(info)
def resource_permissions(request, resource_id):
try:
resource = resolve_object(
request, ResourceBase, {
'id': resource_id}, 'base.change_resourcebase_permissions')
except PermissionDenied:
# we are handling this in a non-standard way
return HttpResponse(
'You are not allowed to change permissions for this resource',
status=401,
content_type='text/plain')
if request.method == 'POST':
permission_spec = json.loads(request.body)
resource.set_permissions(permission_spec)
return HttpResponse(
json.dumps({'success': True}),
status=200,
content_type='text/plain'
)
elif request.method == 'GET':
permission_spec = _perms_info_json(resource)
return HttpResponse(
json.dumps({'success': True, 'permissions': permission_spec}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
'No methods other than get and post are allowed',
status=401,
content_type='text/plain')
@require_POST
def set_bulk_permissions(request):
permission_spec = json.loads(request.POST.get('permissions', None))
resource_ids = request.POST.getlist('resources', [])
if permission_spec is not None:
not_permitted = []
for resource_id in resource_ids:
try:
resource = resolve_object(
request, ResourceBase, {
'id': resource_id
},
'base.change_resourcebase_permissions')
resource.set_permissions(permission_spec)
except PermissionDenied:
not_permitted.append(ResourceBase.objects.get(id=resource_id).title)
return HttpResponse(
json.dumps({'success': 'ok', 'not_changed': not_permitted}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'error': 'Wrong permissions specification'}),
status=400,
content_type='text/plain')
@require_POST
def request_permissions(request):
""" Request permission to download a resource.
"""
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
try:
notification.send(
[resource.owner],
'request_download_resourcebase',
{'from_user': request.user, 'resource': resource}
)
return HttpResponse(
json.dumps({'success': 'ok', }),
status=200,
content_type='text/plain')
except:
return HttpResponse(
json.dumps({'error': 'error delivering notification'}),
status=400,
content_type='text/plain')
|
cjahangir/geodash
|
geonode/security/views.py
|
Python
|
gpl-3.0
| 4,620
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Angus Gratton <gus@projectgus.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
import re
'''
OUTPUT_PYTHON format:
Packet:
[<ptype>, <pdata>]
<ptype>:
- 'AP_READ' (AP read)
- 'DP_READ' (DP read)
- 'AP_WRITE' (AP write)
- 'DP_WRITE' (DP write)
- 'LINE_RESET' (line reset sequence)
<pdata>:
- tuple of address, ack state, data for the given sequence
'''
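# Illustrative only (hypothetical, not derived from a real trace): a single
# acknowledged DP read might appear on OUTPUT_PYTHON as something like
#   ['DP_READ', (addr, ack, data)]
# with the tuple members as described under <pdata> above; see put_python_data()
# below for the exact tuple packing this decoder emits.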
swd_states = [
'IDLE', # Idle/unknown
'REQUEST', # Request phase (first 8 bits)
'ACK', # Ack phase (next 3 bits)
'READ', # Reading phase (next 32 bits for reads)
'WRITE', # Writing phase (next 32 bits for write)
'DPARITY', # Data parity phase
]
# Regexes for matching SWD data out of bitstring ('1' / '0' characters) format
RE_SWDSWITCH = re.compile(bin(0xE79E)[:1:-1] + '$')
RE_SWDREQ = re.compile(r'1(?P<apdp>.)(?P<rw>.)(?P<addr>..)(?P<parity>.)01$')
RE_IDLE = re.compile('0' * 50 + '$')
# Sample edges
RISING = 1
FALLING = 0
ADDR_DP_SELECT = 0x8
ADDR_DP_CTRLSTAT = 0x4
BIT_SELECT_CTRLSEL = 1
BIT_CTRLSTAT_ORUNDETECT = 1
ANNOTATIONS = ['reset', 'enable', 'read', 'write', 'ack', 'data', 'parity']
class Decoder(srd.Decoder):
api_version = 3
id = 'swd'
name = 'SWD'
longname = 'Serial Wire Debug'
desc = 'Two-wire protocol for debug access to ARM CPUs.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['swd']
tags = ['Debug/trace']
channels = (
{'id': 'swclk', 'name': 'SWCLK', 'desc': 'Master clock'},
{'id': 'swdio', 'name': 'SWDIO', 'desc': 'Data input/output'},
)
options = (
{'id': 'strict_start',
'desc': 'Wait for a line reset before starting to decode',
'default': 'no', 'values': ('yes', 'no')},
)
annotations = (
('reset', 'RESET'),
('enable', 'ENABLE'),
('read', 'READ'),
('write', 'WRITE'),
('ack', 'ACK'),
('data', 'DATA'),
('parity', 'PARITY'),
)
def __init__(self):
self.reset()
def reset(self):
# SWD data/clock state
self.state = 'UNKNOWN'
self.sample_edge = RISING
self.ack = None # Ack state of the current phase
self.ss_req = 0 # Start sample of current req
self.turnaround = 0 # Number of turnaround edges to ignore before continuing
self.bits = '' # Bits from SWDIO are accumulated here, matched against expected sequences
self.samplenums = [] # Sample numbers that correspond to the samples in self.bits
self.linereset_count = 0
# SWD debug port state
self.data = None
self.addr = None
self.rw = None # Are we inside an SWD read or a write?
self.ctrlsel = 0 # 'ctrlsel' is bit 0 in the SELECT register.
self.orundetect = 0 # 'orundetect' is bit 0 in the CTRLSTAT register.
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.out_python = self.register(srd.OUTPUT_PYTHON)
if self.options['strict_start'] == 'no':
self.state = 'REQ' # No need to wait for a LINE RESET.
def putx(self, ann, length, data):
'''Output annotated data.'''
ann = ANNOTATIONS.index(ann)
try:
ss = self.samplenums[-length]
except IndexError:
ss = self.samplenums[0]
if self.state == 'REQ':
self.ss_req = ss
es = self.samplenum
self.put(ss, es, self.out_ann, [ann, [data]])
def putp(self, ptype, pdata):
self.put(self.ss_req, self.samplenum, self.out_python, [ptype, pdata])
def put_python_data(self):
'''Emit Python data item based on current SWD packet contents.'''
ptype = {
('AP', 'R'): 'AP_READ',
('AP', 'W'): 'AP_WRITE',
('DP', 'R'): 'DP_READ',
('DP', 'W'): 'DP_WRITE',
}[(self.apdp, self.rw)]
self.putp(ptype, (self.addr, self.data, self.ack))
def decode(self):
while True:
# Wait for any clock edge.
clk, dio = self.wait({0: 'e'})
# Count rising edges with DIO held high,
# as a line reset (50+ high edges) can happen from any state.
if clk == RISING:
if dio == 1:
self.linereset_count += 1
else:
if self.linereset_count >= 50:
self.putx('reset', self.linereset_count, 'LINERESET')
self.putp('LINE_RESET', None)
self.reset_state()
self.linereset_count = 0
# Otherwise, we only care about either rising or falling edges
# (depending on sample_edge, set according to current state).
if clk != self.sample_edge:
continue
# Turnaround bits get skipped.
if self.turnaround > 0:
self.turnaround -= 1
continue
self.bits += str(dio)
self.samplenums.append(self.samplenum)
{
'UNKNOWN': self.handle_unknown_edge,
'REQ': self.handle_req_edge,
'ACK': self.handle_ack_edge,
'DATA': self.handle_data_edge,
'DPARITY': self.handle_dparity_edge,
}[self.state]()
def next_state(self):
'''Step to the next SWD state, reset internal counters accordingly.'''
self.bits = ''
self.samplenums = []
self.linereset_count = 0
if self.state == 'UNKNOWN':
self.state = 'REQ'
self.sample_edge = RISING
self.turnaround = 0
elif self.state == 'REQ':
self.state = 'ACK'
self.sample_edge = FALLING
self.turnaround = 1
elif self.state == 'ACK':
self.state = 'DATA'
self.sample_edge = RISING if self.rw == 'W' else FALLING
self.turnaround = 0 if self.rw == 'R' else 2
elif self.state == 'DATA':
self.state = 'DPARITY'
elif self.state == 'DPARITY':
self.put_python_data()
self.state = 'REQ'
self.sample_edge = RISING
self.turnaround = 1 if self.rw == 'R' else 0
def reset_state(self):
'''Line reset (or equivalent), wait for a new pending SWD request.'''
if self.state != 'REQ': # Emit a Python data item.
self.put_python_data()
# Clear state.
self.bits = ''
self.samplenums = []
self.linereset_count = 0
self.turnaround = 0
self.sample_edge = RISING
self.data = ''
self.ack = None
self.state = 'REQ'
def handle_unknown_edge(self):
'''
Clock edge in the UNKNOWN state.
In the unknown state, clock edges get ignored until we see a line
reset (which is detected in the decode method, not here.)
'''
pass
def handle_req_edge(self):
'''Clock edge in the REQ state (waiting for SWD r/w request).'''
# Check for a JTAG->SWD enable sequence.
m = re.search(RE_SWDSWITCH, self.bits)
if m is not None:
self.putx('enable', 16, 'JTAG->SWD')
self.reset_state()
return
# Or a valid SWD Request packet.
m = re.search(RE_SWDREQ, self.bits)
if m is not None:
calc_parity = sum([int(x) for x in m.group('rw') + m.group('apdp') + m.group('addr')]) % 2
parity = '' if str(calc_parity) == m.group('parity') else 'E'
self.rw = 'R' if m.group('rw') == '1' else 'W'
self.apdp = 'AP' if m.group('apdp') == '1' else 'DP'
self.addr = int(m.group('addr')[::-1], 2) << 2
self.putx('read' if self.rw == 'R' else 'write', 8, self.get_address_description())
self.next_state()
return
def handle_ack_edge(self):
'''Clock edge in the ACK state (waiting for complete ACK sequence).'''
if len(self.bits) < 3:
return
if self.bits == '100':
self.putx('ack', 3, 'OK')
self.ack = 'OK'
self.next_state()
elif self.bits == '001':
self.putx('ack', 3, 'FAULT')
self.ack = 'FAULT'
if self.orundetect == 1:
self.next_state()
else:
self.reset_state()
self.turnaround = 1
elif self.bits == '010':
self.putx('ack', 3, 'WAIT')
self.ack = 'WAIT'
if self.orundetect == 1:
self.next_state()
else:
self.reset_state()
self.turnaround = 1
elif self.bits == '111':
self.putx('ack', 3, 'NOREPLY')
self.ack = 'NOREPLY'
self.reset_state()
else:
self.putx('ack', 3, 'ERROR')
self.ack = 'ERROR'
self.reset_state()
def handle_data_edge(self):
'''Clock edge in the DATA state (waiting for 32 bits to clock past).'''
if len(self.bits) < 32:
return
self.data = 0
self.dparity = 0
for x in range(32):
if self.bits[x] == '1':
self.data += (1 << x)
self.dparity += 1
self.dparity = self.dparity % 2
self.putx('data', 32, '0x%08x' % self.data)
self.next_state()
def handle_dparity_edge(self):
'''Clock edge in the DPARITY state (clocking in parity bit).'''
if str(self.dparity) != self.bits:
self.putx('parity', 1, str(self.dparity) + self.bits) # PARITY ERROR
elif self.rw == 'W':
self.handle_completed_write()
self.next_state()
def handle_completed_write(self):
'''
Update internal state of the debug port based on a completed
write operation.
'''
if self.apdp != 'DP':
return
elif self.addr == ADDR_DP_SELECT:
self.ctrlsel = self.data & BIT_SELECT_CTRLSEL
elif self.addr == ADDR_DP_CTRLSTAT and self.ctrlsel == 0:
self.orundetect = self.data & BIT_CTRLSTAT_ORUNDETECT
def get_address_description(self):
'''
Return a human-readable description of the currently selected address,
for annotated results.
'''
if self.apdp == 'DP':
if self.rw == 'R':
# Tables 2-4 & 2-5 in ADIv5.2 spec ARM document IHI 0031C
return {
0: 'IDCODE',
0x4: 'R CTRL/STAT' if self.ctrlsel == 0 else 'R DLCR',
0x8: 'RESEND',
0xC: 'RDBUFF'
}[self.addr]
elif self.rw == 'W':
# Tables 2-4 & 2-5 in ADIv5.2 spec ARM document IHI 0031C
return {
0: 'W ABORT',
0x4: 'W CTRL/STAT' if self.ctrlsel == 0 else 'W DLCR',
0x8: 'W SELECT',
0xC: 'W RESERVED'
}[self.addr]
elif self.apdp == 'AP':
if self.rw == 'R':
return 'R AP%x' % self.addr
elif self.rw == 'W':
return 'W AP%x' % self.addr
# Any legitimate operations shouldn't fall through to here, probably
# a decoder bug.
return '? %s%s%x' % (self.rw, self.apdp, self.addr)
|
sigrokproject/libsigrokdecode
|
decoders/swd/pd.py
|
Python
|
gpl-3.0
| 12,080
|
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, blocks
import pmt
import numpy
import time
# Simple block to generate messages
class message_generator(gr.sync_block):
def __init__(self, msg_list, msg_interval):
gr.sync_block.__init__(
self,
name="message generator",
in_sig=[numpy.float32],
out_sig=None
)
self.msg_list = msg_list
self.msg_interval = msg_interval
self.msg_ctr = 0
self.message_port_register_out(pmt.intern('out_port'))
def work(self, input_items, output_items):
inLen = len(input_items[0])
while self.msg_ctr < len(self.msg_list) and \
(self.msg_ctr * self.msg_interval) < \
(self.nitems_read(0) + inLen):
self.message_port_pub(pmt.intern('out_port'),
self.msg_list[self.msg_ctr])
self.msg_ctr += 1
return inLen
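# For example, with msg_interval=1000 the generator publishes the next
# queued message each time roughly another 1000 input samples have been
# consumed, so the 10 * 1000 samples fed in by test_000 below flush all
# ten messages.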
# Simple block to consume messages
class message_consumer(gr.sync_block):
def __init__(self):
gr.sync_block.__init__(
self,
name="message consumer",
in_sig=None,
out_sig=None
)
self.msg_list = []
self.message_port_register_in(pmt.intern('in_port'))
self.set_msg_handler(pmt.intern('in_port'),
self.handle_msg)
def handle_msg(self, msg):
# Create a new PMT from long value and put in list
self.msg_list.append(pmt.from_long(pmt.to_long(msg)))
class test_python_message_passing(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_000(self):
num_msgs = 10
msg_interval = 1000
msg_list = []
for i in range(num_msgs):
msg_list.append(pmt.from_long(i))
# Create vector source with dummy data to trigger messages
src_data = []
for i in range(num_msgs * msg_interval):
src_data.append(float(i))
src = blocks.vector_source_f(src_data, False)
msg_gen = message_generator(msg_list, msg_interval)
msg_cons = message_consumer()
# Connect vector source to message gen
self.tb.connect(src, msg_gen)
# Connect message generator to message consumer
self.tb.msg_connect(msg_gen, 'out_port', msg_cons, 'in_port')
# Verify that the message port query functions work
self.assertEqual(
pmt.to_python(
msg_gen.message_ports_out())[0],
'out_port')
self.assertEqual(
'in_port' in pmt.to_python(
msg_cons.message_ports_in()), True)
# Run to verify message passing
self.tb.run()
# Verify that the message consumer got all the messages
self.assertEqual(num_msgs, len(msg_cons.msg_list))
for i in range(num_msgs):
self.assertTrue(pmt.equal(msg_list[i], msg_cons.msg_list[i]))
if __name__ == '__main__':
gr_unittest.run(test_python_message_passing)
|
dl1ksv/gnuradio
|
gr-blocks/python/blocks/qa_python_message_passing.py
|
Python
|
gpl-3.0
| 3,246
|
try:
import unittest2 as unittest
except ImportError:
import unittest
from Orange.testing import testing
from Orange.regression import pls
@testing.datasets_driven(datasets=testing.REGRESSION_DATASETS)
class TestPLS(testing.LearnerTestCase):
LEARNER = pls.PLSRegressionLearner
# TODO: Test the PLS by passing x_vars, y_vars
|
qPCR4vir/orange
|
Orange/testing/unit/tests/test_pls.py
|
Python
|
gpl-3.0
| 330
|
# Generated by Django 1.11.20 on 2019-06-05 13:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('grades', '0014_persistentsubsectiongradeoverridehistory'),
]
operations = [
migrations.CreateModel(
name='HistoricalPersistentSubsectionGradeOverride',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created', models.DateTimeField(blank=True, db_index=True, editable=False)),
('modified', models.DateTimeField(blank=True, db_index=True, editable=False)),
('earned_all_override', models.FloatField(blank=True, null=True)),
('possible_all_override', models.FloatField(blank=True, null=True)),
('earned_graded_override', models.FloatField(blank=True, null=True)),
('possible_graded_override', models.FloatField(blank=True, null=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('grade', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='grades.PersistentSubsectionGrade')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical persistent subsection grade override',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
]
|
edx/edx-platform
|
lms/djangoapps/grades/migrations/0015_historicalpersistentsubsectiongradeoverride.py
|
Python
|
agpl-3.0
| 2,222
|
import copy
from datetime import datetime
from fs.errors import ResourceNotFoundError
import logging
from lxml import etree
import os
from path import Path as path
from pkg_resources import resource_string
import re
import sys
import textwrap
import dogstats_wrapper as dog_stats_api
from xmodule.util.misc import escape_html_characters
from xmodule.contentstore.content import StaticContent
from xmodule.editing_module import EditingDescriptor
from xmodule.edxnotes_utils import edxnotes
from xmodule.html_checker import check_html
from xmodule.stringify import stringify_children
from xmodule.x_module import XModule, DEPRECATION_VSCOMPAT_EVENT
from xmodule.xml_module import XmlDescriptor, name_to_pathname
from xblock.core import XBlock
from xblock.fields import Scope, String, Boolean, List
from xblock.fragment import Fragment
log = logging.getLogger("edx.courseware")
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class HtmlBlock(object):
"""
This will eventually subclass XBlock and merge HtmlModule and HtmlDescriptor
into one. For now, it's a place to put the pieces that are already sharable
between the two (field information and XBlock handlers).
"""
display_name = String(
display_name=_("Display Name"),
help=_("This name appears in the horizontal navigation at the top of the page."),
scope=Scope.settings,
# it'd be nice to have a useful default but it screws up other things; so,
# use display_name_with_default for those
default=_("Text")
)
data = String(help=_("Html contents to display for this module"), default=u"", scope=Scope.content)
source_code = String(
help=_("Source code for LaTeX documents. This feature is not well-supported."),
scope=Scope.settings
)
use_latex_compiler = Boolean(
help=_("Enable LaTeX templates?"),
default=False,
scope=Scope.settings
)
editor = String(
help=_(
"Select Visual to enter content and have the editor automatically create the HTML. Select Raw to edit "
"HTML directly. If you change this setting, you must save the component and then re-open it for editing."
),
display_name=_("Editor"),
default="visual",
values=[
{"display_name": _("Visual"), "value": "visual"},
{"display_name": _("Raw"), "value": "raw"}
],
scope=Scope.settings
)
@XBlock.supports("multi_device")
def student_view(self, _context):
"""
Return a fragment that contains the html for the student view
"""
return Fragment(self.get_html())
def get_html(self):
""" Returns html required for rendering XModule. """
# When we switch this to an XBlock, we can merge this with student_view,
# but for now the XModule mixin requires that this method be defined.
# pylint: disable=no-member
if self.system.anonymous_student_id:
return self.data.replace("%%USER_ID%%", self.system.anonymous_student_id)
return self.data
class HtmlModuleMixin(HtmlBlock, XModule):
"""
Attributes and methods used by HtmlModules internally.
"""
js = {
'coffee': [
resource_string(__name__, 'js/src/html/display.coffee'),
],
'js': [
resource_string(__name__, 'js/src/javascript_loader.js'),
resource_string(__name__, 'js/src/collapsible.js'),
resource_string(__name__, 'js/src/html/imageModal.js'),
resource_string(__name__, 'js/common_static/js/vendor/draggabilly.js'),
]
}
js_module_name = "HTMLModule"
css = {'scss': [resource_string(__name__, 'css/html/display.scss')]}
@edxnotes
class HtmlModule(HtmlModuleMixin):
"""
Module for putting raw html in a course
"""
class HtmlDescriptor(HtmlBlock, XmlDescriptor, EditingDescriptor): # pylint: disable=abstract-method
"""
Module for putting raw html in a course
"""
mako_template = "widgets/html-edit.html"
module_class = HtmlModule
resources_dir = None
filename_extension = "xml"
template_dir_name = "html"
show_in_read_only_mode = True
js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]}
js_module_name = "HTMLEditingDescriptor"
css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/html/edit.scss')]}
# VS[compat] TODO (cpennington): Delete this method once all fall 2012 courses
# are being edited in the cms
@classmethod
def backcompat_paths(cls, filepath):
"""
Get paths for html and xml files.
"""
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:html_descriptor_backcompat_paths"]
)
if filepath.endswith('.html.xml'):
filepath = filepath[:-9] + '.html' # backcompat--look for html instead of xml
if filepath.endswith('.html.html'):
filepath = filepath[:-5] # some people like to include .html in filenames..
candidates = []
while os.sep in filepath:
candidates.append(filepath)
_, _, filepath = filepath.partition(os.sep)
# also look for .html versions instead of .xml
new_candidates = []
for candidate in candidates:
if candidate.endswith('.xml'):
new_candidates.append(candidate[:-4] + '.html')
return candidates + new_candidates
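# Illustrative example (POSIX paths): backcompat_paths('a/b/foo.html.xml')
# returns ['a/b/foo.html', 'b/foo.html'], i.e. the .xml pointer is mapped
# to .html and progressively shorter relative paths are tried.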
@classmethod
def filter_templates(cls, template, course):
"""
Filter template that contains 'latex' from templates.
Show them only if use_latex_compiler is set to True in
course settings.
"""
return 'latex' not in template['template_id'] or course.use_latex_compiler
def get_context(self):
"""
an override to add in specific rendering context, in this case we need to
add in a base path to our c4x content addressing scheme
"""
_context = EditingDescriptor.get_context(self)
# Add some specific HTML rendering context when editing HTML modules where we pass
# the root /c4x/ url for assets. This allows client-side substitutions to occur.
_context.update({
'base_asset_url': StaticContent.get_base_url_path_for_course_assets(self.location.course_key),
'enable_latex_compiler': self.use_latex_compiler,
'editor': self.editor
})
return _context
# NOTE: html descriptors are special. We do not want to parse and
# export them ourselves, because that can break things (e.g. lxml
# adds body tags when it exports, but they should just be html
# snippets that will be included in the middle of pages.
@classmethod
def load_definition(cls, xml_object, system, location, id_generator):
'''Load a descriptor from the specified xml_object:
If there is a filename attribute, load it as a string, and
log a warning if it is not parseable by etree.HTMLParser.
If there is not a filename attribute, the definition is the body
of the xml_object, without the root tag (do not want <html> in the
middle of a page)
Args:
xml_object: an lxml.etree._Element containing the definition to load
system: the modulestore system or runtime which caches data
location: the usage id for the block--used to compute the filename if none in the xml_object
id_generator: used by other impls of this method to generate the usage_id
'''
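# Illustrative example (hypothetical block name): a pointer file for a
# block named "intro" contains <html filename="intro"/>, so pointer_path
# becomes "html/intro" and filepath resolves to "html/intro.html" below.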
filename = xml_object.get('filename')
if filename is None:
definition_xml = copy.deepcopy(xml_object)
cls.clean_metadata_from_xml(definition_xml)
return {'data': stringify_children(definition_xml)}, []
else:
# html is special. cls.filename_extension is 'xml', but
# if 'filename' is in the definition, that means to load
# from .html
# 'filename' in html pointers is a relative path
# (not same as 'html/blah.html' when the pointer is in a directory itself)
pointer_path = "{category}/{url_path}".format(
category='html',
url_path=name_to_pathname(location.name)
)
base = path(pointer_path).dirname()
# log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename))
filepath = "{base}/{name}.html".format(base=base, name=filename)
# log.debug("looking for html file for {0} at {1}".format(location, filepath))
# VS[compat]
# TODO (cpennington): If the file doesn't exist at the right path,
# give the class a chance to fix it up. The file will be written out
# again in the correct format. This should go away once the CMS is
# online and has imported all current (fall 2012) courses from xml
if not system.resources_fs.exists(filepath):
dog_stats_api.increment(
DEPRECATION_VSCOMPAT_EVENT,
tags=["location:html_descriptor_load_definition"]
)
candidates = cls.backcompat_paths(filepath)
# log.debug("candidates = {0}".format(candidates))
for candidate in candidates:
if system.resources_fs.exists(candidate):
filepath = candidate
break
try:
with system.resources_fs.open(filepath) as infile:
html = infile.read().decode('utf-8')
# Log a warning if we can't parse the file, but don't error
if not check_html(html) and len(html) > 0:
msg = "Couldn't parse html in {0}, content = {1}".format(filepath, html)
log.warning(msg)
system.error_tracker("Warning: " + msg)
definition = {'data': html}
# TODO (ichuang): remove this after migration
# for Fall 2012 LMS migration: keep filename (and unmangled filename)
definition['filename'] = [filepath, filename]
return definition, []
except (ResourceNotFoundError) as err:
msg = 'Unable to load file contents at path {0}: {1} '.format(
filepath, err)
# add more info and re-raise
raise Exception(msg), None, sys.exc_info()[2]
# TODO (vshnayder): make export put things in the right places.
def definition_to_xml(self, resource_fs):
''' Write <html filename="" [meta-attrs="..."]> to filename.xml, and the html
string to filename.html.
'''
# Write html to file, return an empty tag
pathname = name_to_pathname(self.url_name)
filepath = u'{category}/{pathname}.html'.format(
category=self.category,
pathname=pathname
)
resource_fs.makedir(os.path.dirname(filepath), recursive=True, allow_recreate=True)
with resource_fs.open(filepath, 'w') as filestream:
html_data = self.data.encode('utf-8')
filestream.write(html_data)
# write out the relative name
relname = path(pathname).basename()
elt = etree.Element('html')
elt.set("filename", relname)
return elt
@property
def non_editable_metadata_fields(self):
"""
`use_latex_compiler` should not be editable in the Studio settings editor.
"""
non_editable_fields = super(HtmlDescriptor, self).non_editable_metadata_fields
non_editable_fields.append(HtmlDescriptor.use_latex_compiler)
return non_editable_fields
def index_dictionary(self):
xblock_body = super(HtmlDescriptor, self).index_dictionary()
# Removing script and style
html_content = re.sub(
re.compile(
r"""
<script>.*?</script> |
<style>.*?</style>
""",
re.DOTALL |
re.VERBOSE),
"",
self.data
)
html_content = escape_html_characters(html_content)
html_body = {
"html_content": html_content,
"display_name": self.display_name,
}
if "content" in xblock_body:
xblock_body["content"].update(html_body)
else:
xblock_body["content"] = html_body
xblock_body["content_type"] = "Text"
return xblock_body
class AboutFields(object):
display_name = String(
help=_("Display name for this module"),
scope=Scope.settings,
default="overview",
)
data = String(
help=_("Html contents to display for this module"),
default=u"",
scope=Scope.content
)
@XBlock.tag("detached")
class AboutModule(AboutFields, HtmlModuleMixin):
"""
Overriding defaults but otherwise treated as HtmlModule.
"""
pass
@XBlock.tag("detached")
class AboutDescriptor(AboutFields, HtmlDescriptor):
"""
These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
in order to be able to create new ones
"""
template_dir_name = "about"
module_class = AboutModule
class StaticTabFields(object):
"""
The overrides for Static Tabs
"""
display_name = String(
display_name=_("Display Name"),
help=_("This name appears in the horizontal navigation at the top of the page."),
scope=Scope.settings,
default="Empty",
)
course_staff_only = Boolean(
display_name=_("Hide Page From Learners"),
help=_("If you select this option, only course team members with"
" the Staff or Admin role see this page."),
default=False,
scope=Scope.settings
)
data = String(
default=textwrap.dedent(u"""\
<p>Add the content you want students to see on this page.</p>
"""),
scope=Scope.content,
help=_("HTML for the additional pages")
)
@XBlock.tag("detached")
class StaticTabModule(StaticTabFields, HtmlModuleMixin):
"""
Supports the field overrides
"""
pass
@XBlock.tag("detached")
class StaticTabDescriptor(StaticTabFields, HtmlDescriptor):
"""
These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
in order to be able to create new ones
"""
template_dir_name = None
module_class = StaticTabModule
class CourseInfoFields(object):
"""
Field overrides
"""
items = List(
help=_("List of course update items"),
default=[],
scope=Scope.content
)
data = String(
help=_("Html contents to display for this module"),
default=u"<ol></ol>",
scope=Scope.content
)
@XBlock.tag("detached")
class CourseInfoModule(CourseInfoFields, HtmlModuleMixin):
"""
Just to support xblock field overrides
"""
# statuses
STATUS_VISIBLE = 'visible'
STATUS_DELETED = 'deleted'
TEMPLATE_DIR = 'courseware'
@XBlock.supports("multi_device")
def student_view(self, _context):
"""
Return a fragment that contains the html for the student view
"""
return Fragment(self.get_html())
def get_html(self):
""" Returns html required for rendering XModule. """
# When we switch this to an XBlock, we can merge this with student_view,
# but for now the XModule mixin requires that this method be defined.
# pylint: disable=no-member
if self.data != "":
if self.system.anonymous_student_id:
return self.data.replace("%%USER_ID%%", self.system.anonymous_student_id)
return self.data
else:
course_updates = [item for item in self.items if item.get('status') == self.STATUS_VISIBLE]
course_updates.sort(
key=lambda item: (CourseInfoModule.safe_parse_date(item['date']), item['id']),
reverse=True
)
context = {
'visible_updates': course_updates[:3],
'hidden_updates': course_updates[3:],
}
return self.system.render_template("{0}/course_updates.html".format(self.TEMPLATE_DIR), context)
@staticmethod
def safe_parse_date(date):
"""
Since this is used solely for ordering purposes, use today's date as a default
"""
try:
return datetime.strptime(date, '%B %d, %Y')
except ValueError: # occurs for ill-formatted date values
return datetime.today()
@XBlock.tag("detached")
class CourseInfoDescriptor(CourseInfoFields, HtmlDescriptor):
"""
These pieces of course content are treated as HtmlModules but we need to overload where the templates are located
in order to be able to create new ones
"""
template_dir_name = None
module_class = CourseInfoModule
|
TheMOOCAgency/edx-platform
|
common/lib/xmodule/xmodule/html_module.py
|
Python
|
agpl-3.0
| 17,460
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
ECDSA keys
"""
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import (
decode_dss_signature,
encode_dss_signature,
)
from paramiko.common import four_byte
from paramiko.message import Message
from paramiko.pkey import PKey
from paramiko.ssh_exception import SSHException
from paramiko.util import deflate_long
class _ECDSACurve(object):
"""
Represents a specific ECDSA Curve (nistp256, nistp384, etc).
Handles the generation of the key format identifier and the selection of
the proper hash function. Also records the curve class from the
'cryptography' package.
"""
def __init__(self, curve_class, nist_name):
self.nist_name = nist_name
self.key_length = curve_class.key_size
# Defined in RFC 5656 6.2
self.key_format_identifier = "ecdsa-sha2-" + self.nist_name
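# For example, the nistp256 curve yields the identifier
# "ecdsa-sha2-nistp256" and, per the selection below, SHA-256 as its hash.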
# Defined in RFC 5656 6.2.1
if self.key_length <= 256:
self.hash_object = hashes.SHA256
elif self.key_length <= 384:
self.hash_object = hashes.SHA384
else:
self.hash_object = hashes.SHA512
self.curve_class = curve_class
class _ECDSACurveSet(object):
"""
A collection to hold the ECDSA curves. Allows querying by curve class, by key
format identifier, and by key length, which are the ways in which ECDSAKey
needs to look up curves.
"""
def __init__(self, ecdsa_curves):
self.ecdsa_curves = ecdsa_curves
def get_key_format_identifier_list(self):
return [curve.key_format_identifier for curve in self.ecdsa_curves]
def get_by_curve_class(self, curve_class):
for curve in self.ecdsa_curves:
if curve.curve_class == curve_class:
return curve
def get_by_key_format_identifier(self, key_format_identifier):
for curve in self.ecdsa_curves:
if curve.key_format_identifier == key_format_identifier:
return curve
def get_by_key_length(self, key_length):
for curve in self.ecdsa_curves:
if curve.key_length == key_length:
return curve
class ECDSAKey(PKey):
"""
Representation of an ECDSA key which can be used to sign and verify SSH2
data.
"""
_ECDSA_CURVES = _ECDSACurveSet(
[
_ECDSACurve(ec.SECP256R1, "nistp256"),
_ECDSACurve(ec.SECP384R1, "nistp384"),
_ECDSACurve(ec.SECP521R1, "nistp521"),
]
)
def __init__(
self,
msg=None,
data=None,
filename=None,
password=None,
vals=None,
file_obj=None,
validate_point=True,
):
self.verifying_key = None
self.signing_key = None
self.public_blob = None
if file_obj is not None:
self._from_private_key(file_obj, password)
return
if filename is not None:
self._from_private_key_file(filename, password)
return
if (msg is None) and (data is not None):
msg = Message(data)
if vals is not None:
self.signing_key, self.verifying_key = vals
c_class = self.signing_key.curve.__class__
self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(c_class)
else:
# Must set ecdsa_curve first; subroutines called herein may need to
# spit out our get_name(), which relies on this.
key_type = msg.get_text()
# But this also means we need to hand it a real key/curve
# identifier, so strip out any cert business. (NOTE: could push
# that into _ECDSACurveSet.get_by_key_format_identifier(), but it
# feels more correct to do it here?)
suffix = "-cert-v01@openssh.com"
if key_type.endswith(suffix):
key_type = key_type[: -len(suffix)]
self.ecdsa_curve = self._ECDSA_CURVES.get_by_key_format_identifier(
key_type
)
key_types = self._ECDSA_CURVES.get_key_format_identifier_list()
cert_types = [
"{}-cert-v01@openssh.com".format(x) for x in key_types
]
self._check_type_and_load_cert(
msg=msg, key_type=key_types, cert_type=cert_types
)
curvename = msg.get_text()
if curvename != self.ecdsa_curve.nist_name:
raise SSHException(
"Can't handle curve of type {}".format(curvename)
)
pointinfo = msg.get_binary()
try:
numbers = ec.EllipticCurvePublicNumbers.from_encoded_point(
self.ecdsa_curve.curve_class(), pointinfo
)
except ValueError:
raise SSHException("Invalid public key")
self.verifying_key = numbers.public_key(backend=default_backend())
@classmethod
def supported_key_format_identifiers(cls):
return cls._ECDSA_CURVES.get_key_format_identifier_list()
def asbytes(self):
key = self.verifying_key
m = Message()
m.add_string(self.ecdsa_curve.key_format_identifier)
m.add_string(self.ecdsa_curve.nist_name)
numbers = key.public_numbers()
key_size_bytes = (key.curve.key_size + 7) // 8
x_bytes = deflate_long(numbers.x, add_sign_padding=False)
x_bytes = b"\x00" * (key_size_bytes - len(x_bytes)) + x_bytes
y_bytes = deflate_long(numbers.y, add_sign_padding=False)
y_bytes = b"\x00" * (key_size_bytes - len(y_bytes)) + y_bytes
point_str = four_byte + x_bytes + y_bytes
m.add_string(point_str)
return m.asbytes()
def __str__(self):
return self.asbytes()
def __hash__(self):
return hash(
(
self.get_name(),
self.verifying_key.public_numbers().x,
self.verifying_key.public_numbers().y,
)
)
def get_name(self):
return self.ecdsa_curve.key_format_identifier
def get_bits(self):
return self.ecdsa_curve.key_length
def can_sign(self):
return self.signing_key is not None
def sign_ssh_data(self, data):
ecdsa = ec.ECDSA(self.ecdsa_curve.hash_object())
sig = self.signing_key.sign(data, ecdsa)
r, s = decode_dss_signature(sig)
m = Message()
m.add_string(self.ecdsa_curve.key_format_identifier)
m.add_string(self._sigencode(r, s))
return m
def verify_ssh_sig(self, data, msg):
if msg.get_text() != self.ecdsa_curve.key_format_identifier:
return False
sig = msg.get_binary()
sigR, sigS = self._sigdecode(sig)
signature = encode_dss_signature(sigR, sigS)
try:
self.verifying_key.verify(
signature, data, ec.ECDSA(self.ecdsa_curve.hash_object())
)
except InvalidSignature:
return False
else:
return True
def write_private_key_file(self, filename, password=None):
self._write_private_key_file(
filename,
self.signing_key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
def write_private_key(self, file_obj, password=None):
self._write_private_key(
file_obj,
self.signing_key,
serialization.PrivateFormat.TraditionalOpenSSL,
password=password,
)
@classmethod
def generate(cls, curve=ec.SECP256R1(), progress_func=None, bits=None):
"""
Generate a new private ECDSA key. This factory function can be used to
generate a new host key or authentication key.
:param progress_func: Not used for this type of key.
:returns: A new private key (`.ECDSAKey`) object
"""
if bits is not None:
curve = cls._ECDSA_CURVES.get_by_key_length(bits)
if curve is None:
raise ValueError("Unsupported key length: {:d}".format(bits))
curve = curve.curve_class()
private_key = ec.generate_private_key(curve, backend=default_backend())
return ECDSAKey(vals=(private_key, private_key.public_key()))
# ...internals...
def _from_private_key_file(self, filename, password):
data = self._read_private_key_file("EC", filename, password)
self._decode_key(data)
def _from_private_key(self, file_obj, password):
data = self._read_private_key("EC", file_obj, password)
self._decode_key(data)
def _decode_key(self, data):
try:
key = serialization.load_der_private_key(
data, password=None, backend=default_backend()
)
except (ValueError, AssertionError) as e:
raise SSHException(str(e))
self.signing_key = key
self.verifying_key = key.public_key()
curve_class = key.curve.__class__
self.ecdsa_curve = self._ECDSA_CURVES.get_by_curve_class(curve_class)
def _sigencode(self, r, s):
msg = Message()
msg.add_mpint(r)
msg.add_mpint(s)
return msg.asbytes()
def _sigdecode(self, sig):
msg = Message(sig)
r = msg.get_mpint()
s = msg.get_mpint()
return r, s
|
mirrorcoder/paramiko
|
paramiko/ecdsakey.py
|
Python
|
lgpl-2.1
| 10,385
|
# This file is part of the MapProxy project.
# Copyright (C) 2011 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import datetime
import json
import socket
import time
import hashlib
import base64
from mapproxy.image import ImageSource
from mapproxy.cache.base import (
TileCacheBase,
tile_buffer, CacheBackendError,)
from mapproxy.source import SourceError
from mapproxy.srs import SRS
from mapproxy.compat import string_type, iteritems, BytesIO
from threading import Lock
try:
import requests
except ImportError:
requests = None
import logging
log = logging.getLogger(__name__)
class UnexpectedResponse(CacheBackendError):
pass
class CouchDBCache(TileCacheBase):
def __init__(self, url, db_name,
file_ext, tile_grid, md_template=None,
tile_id_template=None):
if requests is None:
raise ImportError("CouchDB backend requires 'requests' package.")
self.lock_cache_id = 'couchdb-' + hashlib.md5((url + db_name).encode('utf-8')).hexdigest()
self.file_ext = file_ext
self.tile_grid = tile_grid
self.md_template = md_template
self.couch_url = '%s/%s' % (url.rstrip('/'), db_name.lower())
self.req_session = requests.Session()
self.req_session.timeout = 5
self.db_initialised = False
self.app_init_db_lock = Lock()
self.tile_id_template = tile_id_template
def init_db(self):
with self.app_init_db_lock:
if self.db_initialised:
return
try:
self.req_session.put(self.couch_url)
self.db_initialised = True
except requests.exceptions.RequestException as ex:
log.warning('unable to initialize CouchDB: %s', ex)
def tile_url(self, coord):
return self.document_url(coord) + '/tile'
def document_url(self, coord, relative=False):
x, y, z = coord
grid_name = self.tile_grid.name
couch_url = self.couch_url
if relative:
if self.tile_id_template:
if self.tile_id_template.startswith('%(couch_url)s/'):
tile_id_template = self.tile_id_template[len('%(couch_url)s/'):]
else:
tile_id_template = self.tile_id_template
return tile_id_template % locals()
else:
return '%(grid_name)s-%(z)s-%(x)s-%(y)s' % locals()
else:
if self.tile_id_template:
return self.tile_id_template % locals()
else:
return '%(couch_url)s/%(grid_name)s-%(z)s-%(x)s-%(y)s' % locals()
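# Illustrative example (hypothetical grid name): with the default template,
# a tile at coord (x=1, y=2, z=3) on grid "osm_grid" gets the document id
# "osm_grid-3-1-2", prefixed with couch_url when relative=False.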
def is_cached(self, tile):
if tile.coord is None or tile.source:
return True
url = self.document_url(tile.coord)
try:
self.init_db()
resp = self.req_session.get(url)
if resp.status_code == 200:
doc = json.loads(codecs.decode(resp.content, 'utf-8'))
tile.timestamp = doc.get(self.md_template.timestamp_key)
return True
except (requests.exceptions.RequestException, socket.error) as ex:
# is_cached should not fail (would abort seeding for example),
# so we catch these errors here and just return False
log.warning('error while requesting %s: %s', url, ex)
return False
if resp.status_code == 404:
return False
raise SourceError('%r: %r' % (resp.status_code, resp.content))
def _tile_doc(self, tile):
tile_id = self.document_url(tile.coord, relative=True)
if self.md_template:
tile_doc = self.md_template.doc(tile, self.tile_grid)
else:
tile_doc = {}
tile_doc['_id'] = tile_id
with tile_buffer(tile) as buf:
data = buf.read()
tile_doc['_attachments'] = {
'tile': {
'content_type': 'image/' + self.file_ext,
'data': codecs.decode(
base64.b64encode(data).replace(b'\n', b''),
'ascii',
),
}
}
return tile_id, tile_doc
def _store_bulk(self, tiles):
tile_docs = {}
for tile in tiles:
tile_id, tile_doc = self._tile_doc(tile)
tile_docs[tile_id] = tile_doc
duplicate_tiles = self._post_bulk(tile_docs)
if duplicate_tiles:
self._fill_rev_ids(duplicate_tiles)
self._post_bulk(duplicate_tiles, no_conflicts=True)
return True
def _post_bulk(self, tile_docs, no_conflicts=False):
"""
POST multiple tiles, returns all tile docs with conflicts during POST.
"""
doc = {'docs': list(tile_docs.values())}
data = json.dumps(doc)
self.init_db()
resp = self.req_session.post(self.couch_url + '/_bulk_docs', data=data, headers={'Content-type': 'application/json'})
if resp.status_code != 201:
raise UnexpectedResponse('got unexpected resp (%d) from CouchDB: %s' % (resp.status_code, resp.content))
resp_doc = json.loads(codecs.decode(resp.content, 'utf-8'))
duplicate_tiles = {}
for tile in resp_doc:
if tile.get('error', 'false') == 'conflict':
duplicate_tiles[tile['id']] = tile_docs[tile['id']]
if no_conflicts and duplicate_tiles:
raise UnexpectedResponse('got unexpected resp (%d) from CouchDB: %s' % (resp.status_code, resp.content))
return duplicate_tiles
def _fill_rev_ids(self, tile_docs):
"""
Request all revs for tile_docs and insert it into the tile_docs.
"""
keys_doc = {'keys': list(tile_docs.keys())}
data = json.dumps(keys_doc)
self.init_db()
resp = self.req_session.post(self.couch_url + '/_all_docs', data=data, headers={'Content-type': 'application/json'})
if resp.status_code != 200:
raise UnexpectedResponse('got unexpected resp (%d) from CouchDB: %s' % (resp.status_code, resp.content))
resp_doc = json.loads(codecs.decode(resp.content, 'utf-8'))
for tile in resp_doc['rows']:
tile_docs[tile['id']]['_rev'] = tile['value']['rev']
def store_tile(self, tile):
if tile.stored:
return True
return self._store_bulk([tile])
def store_tiles(self, tiles):
tiles = [t for t in tiles if not t.stored]
return self._store_bulk(tiles)
def load_tile_metadata(self, tile):
if tile.timestamp:
return
# is_cached loads metadata
self.is_cached(tile)
def load_tile(self, tile, with_metadata=False):
# bulk loading with load_tiles is not implemented, because
# CouchDB's /_all_docs does not include attachments
if tile.source or tile.coord is None:
return True
url = self.document_url(tile.coord) + '?attachments=true'
self.init_db()
resp = self.req_session.get(url, headers={'Accept': 'application/json'})
if resp.status_code == 200:
doc = json.loads(codecs.decode(resp.content, 'utf-8'))
tile_data = BytesIO(base64.b64decode(doc['_attachments']['tile']['data']))
tile.source = ImageSource(tile_data)
tile.timestamp = doc.get(self.md_template.timestamp_key)
return True
return False
def remove_tile(self, tile):
if tile.coord is None:
return True
url = self.document_url(tile.coord)
resp = requests.head(url)
if resp.status_code == 404:
# already removed
return True
rev_id = resp.headers['etag']
url += '?rev=' + rev_id.strip('"')
self.init_db()
resp = self.req_session.delete(url)
if resp.status_code == 200:
return True
return False
def utc_now_isoformat():
now = datetime.datetime.utcnow()
now = now.isoformat()
# remove milliseconds, add Zulu timezone
now = now.rsplit('.', 1)[0] + 'Z'
return now
class CouchDBMDTemplate(object):
def __init__(self, attributes):
self.attributes = attributes
for key, value in iteritems(attributes):
if value == '{{timestamp}}':
self.timestamp_key = key
break
else:
attributes['timestamp'] = '{{timestamp}}'
self.timestamp_key = 'timestamp'
def doc(self, tile, grid):
doc = {}
x, y, z = tile.coord
for key, value in iteritems(self.attributes):
if not isinstance(value, string_type) or not value.startswith('{{'):
doc[key] = value
continue
if value == '{{timestamp}}':
doc[key] = time.time()
elif value == '{{x}}':
doc[key] = x
elif value == '{{y}}':
doc[key] = y
elif value in ('{{z}}', '{{level}}'):
doc[key] = z
elif value == '{{utc_iso}}':
doc[key] = utc_now_isoformat()
elif value == '{{wgs_tile_centroid}}':
tile_bbox = grid.tile_bbox(tile.coord)
centroid = (
tile_bbox[0] + (tile_bbox[2]-tile_bbox[0])/2,
tile_bbox[1] + (tile_bbox[3]-tile_bbox[1])/2
)
centroid = grid.srs.transform_to(SRS(4326), centroid)
doc[key] = centroid
elif value == '{{tile_centroid}}':
tile_bbox = grid.tile_bbox(tile.coord)
centroid = (
tile_bbox[0] + (tile_bbox[2]-tile_bbox[0])/2,
tile_bbox[1] + (tile_bbox[3]-tile_bbox[1])/2
)
doc[key] = centroid
else:
raise ValueError('unknown CouchDB tile_metadata value: %r' % (value, ))
return doc
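# A minimal illustration (hypothetical attribute names): configuring
# {'row': '{{y}}', 'created': '{{utc_iso}}'} produces, for tile coord
# (1, 2, 3), a doc like {'row': 2, 'created': '<ISO timestamp>Z',
# 'timestamp': <time.time()>}; the 'timestamp' key is added automatically
# in __init__ because no '{{timestamp}}' attribute was configured.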
|
camptocamp/mapproxy
|
mapproxy/cache/couchdb.py
|
Python
|
apache-2.0
| 10,510
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid as uuid_lib
from oslo.config import cfg
from nova.cloudpipe import pipelib
from nova.network import api as network_api
from nova.tests.functional.v3 import api_sample_base
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
class CloudPipeSampleTest(api_sample_base.ApiSampleTestBaseV3):
extension_name = "os-cloudpipe"
def setUp(self):
super(CloudPipeSampleTest, self).setUp()
def get_user_data(self, project_id):
"""Stub method to generate user data for cloudpipe tests."""
return "VVNFUiBEQVRB\n"
def network_api_get(self, context, network_uuid):
"""Stub to get a valid network and its information."""
return {'vpn_public_address': '127.0.0.1',
'vpn_public_port': 22}
self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
self.stubs.Set(network_api.API, "get",
network_api_get)
def generalize_subs(self, subs, vanilla_regexes):
subs['project_id'] = '[0-9a-f-]+'
return subs
def test_cloud_pipe_create(self):
# Get api samples of cloud pipe extension creation.
self.flags(vpn_image_id=fake.get_valid_image_id())
project = {'project_id': str(uuid_lib.uuid4().hex)}
response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
project)
subs = self._get_regexes()
subs.update(project)
subs['image_id'] = CONF.vpn_image_id
self._verify_response('cloud-pipe-create-resp', subs, response, 200)
return project
def test_cloud_pipe_list(self):
# Get api samples of cloud pipe extension get request.
project = self.test_cloud_pipe_create()
response = self._do_get('os-cloudpipe')
subs = self._get_regexes()
subs.update(project)
subs['image_id'] = CONF.vpn_image_id
self._verify_response('cloud-pipe-get-resp', subs, response, 200)
def test_cloud_pipe_update(self):
subs = {'vpn_ip': '192.168.1.1',
'vpn_port': 2000}
response = self._do_put('os-cloudpipe/configure-project',
'cloud-pipe-update-req',
subs)
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
|
silenceli/nova
|
nova/tests/functional/v3/test_cloudpipe.py
|
Python
|
apache-2.0
| 3,032
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for deterministic cuDNN functionality."""
import collections
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
# Notes:
#
# TensorFlow makes cuDNN run deterministically when op determinism is enabled
# via tf.config.experimental.enable_op_determinism(). Additionally, setting the
# environmental variable TF_CUDNN_DETERMINISTIC to 'true' or '1' makes cuDNN run
# deterministically, although this environmental variable is deprecated and will
# be removed in a future TensorFlow version. Unlike the enable_op_determinism()
# function, the environmental variable only makes ops using cuDNN deterministic,
# not all TensorFlow ops.
#
# Where both deterministic and non-deterministic cuDNN algorithms are available,
# selecting deterministic operation will lead to only the deterministic
# algorithms being chosen. Additionally, selecting deterministic operation will
# result in a deterministic, or reproducible, selection of algorithms (for any
# given layer configuration) for each of the forward and the two backward paths.
#
# These tests intend to confirm that deterministic algorithms are chosen (for
# the back-prop paths) when deterministic operation is selected. The tested
# configurations were first confirmed to produce non-deterministic results when
# the above-mentioned environment variables are not set.
#
# Even though selecting deterministic operation should ensure that the same
# algorithms, for a given layer configuration, are always used (i.e. that
# algorithm selection is deterministic / reproducible), this is not tested.
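# A minimal sketch of how determinism is requested in user code (shown for
# illustration only; these tests rely on the mechanisms described above):
#
#   tf.config.experimental.enable_op_determinism()
#
# or, via the deprecated environment variable, set before TensorFlow
# initializes:
#
#   os.environ['TF_CUDNN_DETERMINISTIC'] = '1'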
# TODO(duncanriach): Add test for deterministic cuDNN max-pooling
LayerShapeNHWC = collections.namedtuple('LayerShapeNHWC',
'batch, height, width, channels')
FilterShape2D = collections.namedtuple(
'FilterShape2D', 'height, width, in_channels, out_channels')
LayerShapeNCDHW = collections.namedtuple(
'LayerShapeNCDHW', 'batch, channels, depth, height, width')
FilterShape3D = collections.namedtuple(
'FilterShape3D', 'depth, height, width, in_channels, out_channels')
class ConvolutionTest(test.TestCase):
"""Tests for deterministic cuDNN functionality."""
def _random_data_op(self, shape):
# np.random.random_sample can properly interpret either tf.TensorShape or
# namedtuple as a list.
return constant_op.constant(
2 * np.random.random_sample(shape) - 1, dtype=dtypes.float32)
def _random_out_op(self, in_shape, filter_shape, strides, padding):
# Choosing not to use array_op.zeros() to prevent possible removal by
# optimization
in_op = self._random_data_op(in_shape)
filter_op = self._random_data_op(filter_shape)
# Use the forward op's shape-inference
conv_op = nn_ops.conv2d(in_op, filter_op, strides=strides, padding=padding)
out_shape = conv_op.get_shape()
out_op = self._random_data_op(out_shape)
return out_op
def _assert_reproducible(self, operation):
with self.cached_session(force_gpu=True):
result_1 = self.evaluate(operation)
result_2 = self.evaluate(operation)
self.assertAllEqual(result_1, result_2)
# The default forward algorithm choice, when using cuDNN 7, does not support
# the following layer configuration. This test case intends to confirm that
# an alternative algorithm is selected. Note that, in cuDNN 7, all forward
# algorithms are deterministic.
@test_util.run_cuda_only
def testForward(self):
in_shape = LayerShapeNCDHW(batch=2, channels=3, depth=5, height=7, width=6)
filter_shape = FilterShape3D(
depth=3, height=3, width=3, in_channels=3, out_channels=2)
in_op = self._random_data_op(in_shape)
filter_op = self._random_data_op(filter_shape)
strides = [1, 1, 1, 1, 1]
padding = 'VALID'
dilations = [1, 1, 2, 2, 2]
out_op = nn_ops.conv3d(
in_op,
filter_op,
strides=strides,
padding=padding,
data_format='NCDHW',
dilations=dilations)
self._assert_reproducible(out_op)
@test_util.run_cuda_only
def testBackwardFilterGradient(self):
in_shape = LayerShapeNHWC(batch=8, height=128, width=128, channels=8)
filter_shape = FilterShape2D(
height=3, width=3, in_channels=8, out_channels=8)
in_op = self._random_data_op(in_shape)
strides = [1, 1, 1, 1]
padding = 'SAME'
out_op = self._random_out_op(in_shape, filter_shape, strides, padding)
filter_gradient_op = nn_ops.conv2d_backprop_filter(
in_op, filter_shape, out_op, strides=strides, padding=padding)
self._assert_reproducible(filter_gradient_op)
@test_util.run_cuda_only
def testBackwardFilterGradientWithDilations(self):
in_shape = LayerShapeNHWC(batch=8, height=128, width=128, channels=8)
filter_shape = FilterShape2D(
height=3, width=3, in_channels=8, out_channels=8)
in_op = self._random_data_op(in_shape)
strides = [1, 1, 1, 1]
padding = 'SAME'
dilations = [1, 2, 2, 1]
out_op = self._random_out_op(in_shape, filter_shape, strides, padding)
filter_gradient_op = nn_ops.conv2d_backprop_filter(
in_op, filter_shape, out_op, strides=strides, padding=padding,
dilations=dilations)
self._assert_reproducible(filter_gradient_op)
@test_util.run_cuda_only
def testBackwardInputGradient(self):
in_shape = LayerShapeNHWC(batch=8, height=32, width=32, channels=8)
filter_shape = FilterShape2D(
height=7, width=7, in_channels=8, out_channels=128)
filter_op = self._random_data_op(filter_shape)
strides = [1, 1, 1, 1]
padding = 'SAME'
out_op = self._random_out_op(in_shape, filter_shape, strides, padding)
input_gradient_op = nn_ops.conv2d_backprop_input(
in_shape, filter_op, out_op, strides=strides, padding=padding)
self._assert_reproducible(input_gradient_op)
@test_util.run_cuda_only
def testBackwardInputGradientWithDilations(self):
in_shape = LayerShapeNHWC(batch=8, height=32, width=32, channels=8)
filter_shape = FilterShape2D(
height=7, width=7, in_channels=8, out_channels=128)
filter_op = self._random_data_op(filter_shape)
strides = [1, 1, 1, 1]
padding = 'SAME'
dilations = [1, 2, 2, 1]
out_op = self._random_out_op(in_shape, filter_shape, strides, padding)
input_gradient_op = nn_ops.conv2d_backprop_input(
in_shape, filter_op, out_op, strides=strides, padding=padding,
dilations=dilations)
self._assert_reproducible(input_gradient_op)
|
tensorflow/tensorflow
|
tensorflow/python/kernel_tests/nn_ops/cudnn_deterministic_base.py
|
Python
|
apache-2.0
| 7,357
|
"""Tests for AVM Fritz!Box config flow."""
from unittest import mock
from pyfritzhome import LoginError
import pytest
from requests.exceptions import HTTPError
from homeassistant.components.fritzbox.const import DOMAIN
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_UDN,
)
from homeassistant.const import CONF_DEVICES, CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers.typing import HomeAssistantType
from . import MOCK_CONFIG
from tests.async_mock import Mock, patch
MOCK_USER_DATA = MOCK_CONFIG[DOMAIN][CONF_DEVICES][0]
MOCK_SSDP_DATA = {
ATTR_SSDP_LOCATION: "https://fake_host:12345/test",
ATTR_UPNP_FRIENDLY_NAME: "fake_name",
ATTR_UPNP_UDN: "uuid:only-a-test",
}
@pytest.fixture(name="fritz")
def fritz_fixture() -> Mock:
"""Patch libraries."""
with patch("homeassistant.components.fritzbox.config_flow.Fritzhome") as fritz:
yield fritz
async def test_user(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert result["title"] == "fake_host"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert not result["result"].unique_id
async def test_user_auth_failed(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user with authentication failure."""
fritz().login.side_effect = [LoginError("Boom"), mock.DEFAULT]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"]["base"] == "invalid_auth"
async def test_user_not_successful(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user but no connection found."""
fritz().login.side_effect = OSError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == "no_devices_found"
async def test_user_already_configured(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by user when already configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert not result["result"].unique_id
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_import(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow by import."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "import"}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert result["title"] == "fake_host"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert not result["result"].unique_id
async def test_ssdp(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "fake_pass", CONF_USERNAME: "fake_user"},
)
assert result["type"] == "create_entry"
assert result["title"] == "fake_name"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert result["result"].unique_id == "only-a-test"
async def test_ssdp_no_friendly_name(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery without friendly name."""
MOCK_NO_NAME = MOCK_SSDP_DATA.copy()
del MOCK_NO_NAME[ATTR_UPNP_FRIENDLY_NAME]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_NO_NAME
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "fake_pass", CONF_USERNAME: "fake_user"},
)
assert result["type"] == "create_entry"
assert result["title"] == "fake_host"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_PASSWORD] == "fake_pass"
assert result["data"][CONF_USERNAME] == "fake_user"
assert result["result"].unique_id == "only-a-test"
async def test_ssdp_auth_failed(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery with authentication failure."""
fritz().login.side_effect = LoginError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
assert result["errors"]["base"] == "invalid_auth"
async def test_ssdp_not_successful(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery but no device found."""
fritz().login.side_effect = OSError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
)
assert result["type"] == "abort"
assert result["reason"] == "no_devices_found"
async def test_ssdp_not_supported(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery with unsupported device."""
fritz().get_device_elements.side_effect = HTTPError("Boom")
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
)
assert result["type"] == "abort"
assert result["reason"] == "not_supported"
async def test_ssdp_already_in_progress_unique_id(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery twice."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_ssdp_already_in_progress_host(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery twice."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
MOCK_NO_UNIQUE_ID = MOCK_SSDP_DATA.copy()
del MOCK_NO_UNIQUE_ID[ATTR_UPNP_UDN]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_NO_UNIQUE_ID
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_ssdp_already_configured(hass: HomeAssistantType, fritz: Mock):
"""Test starting a flow from discovery when already configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert not result["result"].unique_id
result2 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
assert result["result"].unique_id == "only-a-test"
|
tboyce021/home-assistant
|
tests/components/fritzbox/test_config_flow.py
|
Python
|
apache-2.0
| 9,231
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
TARGETS = [
'cssauto.py',
]
PACKAGE = {
'title': 'cssauto',
'desc': 'CSS autoloading',
}
def setup(targets):
'''Setup example for translation, MUST call util.setup(targets).'''
util.setup(targets)
def translate():
'''Translate example, MUST call util.translate().'''
util.translate()
def install(package):
'''Install and cleanup example module. MUST call util.install(package)'''
util.install(package)
##---------------------------------------##
# --------- (-: DO NOT EDIT :-) --------- #
##---------------------------------------##
import sys
import os
examples = head = os.path.abspath(os.path.dirname(__file__))
while os.path.split(examples)[1].lower() != 'examples':
examples = os.path.split(examples)[0]
if not examples:
raise ValueError("Cannot determine examples directory")
sys.path.insert(0, os.path.join(examples))
from _examples import util
sys.path.pop(0)
util.init(head)
setup(TARGETS)
translate()
install(PACKAGE)
|
Hasimir/pyjs
|
examples/cssauto/__main__.py
|
Python
|
apache-2.0
| 1,045
|
from __future__ import division, print_function, absolute_import
import os
import sys
import subprocess
from .utils.six.moves import configparser
COMMIT_INFO_FNAME = 'COMMIT_INFO.txt'
def pkg_commit_hash(pkg_path):
''' Get short form of commit hash given directory `pkg_path`
There should be a file called 'COMMIT_INFO.txt' in `pkg_path`. This is a
file in INI file format, with at least one section: ``commit hash``, and two
variables ``archive_subst_hash`` and ``install_hash``. The first has a
substitution pattern in it which may have been filled by the execution of
``git archive`` if this is an archive generated that way. The second is
filled in by the installation, if the installation is from a git archive.
We get the commit hash from (in order of preference):
* A substituted value in ``archive_subst_hash``
    * A written commit hash value in ``install_hash``
* git's output, if we are in a git repository
If all these fail, we return a not-found placeholder tuple
Parameters
    ----------
pkg_path : str
directory containing package
Returns
    -------
hash_from : str
Where we got the hash from - description
hash_str : str
short form of hash
'''
# Try and get commit from written commit text file
pth = os.path.join(pkg_path, COMMIT_INFO_FNAME)
if not os.path.isfile(pth):
raise IOError('Missing commit info file %s' % pth)
cfg_parser = configparser.ConfigParser()
cfg_parser.read(pth)
archive_subst = cfg_parser.get('commit hash', 'archive_subst_hash')
if not archive_subst.startswith('$Format'): # it has been substituted
return 'archive substitution', archive_subst
install_subst = cfg_parser.get('commit hash', 'install_hash')
if install_subst != '':
return 'installation', install_subst
# maybe we are in a repository
proc = subprocess.Popen('git rev-parse --short HEAD',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=pkg_path, shell=True)
repo_commit, _ = proc.communicate()
if repo_commit:
return 'repository', repo_commit.strip()
return '(none found)', '<not found>'
def get_pkg_info(pkg_path):
''' Return dict describing the context of this package
Parameters
------------
pkg_path : str
path containing __init__.py for package
Returns
----------
context : dict
with named parameters of interest
'''
src, hsh = pkg_commit_hash(pkg_path)
import numpy
return dict(
pkg_path=pkg_path,
commit_source=src,
commit_hash=hsh,
sys_version=sys.version,
sys_executable=sys.executable,
sys_platform=sys.platform,
np_version=numpy.__version__)
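# --- Hedged usage sketch (added for illustration, not part of dipy) ----------
# Assuming the directory containing this module also holds a COMMIT_INFO.txt
# (otherwise pkg_commit_hash() raises IOError), the two helpers above can be
# combined as follows; the directory choice is an assumption.
if __name__ == '__main__':
    pkg_dir = os.path.dirname(os.path.abspath(__file__))  # assumed package root
    source, commit = pkg_commit_hash(pkg_dir)
    print('commit hash from %s: %s' % (source, commit))
    for key, value in sorted(get_pkg_info(pkg_dir).items()):
        print('%s: %s' % (key, value))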
|
mdesco/dipy
|
dipy/pkg_info.py
|
Python
|
bsd-3-clause
| 2,859
|
from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import HAS_GEOS
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango110Warning
if HAS_GEOS:
from django.contrib.gis.db.models import Union, Extent3D
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from .models import (City3D, Interstate2D, Interstate3D, InterstateProj2D,
InterstateProj3D, Point2D, Point3D, MultiPoint3D, Polygon2D, Polygon3D)
if HAS_GDAL:
from django.contrib.gis.utils import LayerMapping, LayerMapError
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')
# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
('Houston', (-95.363151, 29.763374, 18)),
('Dallas', (-96.801611, 32.782057, 147)),
('Oklahoma City', (-97.521157, 34.464642, 380)),
('Wellington', (174.783117, -41.315268, 14)),
('Pueblo', (-104.609252, 38.255001, 1433)),
('Lawrence', (-95.235060, 38.971823, 251)),
('Chicago', (-87.650175, 41.850385, 181)),
('Victoria', (-123.305196, 48.462611, 15)),
)
# Reference mapping of city name to its altitude (Z value).
city_dict = {name: coords for name, coords in city_data}
# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
interstate_data = (
('I-45',
'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,'
'-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,'
'-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,'
'-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,'
'-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,'
'-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,'
'-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,'
'-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,'
'-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,'
'-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,'
'-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
(11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
15.435),
),
)
# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,'
'942051.75 4208366.38,941527.97 4225693.20))',
(21.71, 13.21, 9.12, 16.40, 21.71)
)
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_functions")
class Geo3DTest(TestCase):
"""
Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
tries to test the features that can handle 3D and that are also
available within GeoDjango. For more information, see the PostGIS docs
on the routines that support 3D:
http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
"""
def _load_interstate_data(self):
# Interstate (2D / 3D and Geographic/Projected variants)
for name, line, exp_z in interstate_data:
line_3d = GEOSGeometry(line, srid=4269)
line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269)
# Creating a geographic and projected version of the
# interstate in both 2D and 3D.
Interstate3D.objects.create(name=name, line=line_3d)
InterstateProj3D.objects.create(name=name, line=line_3d)
Interstate2D.objects.create(name=name, line=line_2d)
InterstateProj2D.objects.create(name=name, line=line_2d)
def _load_city_data(self):
for name, pnt_data in city_data:
City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326))
def _load_polygon_data(self):
bbox_wkt, bbox_z = bbox_data
bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140)
Polygon2D.objects.create(name='2D BBox', poly=bbox_2d)
Polygon3D.objects.create(name='3D BBox', poly=bbox_3d)
def test_3d_hasz(self):
"""
Make sure data is 3D and has expected Z values -- shouldn't change
because of coordinate system.
"""
self._load_interstate_data()
for name, line, exp_z in interstate_data:
interstate = Interstate3D.objects.get(name=name)
interstate_proj = InterstateProj3D.objects.get(name=name)
for i in [interstate, interstate_proj]:
self.assertTrue(i.line.hasz)
self.assertEqual(exp_z, tuple(i.line.z))
self._load_city_data()
for name, pnt_data in city_data:
city = City3D.objects.get(name=name)
z = pnt_data[2]
self.assertTrue(city.point.hasz)
self.assertEqual(z, city.point.z)
def test_3d_polygons(self):
"""
Test the creation of polygon 3D models.
"""
self._load_polygon_data()
p3d = Polygon3D.objects.get(name='3D BBox')
self.assertTrue(p3d.poly.hasz)
self.assertIsInstance(p3d.poly, Polygon)
self.assertEqual(p3d.poly.srid, 32140)
def test_3d_layermapping(self):
"""
Testing LayerMapping on 3D models.
"""
point_mapping = {'point': 'POINT'}
mpoint_mapping = {'mpoint': 'MULTIPOINT'}
# The VRT is 3D, but should still be able to map sans the Z.
lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point2D.objects.count())
# The city shapefile is 2D, and won't be able to fill the coordinates
# in the 3D model -- thus, a LayerMapError is raised.
self.assertRaises(LayerMapError, LayerMapping,
Point3D, city_file, point_mapping, transform=False)
# 3D model should take 3D data just fine.
lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point3D.objects.count())
# Making sure LayerMapping.make_multi works right, by converting
# a Point25D into a MultiPoint25D.
lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
lm.save()
self.assertEqual(3, MultiPoint3D.objects.count())
def test_kml(self):
"""
Test GeoQuerySet.kml() with Z values.
"""
self._load_city_data()
h = City3D.objects.kml(precision=6).get(name='Houston')
# KML should be 3D.
# `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
self.assertTrue(ref_kml_regex.match(h.kml))
def test_geojson(self):
"""
Test GeoQuerySet.geojson() with Z values.
"""
self._load_city_data()
h = City3D.objects.geojson(precision=6).get(name='Houston')
# GeoJSON should be 3D
# `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
self.assertTrue(ref_json_regex.match(h.geojson))
def test_union(self):
"""
Testing the Union aggregate of 3D models.
"""
# PostGIS query that returned the reference EWKT for this test:
# `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
self._load_city_data()
ref_ewkt = (
'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,'
'-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,'
'-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
)
ref_union = GEOSGeometry(ref_ewkt)
union = City3D.objects.aggregate(Union('point'))['point__union']
self.assertTrue(union.hasz)
# Ordering of points in the resulting geometry may vary between implementations
self.assertSetEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union})
@ignore_warnings(category=RemovedInDjango110Warning)
def test_extent(self):
"""
Testing the Extent3D aggregate for 3D models.
"""
self._load_city_data()
# `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
extent2 = City3D.objects.extent3d()
def check_extent3d(extent3d, tol=6):
for ref_val, ext_val in zip(ref_extent3d, extent3d):
self.assertAlmostEqual(ref_val, ext_val, tol)
for e3d in [extent1, extent2]:
check_extent3d(e3d)
self.assertIsNone(City3D.objects.none().extent3d())
self.assertIsNone(City3D.objects.none().aggregate(Extent3D('point'))['point__extent3d'])
def test_perimeter(self):
"""
Testing GeoQuerySet.perimeter() on 3D fields.
"""
self._load_polygon_data()
# Reference query for values below:
# `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
ref_perim_3d = 76859.2620451
ref_perim_2d = 76859.2577803
tol = 6
self.assertAlmostEqual(ref_perim_2d,
Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
tol)
self.assertAlmostEqual(ref_perim_3d,
Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
tol)
def test_length(self):
"""
Testing GeoQuerySet.length() on 3D fields.
"""
        # ST_Length_Spheroid is Z-aware, and thus does not need to use
# a separate function internally.
# `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
# FROM geo3d_interstate[2d|3d];`
self._load_interstate_data()
tol = 3
ref_length_2d = 4368.1721949481
ref_length_3d = 4368.62547052088
self.assertAlmostEqual(ref_length_2d,
Interstate2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
Interstate3D.objects.length().get(name='I-45').length.m,
tol)
        # Making sure `ST_Length3D` is used for a projected
# and 3D model rather than `ST_Length`.
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
ref_length_2d = 4367.71564892392
# `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
ref_length_3d = 4368.16897234101
self.assertAlmostEqual(ref_length_2d,
InterstateProj2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
InterstateProj3D.objects.length().get(name='I-45').length.m,
tol)
def test_scale(self):
"""
Testing GeoQuerySet.scale() on Z values.
"""
self._load_city_data()
        # Z scale factors to test against the reference city Z values.
zscales = (-3, 4, 23)
for zscale in zscales:
for city in City3D.objects.scale(1.0, 1.0, zscale):
self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
def test_translate(self):
"""
Testing GeoQuerySet.translate() on Z values.
"""
self._load_city_data()
ztranslations = (5.23, 23, -17)
for ztrans in ztranslations:
for city in City3D.objects.translate(0, 0, ztrans):
self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
|
h4r5h1t/django-hauthy
|
tests/gis_tests/geo3d/tests.py
|
Python
|
bsd-3-clause
| 12,627
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Mapper to compute distance matrix
Date: 2012.05.29
"""
__docformat__ = 'restructuredtext'
import numpy as np
import scipy.sparse as sp
from mvpa2.base import warning
from mvpa2.base.dochelpers import _str, borrowkwargs, _repr_attrs
from mvpa2.mappers.base import accepts_dataset_as_samples, Mapper
from mvpa2.datasets.base import Dataset
from mvpa2.support import copy
from scipy.spatial.distance import pdist, squareform
class FeatureDistanceMapper(Mapper):
"""Mapper to compute distance matrix
"""
def __init__(self, metric='euclidean', **kwargs):
"""
        Parameters
        ----------
metric : string or function
The distance metric to use. The distance function can be 'braycurtis',
'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice',
'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
"""
Mapper.__init__(self, **kwargs)
self.__metric = metric
def __repr__(self, prefixes=[]):
        return super(FeatureDistanceMapper, self).__repr__(
prefixes=prefixes
+ _repr_attrs(self, ['metric']))
def __str__(self):
return _str(self)
def _forward_dataset(self, ds):
mds = Dataset([])
mds.a = ds.a
vectordist = self._fdistance(ds.samples)
mds.samples = squareform(vectordist, force='no', checks=True)
return mds
def _fdistance(self, samples):
if sp.issparse(samples):
samples = samples.todense()
samples = samples.T
print np.shape(samples)
return pdist(samples, metric=self.__metric)
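# --- Hedged illustration (not part of the original module) -------------------
# _fdistance() transposes the samples so that pdist() runs over features
# (columns), and _forward_dataset() expands the condensed vector with
# squareform(); a minimal standalone equivalent, assuming plain NumPy input:
#
#     data = np.random.rand(10, 4)                     # 10 samples, 4 features
#     dist = squareform(pdist(data.T, metric='euclidean'))
#     assert dist.shape == (4, 4)                      # feature x feature matrix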
|
sealhuang/FreeROI
|
froi/algorithm/unused/featuredistancemapper.py
|
Python
|
bsd-3-clause
| 1,951
|
from optparse import make_option
from django.conf import settings
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_flush, emit_post_sync_signal
from django.utils.importlib import import_module
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to flush. '
'Defaults to the "default" database.'),
make_option('--no-initial-data', action='store_false', dest='load_initial_data', default=True,
help='Tells Django not to load any initial data after database synchronization.'),
)
help = ('Returns the database to the state it was in immediately after '
'syncdb was executed. This means that all data will be removed '
'from the database, any post-synchronization handlers will be '
're-executed, and the initial_data fixture will be re-installed.')
def handle_noargs(self, **options):
db = options.get('database')
connection = connections[db]
verbosity = int(options.get('verbosity'))
interactive = options.get('interactive')
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except ImportError:
pass
sql_list = sql_flush(self.style, connection, only_django=True)
if interactive:
confirm = raw_input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
else:
confirm = 'yes'
if confirm == 'yes':
try:
cursor = connection.cursor()
for sql in sql_list:
cursor.execute(sql)
except Exception as e:
transaction.rollback_unless_managed(using=db)
raise CommandError("""Database %s couldn't be flushed. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the expected database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
The full error: %s""" % (connection.settings_dict['NAME'], e))
transaction.commit_unless_managed(using=db)
# Emit the post sync signal. This allows individual
# applications to respond as if the database had been
# sync'd from scratch.
all_models = []
for app in models.get_apps():
all_models.extend([
m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(db, m)
])
emit_post_sync_signal(set(all_models), verbosity, interactive, db)
# Reinstall the initial_data fixture.
kwargs = options.copy()
kwargs['database'] = db
if options.get('load_initial_data'):
# Reinstall the initial_data fixture.
                call_command('loaddata', 'initial_data', **kwargs)
else:
self.stdout.write("Flush cancelled.\n")
|
rebost/django
|
django/core/management/commands/flush.py
|
Python
|
bsd-3-clause
| 3,988
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import os
import plistlib
import shutil
import tempfile
import xml.parsers.expat
from telemetry.core import os_version
from telemetry import decorators
from telemetry.internal.platform import power_monitor
import py_utils
# TODO: rename this class (seems like this is used by mac)
class PowerMetricsPowerMonitor(power_monitor.PowerMonitor):
def __init__(self, backend):
super(PowerMetricsPowerMonitor, self).__init__()
self._powermetrics_process = None
self._backend = backend
self._output_filename = None
self._output_directory = None
@property
def binary_path(self):
return '/usr/bin/powermetrics'
def StartMonitoringPower(self, browser):
self._CheckStart()
# Empirically powermetrics creates an empty output file immediately upon
# starting. We detect file creation as a signal that measurement has
# started. In order to avoid various race conditions in tempfile creation
    # we create a temp directory and have powermetrics create its output
# there rather than say, creating a tempfile, deleting it and reusing its
# name.
self._output_directory = tempfile.mkdtemp()
self._output_filename = os.path.join(self._output_directory,
'powermetrics.output')
args = ['-f', 'plist',
'-u', self._output_filename,
'-i0',
'--show-usage-summary']
self._powermetrics_process = self._backend.LaunchApplication(
self.binary_path, args, elevate_privilege=True)
# Block until output file is written to ensure this function call is
# synchronous in respect to powermetrics starting.
def _OutputFileExists():
return os.path.isfile(self._output_filename)
py_utils.WaitFor(_OutputFileExists, 1)
@decorators.Cache
def CanMonitorPower(self):
mavericks_or_later = (
self._backend.GetOSVersionName() >= os_version.MAVERICKS)
binary_path = self.binary_path
return mavericks_or_later and self._backend.CanLaunchApplication(
binary_path)
@staticmethod
def _ParsePlistString(plist_string):
"""Wrapper to parse a plist from a string and catch any errors.
    Sometimes powermetrics will exit in the middle of writing its output;
    empirically it seems that it always writes at least one sample in its
    entirety, so we can safely ignore any errors in its output.
Returns:
Parser output on successful parse, None on parse error.
"""
try:
return plistlib.readPlistFromString(plist_string)
except xml.parsers.expat.ExpatError:
return None
@staticmethod
def ParsePowerMetricsOutput(powermetrics_output):
"""Parse output of powermetrics command line utility.
Returns:
Dictionary in the format returned by StopMonitoringPower() or None
if |powermetrics_output| is empty - crbug.com/353250 .
"""
if len(powermetrics_output) == 0:
logging.warning('powermetrics produced zero length output')
return {}
# Container to collect samples for running averages.
# out_path - list containing the key path in the output dictionary.
# src_path - list containing the key path to get the data from in
# powermetrics' output.
def ConstructMetric(out_path, src_path):
RunningAverage = collections.namedtuple('RunningAverage', [
'out_path', 'src_path', 'samples'])
return RunningAverage(out_path, src_path, [])
# List of RunningAverage objects specifying metrics we want to aggregate.
metrics = [
ConstructMetric(
['platform_info', 'average_frequency_hz'],
['processor', 'freq_hz']),
ConstructMetric(
['platform_info', 'idle_percent'],
['processor', 'packages', 0, 'c_state_ratio'])]
def DataWithMetricKeyPath(metric, powermetrics_output):
"""Retrieve the sample from powermetrics' output for a given metric.
Args:
metric: The RunningAverage object we want to collect a new sample for.
powermetrics_output: Dictionary containing powermetrics output.
Returns:
The sample corresponding to |metric|'s keypath."""
# Get actual data corresponding to key path.
out_data = powermetrics_output
for k in metric.src_path:
out_data = out_data[k]
assert type(out_data) in [int, float], (
'Was expecting a number: %s (%s)' % (type(out_data), out_data))
return float(out_data)
sample_durations = []
total_energy_consumption_mwh = 0
# powermetrics outputs multiple plists separated by null terminators.
raw_plists = powermetrics_output.split('\0')
raw_plists = [x for x in raw_plists if len(x) > 0]
assert len(raw_plists) == 1
# -------- Examine contents of first plist for systems specs. --------
plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
if not plist:
logging.warning('powermetrics produced invalid output, output length: '
'%d', len(powermetrics_output))
return {}
# Powermetrics doesn't record power usage when running on a VM.
hw_model = plist.get('hw_model')
if hw_model and hw_model.startswith('VMware'):
return {}
if 'GPU' in plist:
metrics.extend([
ConstructMetric(
['component_utilization', 'gpu', 'average_frequency_hz'],
['GPU', 0, 'freq_hz']),
ConstructMetric(
['component_utilization', 'gpu', 'idle_percent'],
['GPU', 0, 'c_state_ratio'])])
# There's no way of knowing ahead of time how many cpus and packages the
# current system has. Iterate over cores and cpus - construct metrics for
# each one.
if 'processor' in plist:
core_dict = plist['processor']['packages'][0]['cores']
num_cores = len(core_dict)
cpu_num = 0
for core_idx in xrange(num_cores):
num_cpus = len(core_dict[core_idx]['cpus'])
base_src_path = ['processor', 'packages', 0, 'cores', core_idx]
for cpu_idx in xrange(num_cpus):
base_out_path = ['component_utilization', 'cpu%d' % cpu_num]
# C State ratio is per-package, component CPUs of that package may
# have different frequencies.
metrics.append(ConstructMetric(
base_out_path + ['average_frequency_hz'],
base_src_path + ['cpus', cpu_idx, 'freq_hz']))
metrics.append(ConstructMetric(
base_out_path + ['idle_percent'],
base_src_path + ['c_state_ratio']))
cpu_num += 1
# -------- Parse Data Out of Plists --------
plist = PowerMetricsPowerMonitor._ParsePlistString(raw_plists[0])
if not plist:
logging.error('Error parsing plist.')
return {}
# Duration of this sample.
sample_duration_ms = int(plist['elapsed_ns']) / 10 ** 6
sample_durations.append(sample_duration_ms)
if 'processor' not in plist:
logging.error("'processor' field not found in plist.")
return {}
processor = plist['processor']
total_energy_consumption_mwh = (
(float(processor.get('package_joules', 0)) / 3600.) * 10 ** 3)
for m in metrics:
try:
m.samples.append(DataWithMetricKeyPath(m, plist))
except KeyError:
# Old CPUs don't have c-states, so if data is missing, just ignore it.
logging.info('Field missing from powermetrics output: %s', m.src_path)
continue
# -------- Collect and Process Data --------
out_dict = {}
out_dict['identifier'] = 'powermetrics'
out_dict['energy_consumption_mwh'] = total_energy_consumption_mwh
def StoreMetricAverage(metric, sample_durations, out):
"""Calculate average value of samples in a metric and store in output
path as specified by metric.
Args:
metric: A RunningAverage object containing samples to average.
sample_durations: A list which parallels the samples list containing
the time slice for each sample.
        out: The output dict, average is stored in the location specified by
metric.out_path.
"""
if len(metric.samples) == 0:
return
assert len(metric.samples) == len(sample_durations)
avg = 0
for i in xrange(len(metric.samples)):
avg += metric.samples[i] * sample_durations[i]
avg /= sum(sample_durations)
# Store data in output, creating empty dictionaries as we go.
for k in metric.out_path[:-1]:
if not out.has_key(k):
out[k] = {}
out = out[k]
out[metric.out_path[-1]] = avg
for m in metrics:
StoreMetricAverage(m, sample_durations, out_dict)
if 'tasks' not in plist:
logging.error("'tasks' field not found in plist.")
return {}
# The following CPU metrics are already time-normalized, and segmented by
# process. Sum the metrics across all Chrome processes.
cputime = 0
energy_impact = 0
browser_process_count = 0
idle_wakeups = 0
for task in plist['tasks']:
if 'Chrome' in task['name'] or 'Chromium' in task['name']:
if 'Helper' not in task['name']:
browser_process_count += 1
cputime += float(task['cputime_ms_per_s'])
energy_impact += float(task.get('energy_impact', 0))
idle_wakeups += float(task['idle_wakeups_per_s'])
if browser_process_count == 0:
logging.warning('No Chrome or Chromium browser process found with '
'powermetrics. Chrome CPU metrics will not be emitted.')
return {}
elif browser_process_count >= 2:
logging.warning('powermetrics found more than one Chrome or Chromium '
'browser. Chrome CPU metrics will not be emitted.')
# During Telemetry unit tests, there may be multiple Chrome browsers
# present. Don't add cpu metrics, but don't return {} either.
else: # browser_process_count == 1:
chrome_dict = {}
chrome_dict['cputime_ms_per_s'] = cputime
chrome_dict['energy_impact'] = energy_impact
chrome_dict['idle_wakeups_per_s'] = idle_wakeups
out_dict['component_utilization']['chrome'] = chrome_dict
return out_dict
def _KillPowerMetricsProcess(self):
"""Kill a running powermetrics process."""
try:
if self._powermetrics_process.poll() is None:
self._powermetrics_process.terminate()
except OSError as e:
logging.warning(
'Error when trying to terminate powermetric process: %s', repr(e))
if self._powermetrics_process.poll() is None:
      # terminate() can fail when powermetrics does not have the setuid bit set.
self._backend.LaunchApplication(
'/usr/bin/pkill',
['-SIGTERM', os.path.basename(self.binary_path)],
elevate_privilege=True)
def StopMonitoringPower(self):
self._CheckStop()
# Tell powermetrics to take an immediate sample.
try:
self._KillPowerMetricsProcess()
(power_stdout, power_stderr) = self._powermetrics_process.communicate()
returncode = self._powermetrics_process.returncode
assert returncode in [0, -15], (
"""powermetrics error
return code=%d
stdout=(%s)
stderr=(%s)""" % (returncode, power_stdout, power_stderr))
with open(self._output_filename, 'rb') as output_file:
powermetrics_output = output_file.read()
return PowerMetricsPowerMonitor.ParsePowerMetricsOutput(
powermetrics_output)
except Exception as e:
logging.warning(
'Error when trying to collect power monitoring data: %s', repr(e))
return PowerMetricsPowerMonitor.ParsePowerMetricsOutput('')
finally:
shutil.rmtree(self._output_directory)
self._output_directory = None
self._output_filename = None
self._powermetrics_process = None
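# Hedged note (illustration only, not part of the original module): the monitor
# above effectively runs the command below with elevated privileges and then
# parses the plist written to the output file; the output path is an assumption.
#
#     sudo /usr/bin/powermetrics -f plist -u /tmp/powermetrics.output -i0 \
#         --show-usage-summary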
|
sahiljain/catapult
|
telemetry/telemetry/internal/platform/power_monitor/powermetrics_power_monitor.py
|
Python
|
bsd-3-clause
| 12,065
|
import os
import sys
import traceback
import urllib
import urllib2
import base64
import json
import requests
import subprocess
#set Jenkins build description using submitDescription to mock browser behavior
http_proxy = ''
if('HTTP_PROXY' in os.environ):
http_proxy = os.environ['HTTP_PROXY']
proxyDict = {'http': http_proxy, 'https': http_proxy}
branch = "v3"
pr_num = 0
workspace = "."
node_name = "ios"
def set_jenkins_job_description(desc, url):
req_data = urllib.urlencode({'description': desc})
request = urllib2.Request(url + 'submitDescription', req_data)
#print(os.environ['BUILD_URL'])
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
user_name = os.environ['JENKINS_ADMIN']
password = os.environ['JENKINS_ADMIN_PW']
base64string = base64.encodestring(user_name + ":" + password).replace('\n', '')
request.add_header("Authorization", "Basic " + base64string)
try:
urllib2.urlopen(request)
except:
traceback.print_exc()
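# Hedged equivalent (illustration only, not part of the original script): the
# same description update could also be issued with `requests`, which is
# already imported above; the description text is an assumption and target_url
# is built later in send_notifies_to_github().
#
#     requests.post(target_url + 'submitDescription',
#                   data={'description': '<h3>pr#42 is opened</h3>'},
#                   auth=(os.environ['JENKINS_ADMIN'], os.environ['JENKINS_ADMIN_PW']),
#                   proxies=proxyDict)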
def send_notifies_to_github():
global branch
global pr_num
global workspace
global node_name
# get payload from os env
payload_str = os.environ['payload']
payload_str = payload_str.decode('utf-8', 'ignore')
#parse to json obj
payload = json.loads(payload_str)
#get pull number
pr_num = payload['number']
print 'pr_num:' + str(pr_num)
#build for pull request action 'open' and 'synchronize', skip 'close'
action = payload['action']
print 'action: ' + action
#pr = payload['pull_request']
url = payload['html_url']
print "url:" + url
pr_desc = '<h3><a href=' + url + '> pr#' + str(pr_num) + ' is ' + action + '</a></h3>'
#get statuses url
statuses_url = payload['statuses_url']
#get pr target branch
branch = payload['branch']
workspace = os.environ['WORKSPACE']
node_name = os.environ['NODE_NAME']
#set commit status to pending
# target_url = os.environ['BUILD_URL']
jenkins_url = os.environ['JENKINS_URL']
job_name = os.environ['JOB_NAME'].split('/')[0]
build_number = os.environ['BUILD_NUMBER']
target_url = jenkins_url + 'job/' + job_name + '/' + build_number + '/'
set_jenkins_job_description(pr_desc, target_url)
data = {"state": "pending", "target_url": target_url, "context": "Jenkins CI", "description": "Build started..."}
access_token = os.environ['GITHUB_ACCESS_TOKEN']
Headers = {"Authorization": "token " + access_token}
try:
requests.post(statuses_url, data=json.dumps(data), headers=Headers, proxies=proxyDict)
except:
traceback.print_exc()
def synchronize_remote_pr():
global workspace
global branch
global pr_num
#reset path to workspace root
os.system("cd " + workspace)
#pull latest code
os.system("git fetch origin " + branch)
os.system("git --version")
os.system("git checkout " + branch)
os.system("git merge origin/" + branch)
os.system("git branch -D pull" + str(pr_num))
#clean workspace
print "Before checkout: git clean -xdf -f"
os.system("git clean -xdf -f")
#fetch pull request to local repo
git_fetch_pr = "git fetch origin pull/" + str(pr_num) + "/head"
ret = os.system(git_fetch_pr)
if(ret != 0):
sys.exit(1)
#checkout a new branch from v3 or v4-develop
git_checkout = "git checkout -b " + "pull" + str(pr_num)
os.system(git_checkout)
    #merge pull request head
subprocess.call("git merge --no-edit FETCH_HEAD", shell=True)
# The follow method is not working for Azure server
# p = os.popen('git merge --no-edit FETCH_HEAD')
# r = p.read()
# #check if merge fail
# if output.find('CONFLICT') > 0:
# print output
# raise Exception('There are conflicts in your PR!')
# After checkout a new branch, clean workspace again
print "After checkout: git clean -xdf -f"
os.system("git clean -xdf -f")
#update submodule
git_update_submodule = "git submodule update --init --force"
ret = os.system(git_update_submodule)
if(ret != 0):
sys.exit(1)
# -------------- main --------------
if __name__ == '__main__':
sys_ret = 0
try:
send_notifies_to_github()
        #synchronize local git repository with remote and merge the PR
        synchronize_remote_pr()
jenkins_script_path = "tools" + os.sep + "jenkins-scripts" + os.sep + "do-pull-request-builder.py"
sys_ret = os.system("python " + jenkins_script_path)
except:
traceback.print_exc()
sys_ret = 1
finally:
if sys_ret != 0:
sys.exit(1)
else:
sys.exit(0)
|
dios-game/dios-cocos
|
src/oslibs/cocos/cocos-src/tools/jenkins-scripts/pull-request-builder.py
|
Python
|
mit
| 4,685
|
from functools import partial
import logging
from six import iteritems
from bravado_core.docstring import docstring_property
from bravado_core.schema import SWAGGER_PRIMITIVES
log = logging.getLogger(__name__)
# Models in #/definitions are tagged with this key so that they can be
# differentiated from 'object' types.
MODEL_MARKER = 'x-model'
def tag_models(container, key, path, visited_models, swagger_spec):
"""Callback used during the swagger spec ingestion process to tag models
with a 'x-model'. This is only done in the root document.
A list of visited models is maintained to avoid duplication of tagging.
:param container: container being visited
:param key: attribute in container being visited as a string
:param path: list of path segments to the key
:type visited_models: dict (k,v) == (model_name, path)
:type swagger_spec: :class:`bravado_core.spec.Spec`
"""
if len(path) < 2 or path[-2] != 'definitions':
return
deref = swagger_spec.deref
model_name = key
model_spec = deref(container.get(key))
if deref(model_spec.get('type')) != 'object':
return
if deref(model_spec.get(MODEL_MARKER)) is not None:
return
log.debug('Found model: {0}'.format(model_name))
if model_name in visited_models:
raise ValueError(
'Duplicate "{0}" model found at path {1}. '
'Original "{0}" model at path {2}'
.format(model_name, path, visited_models[model_name]))
model_spec['x-model'] = model_name
visited_models[model_name] = path
def collect_models(container, key, path, models, swagger_spec):
"""Callback used during the swagger spec ingestion to collect all the
tagged models and create appropriate python types for them.
:param container: container being visited
:param key: attribute in container being visited as a string
:param path: list of path segments to the key
:param models: created model types are placed here
:type swagger_spec: :class:`bravado_core.spec.Spec`
"""
deref = swagger_spec.deref
if key == MODEL_MARKER:
model_spec = container
model_name = deref(model_spec.get(MODEL_MARKER))
models[model_name] = create_model_type(
swagger_spec, model_name, model_spec)
def create_model_type(swagger_spec, model_name, model_spec):
"""Create a dynamic class from the model data defined in the swagger
spec.
The docstring for this class is dynamically generated because generating
the docstring is relatively expensive, and would only be used in rare
cases for interactive debugging in a REPL.
:type swagger_spec: :class:`bravado_core.spec.Spec`
:param model_name: model name
:param model_spec: json-like dict that describes a model.
:returns: dynamic type created with attributes, docstrings attached
:rtype: type
"""
doc = docstring_property(partial(
create_model_docstring, swagger_spec, model_spec))
methods = dict(
__doc__=doc,
__eq__=lambda self, other: compare(self, other),
__init__=lambda self, **kwargs: model_constructor(self, model_spec,
kwargs),
__repr__=lambda self: create_model_repr(self, model_spec),
__dir__=lambda self: model_dir(self, model_spec),
)
return type(str(model_name), (object,), methods)
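# Hedged illustration (not part of bravado-core): create_model_type() is a thin
# wrapper around type(); a stripped-down equivalent for a hand-written spec
# (the 'Pet' model below is purely an assumption) would look like:
#
#     pet_spec = {'properties': {'id': {'type': 'integer'},
#                                'name': {'type': 'string'}}}
#     Pet = type(str('Pet'), (object,), {
#         '__init__': lambda self, **kw: model_constructor(self, pet_spec, kw),
#         '__repr__': lambda self: create_model_repr(self, pet_spec),
#     })
#     Pet(id=1, name='fido')   # -> Pet(id=1, name='fido')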
def model_dir(model, model_spec):
"""Responsible for returning the names of the valid attributes on this
model object. This includes any properties defined in this model's spec
    plus additional attributes that exist as `additionalProperties`.
:param model: instance of a model
:param model_spec: spec the passed in model in dict form
:returns: list of str
"""
return list(model_spec['properties'].keys()) + model._additional_props
def compare(first, second):
"""Compares two model types for equivalence.
TODO: If a type composes another model type, .__dict__ recurse on those
and compare again on those dict values.
:param first: generated model type
:type first: type
:param second: generated model type
:type second: type
:returns: True if equivalent, False otherwise
"""
if not hasattr(first, '__dict__') or not hasattr(second, '__dict__'):
return False
# Ignore any '_raw' keys
def norm_dict(d):
return dict((k, d[k]) for k in d if k != '_raw')
return norm_dict(first.__dict__) == norm_dict(second.__dict__)
def model_constructor(model, model_spec, constructor_kwargs):
"""Constructor for the given model instance. Just assigns kwargs as attrs
on the model based on the 'properties' in the model specification.
:param model: Instance of a model type
:type model: type
:param model_spec: model specification
:type model_spec: dict
:param constructor_kwargs: kwargs sent in to the constructor invocation
:type constructor_kwargs: dict
:raises: AttributeError on constructor_kwargs that don't exist in the
model specification's list of properties
"""
arg_names = list(constructor_kwargs.keys())
for attr_name, attr_spec in iteritems(model_spec['properties']):
if attr_name in arg_names:
attr_value = constructor_kwargs[attr_name]
arg_names.remove(attr_name)
else:
attr_value = None
setattr(model, attr_name, attr_value)
if arg_names and not model_spec.get('additionalProperties', True):
raise AttributeError(
"Model {0} does not have attributes for: {1}"
.format(type(model), arg_names))
# we've got additionalProperties to set on the model
for arg_name in arg_names:
setattr(model, arg_name, constructor_kwargs[arg_name])
# stash so that dir(model) works
model._additional_props = arg_names
def create_model_repr(model, model_spec):
"""Generates the repr string for the model.
:param model: Instance of a model
:param model_spec: model specification
:type model_spec: dict
:returns: repr string for the model
"""
s = [
"{0}={1!r}".format(attr_name, getattr(model, attr_name))
for attr_name in sorted(model_spec['properties'].keys())
]
return "{0}({1})".format(model.__class__.__name__, ', '.join(s))
def is_model(swagger_spec, schema_object_spec):
"""
:param swagger_spec: :class:`bravado_core.spec.Spec`
:param schema_object_spec: specification for a swagger object
:type schema_object_spec: dict
:return: True if the spec has been "marked" as a model type, false
otherwise.
"""
deref = swagger_spec.deref
schema_object_spec = deref(schema_object_spec)
return deref(schema_object_spec.get(MODEL_MARKER)) is not None
def create_model_docstring(swagger_spec, model_spec):
"""
:type swagger_spec: :class:`bravado_core.spec.Spec`
:param model_spec: specification for a model in dict form
:rtype: string or unicode
"""
deref = swagger_spec.deref
model_spec = deref(model_spec)
s = 'Attributes:\n\n\t'
attr_iter = iter(sorted(iteritems(model_spec['properties'])))
# TODO: Add more stuff available in the spec - 'required', 'example', etc
for attr_name, attr_spec in attr_iter:
attr_spec = deref(attr_spec)
schema_type = deref(attr_spec['type'])
if schema_type in SWAGGER_PRIMITIVES:
# TODO: update to python types and take 'format' into account
attr_type = schema_type
elif schema_type == 'array':
array_spec = deref(attr_spec['items'])
if is_model(swagger_spec, array_spec):
array_type = deref(array_spec[MODEL_MARKER])
else:
array_type = deref(array_spec['type'])
attr_type = u'list of {0}'.format(array_type)
elif is_model(swagger_spec, attr_spec):
attr_type = deref(attr_spec[MODEL_MARKER])
elif schema_type == 'object':
attr_type = 'dict'
s += u'{0}: {1}'.format(attr_name, attr_type)
if deref(attr_spec.get('description')):
s += u' - {0}'.format(deref(attr_spec['description']))
s += '\n\t'
return s
|
MphasisWyde/eWamSublimeAdaptor
|
src/third-party/bravado_core/model.py
|
Python
|
mit
| 8,303
|
"""
Neighbours was moved into theano.tensor.nnet.neighbours.
This file was created for compatibility.
"""
from theano.tensor.nnet.neighbours import (images2neibs, neibs2images,
Images2Neibs)
__all__ = ["images2neibs", "neibs2images", "Images2Neibs"]
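# Hedged note (illustration only): new code should import from the moved
# location directly, e.g.
#     from theano.tensor.nnet.neighbours import images2neibs
# This module only re-exports the names above for backward compatibility.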
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/Theano-0.7.0-py3.4.egg/theano/sandbox/neighbours.py
|
Python
|
gpl-2.0
| 294
|
# Volatility
# Copyright (C) 2007,2008 Volatile Systems
#
# Volatools Basic
# Copyright (C) 2007 Komoku, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
@author: AAron Walters
@license: GNU General Public License 2.0 or later
@contact: awalters@volatilesystems.com
@organization: Volatile Systems
"""
from forensics.object import *
from forensics.object2 import NewObject
from forensics.win32.datetime import *
#from forensics.win32.info import *
from forensics.win32.info import find_psactiveprocesshead, kpcr_addr
import os
from struct import unpack
from forensics.addrspace import *
def pslist(addr_space, profile):
""" A Generator for _EPROCESS objects (uses _KPCR symbols) """
## Locate the kpcr struct - this is hard coded right now
kpcr = NewObject("_KPCR", kpcr_addr, addr_space, profile=profile)
## Try to dereference the KdVersionBlock as a 64 bit struct
DebuggerDataList = kpcr.KdVersionBlock.dereference_as("_DBGKD_GET_VERSION64").DebuggerDataList
PsActiveProcessHead = DebuggerDataList.dereference_as("_KDDEBUGGER_DATA64"
).PsActiveProcessHead \
or DebuggerDataList.dereference_as("_KDDEBUGGER_DATA32"
).PsActiveProcessHead \
or kpcr.KdVersionBlock.dereference_as("_KDDEBUGGER_DATA32"
).PsActiveProcessHead
if PsActiveProcessHead:
print type(PsActiveProcessHead)
## Try to iterate over the process list in PsActiveProcessHead
## (its really a pointer to a _LIST_ENTRY)
for l in PsActiveProcessHead.dereference_as("_LIST_ENTRY").list_of_type(
"_EPROCESS", "ActiveProcessLinks"):
yield l
else:
raise RuntimeError("Unable to find PsActiveProcessHead - is this image supported?")
def process_list(addr_space, types, symbol_table=None):
"""
Get the virtual addresses of all Windows processes
"""
plist = []
PsActiveProcessHead = find_psactiveprocesshead(addr_space,types)
if not PsActiveProcessHead is None:
(offset, tmp) = get_obj_offset(types, ['_EPROCESS', 'ActiveProcessLinks'])
first_process = PsActiveProcessHead - offset
current = read_obj(addr_space, types, ['_EPROCESS', 'ActiveProcessLinks', 'Flink'],
first_process)
if current is None:
print "Unable to read beginning of process list 0x%x. Try a different DTB?" % (first_process)
return plist
this_process = current - offset
while current != PsActiveProcessHead:
Type = read_obj(addr_space, types, ['_EPROCESS', 'Pcb', 'Header','Type'], this_process)
if not Type == 0x03:
break
plist.append(this_process)
current = read_obj(addr_space, types, ['_EPROCESS', 'ActiveProcessLinks', 'Flink'],
this_process)
if current == None:
plist.append(this_process)
break
this_process = current - offset
return plist
def process_find_pid(addr_space, types, symbol_table, all_tasks, pid):
"""
Find process offset with this pid in the task list
"""
match_tasks = []
for task in all_tasks:
process_id = process_pid(addr_space, types, task)
if process_id == pid:
match_tasks.append(task)
return match_tasks
# Blocksize was chosen to make it aligned
# on 8 bytes
# Optimized by Michael Cohen
BLOCKSIZE = 1024 * 1024 * 10
def find_dtb(addr_space, types):
"""
Find the Idle dtb (DTB Feeling lucky)
"""
try:
flat_address_space = FileAddressSpace(addr_space.name,fast=True)
except Exception,e:
        raise RuntimeError("Unable to open image file %s: %s" % (addr_space.name, e))
offset = 0
while 1:
data = flat_address_space.fread(BLOCKSIZE)
found = 0
if not data:
break
while 1:
found = data.find("\x03\x00\x1b\x00", found+1)
if found >= 0:
(type,size) = unpack('=HH',data[found:found+4])
if process_imagename(addr_space,types,offset+found).find('Idle') != -1:
return process_dtb(addr_space, types, offset+found)
else:
break
offset+=len(data)
return None
def process_imagename(addr_space, types, task_vaddr):
return read_null_string(addr_space, types,
['_EPROCESS', 'ImageFileName'], task_vaddr)
def process_dtb(addr_space, types, task_vaddr):
return read_obj(addr_space, types,
['_EPROCESS', 'Pcb', 'DirectoryTableBase', 0], task_vaddr)
def process_vadroot(addr_space, types, task_vaddr):
return read_obj(addr_space, types,
['_EPROCESS', 'VadRoot'], task_vaddr)
def process_pid(addr_space, types, task_vaddr):
return read_obj(addr_space, types,
['_EPROCESS', 'UniqueProcessId'], task_vaddr)
def process_num_active_threads(addr_space, types, task_vaddr):
return read_obj(addr_space, types,
['_EPROCESS', 'ActiveThreads'], task_vaddr)
def process_exit_status(addr_space, types, task_vaddr):
return read_obj(addr_space, types,
['_EPROCESS', 'ExitStatus'], task_vaddr)
def process_inherited_from(addr_space, types, task_vaddr):
return read_obj(addr_space, types,
['_EPROCESS', 'InheritedFromUniqueProcessId'], task_vaddr)
def process_handle_table(addr_space, types, task_vaddr):
return read_obj(addr_space, types,
['_EPROCESS', 'ObjectTable'], task_vaddr)
def process_handle_count(addr_space, types, task_vaddr):
object_table = read_obj(addr_space, types,
['_EPROCESS', 'ObjectTable'], task_vaddr)
if object_table is None or not addr_space.is_valid_address(object_table):
return None
else:
try:
handle_count = read_obj(addr_space, types,
['_HANDLE_TABLE', 'HandleCount'], object_table)
except: return None
return handle_count
def process_create_time(addr_space, types, task_vaddr):
(create_time_offset, tmp) = get_obj_offset(types, ['_EPROCESS', 'CreateTime'])
create_time = read_time(addr_space, types, task_vaddr + create_time_offset)
if create_time is None:
return None
create_time = windows_to_unix_time(create_time)
return create_time
def process_exit_time(addr_space, types, task_vaddr):
(exit_time_offset, tmp) = get_obj_offset(types, ['_EPROCESS', 'ExitTime'])
exit_time = read_time(addr_space, types, task_vaddr + exit_time_offset)
if exit_time is None:
return None
exit_time = windows_to_unix_time(exit_time)
return exit_time
def process_addr_space(kaddr_space, types, task_vaddr, fname):
directory_table_base = read_obj(kaddr_space, types,
['_EPROCESS', 'Pcb', 'DirectoryTableBase', 0], task_vaddr)
try:
#process_address_space = type(kaddr_space)(fname, directory_table_base)
process_address_space = kaddr_space.__class__(kaddr_space.base, directory_table_base)
except:
return None
return process_address_space
def process_peb(addr_space, types, task_vaddr):
return read_obj(addr_space, types,
['_EPROCESS', 'Peb'], task_vaddr)
def process_threadlisthead(addr_space, types, task_vaddr):
return read_obj(addr_space, types,
['_EPROCESS', 'ThreadListHead', 'Flink'], task_vaddr)
def create_addr_space(kaddr_space, directory_table_base):
try:
process_address_space = kaddr_space.__class__(kaddr_space.base, directory_table_base)
except:
return None
return process_address_space
def process_command_line(process_address_space, types, peb_vaddr):
process_parameters = read_obj(process_address_space, types,
['_PEB', 'ProcessParameters'], peb_vaddr)
if process_parameters is None:
return None
return read_unicode_string(process_address_space, types,
['_RTL_USER_PROCESS_PARAMETERS', 'CommandLine'],
process_parameters)
def peb_number_processors(process_address_space, types, peb_vaddr):
return read_obj(process_address_space, types,
['_PEB', 'NumberOfProcessors'], peb_vaddr)
def peb_csdversion(process_address_space, types, peb_vaddr):
return read_unicode_string(process_address_space, types,
['_PEB', 'CSDVersion'], peb_vaddr)
def module_path(process_address_space, types, module_vaddr):
return read_unicode_string(process_address_space, types,
['_LDR_MODULE', 'FullDllName'], module_vaddr)
def module_size(process_address_space, types, module_vaddr):
return read_obj(process_address_space, types,
['_LDR_MODULE', 'SizeOfImage'], module_vaddr)
def module_base(process_address_space, types, module_vaddr):
return read_obj(process_address_space, types,
['_LDR_MODULE', 'BaseAddress'], module_vaddr)
def process_ldrs(process_address_space, types, peb_vaddr):
ldr = read_obj(process_address_space, types,
['_PEB', 'Ldr'], peb_vaddr)
module_list = []
if ldr is None:
print "Unable to read ldr for peb 0x%x" % (peb_vaddr)
return module_list
first_module = read_obj(process_address_space, types,
['_PEB_LDR_DATA', 'InLoadOrderModuleList', 'Flink'],
ldr)
if first_module is None:
print "Unable to read first module for ldr 0x%x" % (ldr)
return module_list
this_module = first_module
next_module = read_obj(process_address_space, types,
['_LDR_MODULE', 'InLoadOrderModuleList', 'Flink'],
this_module)
if next_module is None:
print "ModuleList Truncated, unable to read module at 0x%x\n" % (this_module)
return module_list
while next_module != first_module:
module_list.append(this_module)
if not process_address_space.is_valid_address(next_module):
print "ModuleList Truncated, unable to read module at 0x%x\n" % (next_module)
return module_list
prev_module = this_module
this_module = next_module
next_module = read_obj(process_address_space, types,
['_LDR_MODULE', 'InLoadOrderModuleList', 'Flink'],
this_module)
return module_list
def find_csdversion(addr_space, types):
CSDVersionDict = dict()
all_tasks = process_list(addr_space, types)
for task in all_tasks:
if not addr_space.is_valid_address(task):
continue
process_address_space = process_addr_space(addr_space, types, task, addr_space.base.fname)
if process_address_space is None:
continue
peb = process_peb(addr_space, types, task)
try:
if not process_address_space.is_valid_address(peb):
continue
except:
continue
CSDVersion = peb_csdversion(process_address_space, types, peb)
        if CSDVersion in CSDVersionDict:
            CSDVersionDict[CSDVersion] += 1
        else:
            CSDVersionDict[CSDVersion] = 1
    if not CSDVersionDict:
        return None
    # Return the CSDVersion string observed in the largest number of process PEBs
    MaxCSDVersion = max([(CSDVersionDict[x], x) for x in CSDVersionDict])[1]
    return MaxCSDVersion
|
backupManager/pyflag
|
src/plugins_old/MemoryForensics/Volatility-1.3_Linux_rc.1/forensics/win32/tasks.py
|
Python
|
gpl-2.0
| 12,448
|
"idlelib.filelist"
import os
from tkinter import messagebox as tkMessageBox
class FileList:
# N.B. this import overridden in PyShellFileList.
from idlelib.editor import EditorWindow
def __init__(self, root):
self.root = root
self.dict = {}
self.inversedict = {}
self.vars = {} # For EditorWindow.getrawvar (shared Tcl variables)
def open(self, filename, action=None):
assert filename
filename = self.canonize(filename)
if os.path.isdir(filename):
# This can happen when bad filename is passed on command line:
tkMessageBox.showerror(
"File Error",
"%r is a directory." % (filename,),
master=self.root)
return None
key = os.path.normcase(filename)
if key in self.dict:
edit = self.dict[key]
edit.top.wakeup()
return edit
if action:
# Don't create window, perform 'action', e.g. open in same window
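            # (For example, EditorWindow.open() can pass io.loadfile here so
            # the file is loaded into the current window instead of a new one.)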
return action(filename)
else:
edit = self.EditorWindow(self, filename, key)
if edit.good_load:
return edit
else:
edit._close()
return None
def gotofileline(self, filename, lineno=None):
edit = self.open(filename)
if edit is not None and lineno is not None:
edit.gotoline(lineno)
def new(self, filename=None):
return self.EditorWindow(self, filename)
def close_all_callback(self, *args, **kwds):
for edit in list(self.inversedict):
reply = edit.close()
if reply == "cancel":
break
return "break"
def unregister_maybe_terminate(self, edit):
try:
key = self.inversedict[edit]
except KeyError:
print("Don't know this EditorWindow object. (close)")
return
if key:
del self.dict[key]
del self.inversedict[edit]
if not self.inversedict:
self.root.quit()
def filename_changed_edit(self, edit):
edit.saved_change_hook()
try:
key = self.inversedict[edit]
except KeyError:
print("Don't know this EditorWindow object. (rename)")
return
filename = edit.io.filename
if not filename:
if key:
del self.dict[key]
self.inversedict[edit] = None
return
filename = self.canonize(filename)
newkey = os.path.normcase(filename)
if newkey == key:
return
if newkey in self.dict:
conflict = self.dict[newkey]
self.inversedict[conflict] = None
tkMessageBox.showerror(
"Name Conflict",
"You now have multiple edit windows open for %r" % (filename,),
master=self.root)
self.dict[newkey] = edit
self.inversedict[edit] = newkey
if key:
try:
del self.dict[key]
except KeyError:
pass
def canonize(self, filename):
if not os.path.isabs(filename):
try:
pwd = os.getcwd()
except OSError:
pass
else:
filename = os.path.join(pwd, filename)
return os.path.normpath(filename)
def _test(): # TODO check and convert to htest
from tkinter import Tk
from idlelib.editor import fixwordbreaks
from idlelib.run import fix_scaling
root = Tk()
fix_scaling(root)
fixwordbreaks(root)
root.withdraw()
flist = FileList(root)
flist.new()
if flist.inversedict:
root.mainloop()
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_filelist', verbosity=2)
# _test()
|
FFMG/myoddweb.piger
|
monitor/api/python/Python-3.7.2/Lib/idlelib/filelist.py
|
Python
|
gpl-2.0
| 3,896
|
#!/usr/bin/env python
import sys
import os
import re
import urlparse
def usage():
message = """ usage: {program} inDir outDir
inDir: directory containing .ht files
outDir: target for the new files"""
print(message.format(program = os.path.basename(sys.argv[0])))
def parseFile(filename):
file = open(filename, "r")
data = file.readlines()
data = [line.rstrip('\n') for line in data]
pairs = {}
    regEx = re.compile(r"^(\S+)\s(\S+)\s(\S+)\s((?:\s*\S*)+)$")
old_line = None
for line in data:
if len(line) > 0:
if(old_line != None):
print filename
#print("failed to parse line")
#print(old_line)
line = old_line + line
print line
old_line = None
split_line = regEx.split(line)
#print(split_line)
#print(urlparse.unquote(split_line[2]))
#print(split_line[4])
if(old_line == None and split_line[4] == "" and split_line[3] != "0"):
print(line)
print(split_line)
old_line = line
else:
pairs[urlparse.unquote(split_line[2])] = split_line[4]
assert(len(split_line) == 6)
#print data
#print(pairs)
return pairs
def parseFiles(dir):
strings = []
for files in os.listdir(dir):
if files.endswith(".ht"):
string = parseFile(os.path.join(dir,files))
print(files)
#print string
strings.append([files, string])
return strings
def extractSharedEntries(strings):
first_dict = strings[0][1]
shared_dict = {}
#print(first_dict)
for key, value in first_dict.iteritems():
        # check that the entry is the same in all dicts
is_in_all_dicts = True
for dict_file_pair in strings:
dict = dict_file_pair[1]
if not dict.has_key(key):
is_in_all_dicts = False
elif not dict[key] == value:
print("Element with different values")
print(key)
is_in_all_dicts = False
if is_in_all_dicts:
shared_dict[key] = value
#print(shared_dict)
for dict_file_pair in strings:
for key in shared_dict.iterkeys():
dict_file_pair[1].pop(key)
strings.append(["shared.ht", shared_dict])
return strings
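# Illustrative sketch (not from the original script) of what extractSharedEntries
# does: given
#   strings = [["en.ht", {"a": "1", "b": "2"}], ["de.ht", {"a": "1", "b": "3"}]]
# the key "a", whose value is identical in every file, is removed from each
# per-file dict and collected into a synthetic ["shared.ht", {"a": "1"}] entry,
# while "b" stays in the per-file dicts because its values differ.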
def writeOutFiles(dir, strings):
for string in strings:
file_name_base = string[0]
file_name_base = file_name_base.replace(".ht", ".properties")
file_name = os.path.join(dir, file_name_base)
file = open(file_name, "w")
for key, value in string[1].iteritems():
try:
file.write(key)
file.write("=")
file.write(value)
file.write("\n")
except UnicodeDecodeError:
print key
print value
file.close()
def main (args):
if(len(args) != 3):
usage()
sys.exit(1)
strings = parseFiles(args[1])
new_strings = extractSharedEntries(strings)
writeOutFiles(args[2], new_strings)
if __name__ == "__main__":
main(sys.argv)
|
jvanz/core
|
bin/extract-tooltip.py
|
Python
|
gpl-3.0
| 3,242
|
# Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Arvind Somya (asomya@cisco.com), Cisco Systems
import mock
from neutron.services.l3_router import l3_apic
from neutron.tests import base
TENANT = 'tenant1'
TENANT_CONTRACT = 'abcd'
ROUTER = 'router1'
SUBNET = 'subnet1'
NETWORK = 'network1'
NETWORK_NAME = 'one_network'
NETWORK_EPG = 'one_network-epg'
TEST_SEGMENT1 = 'test-segment1'
SUBNET_GATEWAY = '10.3.2.1'
SUBNET_CIDR = '10.3.1.0/24'
SUBNET_NETMASK = '24'
class FakeContext(object):
def __init__(self):
self.tenant_id = None
class FakeContract(object):
def __init__(self):
self.contract_id = '123'
class FakeEpg(object):
def __init__(self):
self.epg_id = 'abcd_epg'
class FakePort(object):
def __init__(self):
self.id = 'Fake_port_id'
self.network_id = NETWORK
self.subnet_id = SUBNET
class TestCiscoApicL3Plugin(base.BaseTestCase):
def setUp(self):
super(TestCiscoApicL3Plugin, self).setUp()
mock.patch('neutron.plugins.ml2.drivers.cisco.apic.apic_manager.'
'APICManager').start()
self.plugin = l3_apic.ApicL3ServicePlugin()
self.context = FakeContext()
self.context.tenant_id = TENANT
self.interface_info = {'subnet_id': SUBNET, 'network_id': NETWORK,
'name': NETWORK_NAME}
self.contract = FakeContract()
self.plugin.manager.create_tenant_contract = mock.Mock()
ctmk = mock.PropertyMock(return_value=self.contract.contract_id)
type(self.plugin.manager.create_tenant_contract).contract_id = ctmk
self.epg = FakeEpg()
self.plugin.manager.ensure_epg_created_for_network = mock.Mock()
epmk = mock.PropertyMock(return_value=self.epg.epg_id)
type(self.plugin.manager.ensure_epg_created_for_network).epg_id = epmk
self.plugin.manager.db.get_provider_contract = mock.Mock(
return_value=None)
self.plugin.manager.set_contract_for_epg = mock.Mock(
return_value=True)
self.plugin.get_subnet = mock.Mock(return_value=self.interface_info)
self.plugin.get_network = mock.Mock(return_value=self.interface_info)
mock.patch('neutron.db.l3_gwmode_db.L3_NAT_db_mixin.'
'_core_plugin').start()
mock.patch('neutron.db.l3_gwmode_db.L3_NAT_db_mixin.'
'add_router_interface').start()
mock.patch('neutron.db.l3_gwmode_db.L3_NAT_db_mixin.'
'remove_router_interface').start()
mock.patch('neutron.openstack.common.excutils.'
'save_and_reraise_exception').start()
def test_add_router_interface(self):
mgr = self.plugin.manager
self.plugin.add_router_interface(self.context, ROUTER,
self.interface_info)
mgr.create_tenant_contract.assert_called_once_with(TENANT)
mgr.create_tenant_contract.assertEqual(TENANT_CONTRACT)
mgr.ensure_epg_created_for_network.assert_called_once_with(
TENANT, NETWORK, NETWORK_NAME)
mgr.ensure_epg_created_for_network.assertEqual(NETWORK_EPG)
mgr.db.get_provider_contract.assert_called_once()
mgr.db.get_provider_contract.assertEqual(None)
mgr.set_contract_for_epg.assert_called_once()
def test_remove_router_interface(self):
mgr = self.plugin.manager
self.plugin.remove_router_interface(self.context, ROUTER,
self.interface_info)
mgr.create_tenant_contract.assert_called_once_with(TENANT)
mgr.ensure_epg_created_for_network.assert_called_once_with(
TENANT, NETWORK, NETWORK_NAME)
mgr.ensure_epg_created_for_network.assertEqual(NETWORK_EPG)
mgr.delete_contract_for_epg.assert_called_once()
def test_add_router_interface_fail_contract_delete(self):
mgr = self.plugin.manager
with mock.patch('neutron.db.l3_gwmode_db.L3_NAT_db_mixin.'
'add_router_interface',
side_effect=KeyError()):
self.plugin.add_router_interface(self.context, ROUTER,
self.interface_info)
mgr.delete_contract_for_epg.assert_called_once()
def test_delete_router_interface_fail_contract_create(self):
mgr = self.plugin.manager
with mock.patch('neutron.db.l3_gwmode_db.L3_NAT_db_mixin.'
'remove_router_interface',
side_effect=KeyError()):
self.plugin.remove_router_interface(self.context, ROUTER,
self.interface_info)
mgr.set_contract_for_epg.assert_called_once()
|
subramani95/neutron
|
neutron/tests/unit/services/l3_router/test_l3_apic_plugin.py
|
Python
|
apache-2.0
| 5,358
|
"""Support for retrieving status info from Google Wifi/OnHub routers."""
import logging
from datetime import timedelta
import voluptuous as vol
import requests
from homeassistant.util import dt
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_MONITORED_CONDITIONS, STATE_UNKNOWN)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_VERSION = 'current_version'
ATTR_LAST_RESTART = 'last_restart'
ATTR_LOCAL_IP = 'local_ip'
ATTR_NEW_VERSION = 'new_version'
ATTR_STATUS = 'status'
ATTR_UPTIME = 'uptime'
DEFAULT_HOST = 'testwifi.here'
DEFAULT_NAME = 'google_wifi'
ENDPOINT = '/api/v1/status'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1)
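# Each entry maps a sensor attribute to [[section, key] path into the status
# JSON, unit of measurement (or None), Material Design icon]; the path is
# resolved in GoogleWifiAPI.data_format() below.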
MONITORED_CONDITIONS = {
ATTR_CURRENT_VERSION: [
['software', 'softwareVersion'],
None,
'mdi:checkbox-marked-circle-outline'
],
ATTR_NEW_VERSION: [
['software', 'updateNewVersion'],
None,
'mdi:update'
],
ATTR_UPTIME: [
['system', 'uptime'],
'days',
'mdi:timelapse'
],
ATTR_LAST_RESTART: [
['system', 'uptime'],
None,
'mdi:restart'
],
ATTR_LOCAL_IP: [
['wan', 'localIpAddress'],
None,
'mdi:access-point-network'
],
ATTR_STATUS: [
['wan', 'online'],
None,
'mdi:google'
]
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS,
default=list(MONITORED_CONDITIONS)):
vol.All(cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Google Wifi sensor."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
conditions = config.get(CONF_MONITORED_CONDITIONS)
api = GoogleWifiAPI(host, conditions)
dev = []
for condition in conditions:
dev.append(GoogleWifiSensor(api, name, condition))
add_entities(dev, True)
class GoogleWifiSensor(Entity):
"""Representation of a Google Wifi sensor."""
def __init__(self, api, name, variable):
"""Initialize a Google Wifi sensor."""
self._api = api
self._name = name
self._state = None
variable_info = MONITORED_CONDITIONS[variable]
self._var_name = variable
self._var_units = variable_info[1]
self._var_icon = variable_info[2]
@property
def name(self):
"""Return the name of the sensor."""
return '{}_{}'.format(self._name, self._var_name)
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._var_icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._var_units
@property
def available(self):
"""Return availability of Google Wifi API."""
return self._api.available
@property
def state(self):
"""Return the state of the device."""
return self._state
def update(self):
"""Get the latest data from the Google Wifi API."""
self._api.update()
if self.available:
self._state = self._api.data[self._var_name]
else:
self._state = None
class GoogleWifiAPI:
"""Get the latest data and update the states."""
def __init__(self, host, conditions):
"""Initialize the data object."""
uri = 'http://'
resource = "{}{}{}".format(uri, host, ENDPOINT)
self._request = requests.Request('GET', resource).prepare()
self.raw_data = None
self.conditions = conditions
self.data = {
ATTR_CURRENT_VERSION: STATE_UNKNOWN,
ATTR_NEW_VERSION: STATE_UNKNOWN,
ATTR_UPTIME: STATE_UNKNOWN,
ATTR_LAST_RESTART: STATE_UNKNOWN,
ATTR_LOCAL_IP: STATE_UNKNOWN,
ATTR_STATUS: STATE_UNKNOWN
}
self.available = True
self.update()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from the router."""
try:
with requests.Session() as sess:
response = sess.send(self._request, timeout=10)
self.raw_data = response.json()
self.data_format()
self.available = True
except (ValueError, requests.exceptions.ConnectionError):
_LOGGER.warning("Unable to fetch data from Google Wifi")
self.available = False
self.raw_data = None
def data_format(self):
"""Format raw data into easily accessible dict."""
for attr_key in self.conditions:
value = MONITORED_CONDITIONS[attr_key]
try:
primary_key = value[0][0]
sensor_key = value[0][1]
if primary_key in self.raw_data:
sensor_value = self.raw_data[primary_key][sensor_key]
# Format sensor for better readability
if (attr_key == ATTR_NEW_VERSION and
sensor_value == '0.0.0.0'):
sensor_value = 'Latest'
elif attr_key == ATTR_UPTIME:
sensor_value = round(sensor_value / (3600 * 24), 2)
elif attr_key == ATTR_LAST_RESTART:
last_restart = (
dt.now() - timedelta(seconds=sensor_value))
sensor_value = last_restart.strftime(
'%Y-%m-%d %H:%M:%S')
elif attr_key == ATTR_STATUS:
if sensor_value:
sensor_value = 'Online'
else:
sensor_value = 'Offline'
elif attr_key == ATTR_LOCAL_IP:
if not self.raw_data['wan']['online']:
sensor_value = STATE_UNKNOWN
self.data[attr_key] = sensor_value
except KeyError:
_LOGGER.error("Router does not support %s field. "
"Please remove %s from monitored_conditions",
sensor_key, attr_key)
self.data[attr_key] = STATE_UNKNOWN
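# Rough shape of the /api/v1/status payload consumed by data_format() above
# (illustrative field values only):
#   {"software": {"softwareVersion": "...", "updateNewVersion": "0.0.0.0"},
#    "system":   {"uptime": 123456},
#    "wan":      {"online": True, "localIpAddress": "192.168.86.1"}}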
|
MartinHjelmare/home-assistant
|
homeassistant/components/google_wifi/sensor.py
|
Python
|
apache-2.0
| 6,588
|
import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from rest_framework import exceptions
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
)
@pytest.fixture()
def admin_contributor():
return AuthUserFactory()
@pytest.fixture()
def write_contrib():
return AuthUserFactory()
@pytest.fixture()
def read_contrib():
return AuthUserFactory()
@pytest.fixture()
def non_contrib():
return AuthUserFactory()
@pytest.fixture()
def disabled_contrib():
# disabled in the disable_user fixture so that they can be added as a
# contributor first
return AuthUserFactory()
@pytest.fixture()
def public_project(admin_contributor):
return ProjectFactory(creator=admin_contributor, is_public=True)
@pytest.fixture()
def private_project(
admin_contributor, write_contrib,
read_contrib, disabled_contrib):
private_project = ProjectFactory(creator=admin_contributor)
private_project.add_contributor(
write_contrib,
permissions=['read', 'write'],
auth=Auth(admin_contributor))
private_project.add_contributor(
read_contrib,
permissions=['read'],
auth=Auth(admin_contributor))
private_project.add_contributor(
disabled_contrib,
permissions=['read'],
auth=Auth(admin_contributor))
private_project.save()
return private_project
@pytest.fixture()
def disable_user(disabled_contrib, private_project):
# pass private_project so that account is disabled after private_project
# is setup
disabled_contrib.disable_account()
disabled_contrib.is_registered = False
disabled_contrib.save()
@pytest.mark.django_db
class NodeCitationsMixin:
def test_node_citations(
self, app, admin_contributor,
write_contrib, read_contrib,
non_contrib, private_url, public_url
):
# test_admin_can_view_private_project_citations
res = app.get(private_url, auth=admin_contributor.auth)
assert res.status_code == 200
# test_write_contrib_can_view_private_project_citations
res = app.get(private_url, auth=write_contrib.auth)
assert res.status_code == 200
# test_read_contrib_can_view_private_project_citations
res = app.get(private_url, auth=read_contrib.auth)
assert res.status_code == 200
# test_non_contrib_cannot_view_private_project_citations
res = app.get(private_url, auth=non_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# test_unauthenticated_cannot_view_private_project_citations
res = app.get(private_url, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
# test_unauthenticated_can_view_public_project_citations
res = app.get(public_url)
assert res.status_code == 200
# test_citations_are_read_only
post_res = app.post_json_api(
public_url, {},
auth=admin_contributor.auth,
expect_errors=True)
assert post_res.status_code == 405
put_res = app.put_json_api(
public_url, {},
auth=admin_contributor.auth,
expect_errors=True)
assert put_res.status_code == 405
delete_res = app.delete_json_api(
public_url,
auth=admin_contributor.auth,
expect_errors=True)
assert delete_res.status_code == 405
class TestNodeCitations(NodeCitationsMixin):
@pytest.fixture()
def public_url(self, public_project):
return '/{}nodes/{}/citation/'.format(API_BASE, public_project._id)
@pytest.fixture()
def private_url(self, private_project):
return '/{}nodes/{}/citation/'.format(API_BASE, private_project._id)
class TestNodeCitationsStyle(NodeCitationsMixin):
@pytest.fixture()
def public_url(self, public_project):
return '/{}nodes/{}/citation/apa/'.format(API_BASE, public_project._id)
@pytest.fixture()
def private_url(self, private_project):
return '/{}nodes/{}/citation/apa/'.format(
API_BASE, private_project._id)
|
erinspace/osf.io
|
api_tests/nodes/views/test_node_citations.py
|
Python
|
apache-2.0
| 4,342
|
# -*- coding: utf-8 -*-
import unittest
import re
from rdflib import ConjunctiveGraph, URIRef, Literal
from rdflib.util import from_n3
HOST = 'http://localhost:3031'
DB = '/db/'
# this assumes SPARQL1.1 query/update endpoints running locally at
# http://localhost:3031/db/
#
# The ConjunctiveGraph tests below require that the SPARQL endpoint renders its
# default graph as the union of all known graphs! This is incompatible with the
# endpoint behavior required by our Dataset tests in test_dataset.py, so you
# need to run a second SPARQL endpoint on a non-standard port,
# e.g. fuseki started with:
# ./fuseki-server --port 3031 --memTDB --update --set tdb:unionDefaultGraph=true /db
# THIS WILL DELETE ALL DATA IN THE /db dataset
michel = URIRef(u'urn:michel')
tarek = URIRef(u'urn:tarek')
bob = URIRef(u'urn:bob')
likes = URIRef(u'urn:likes')
hates = URIRef(u'urn:hates')
pizza = URIRef(u'urn:pizza')
cheese = URIRef(u'urn:cheese')
graphuri = URIRef('urn:graph')
othergraphuri = URIRef('urn:othergraph')
class TestSparql11(unittest.TestCase):
def setUp(self):
self.longMessage = True
self.graph = ConjunctiveGraph('SPARQLUpdateStore')
root = HOST + DB
self.graph.open((root + "sparql", root + "update"))
# clean out the store
for c in self.graph.contexts():
c.remove((None, None, None))
assert len(c) == 0
def tearDown(self):
self.graph.close()
def testSimpleGraph(self):
g = self.graph.get_context(graphuri)
g.add((tarek, likes, pizza))
g.add((bob, likes, pizza))
g.add((bob, likes, cheese))
g2 = self.graph.get_context(othergraphuri)
g2.add((michel, likes, pizza))
self.assertEquals(3, len(g), 'graph contains 3 triples')
self.assertEquals(1, len(g2), 'other graph contains 1 triple')
r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
self.assertEquals(2, len(list(r)), "two people like pizza")
r = g.triples((None, likes, pizza))
self.assertEquals(2, len(list(r)), "two people like pizza")
# Test initBindings
r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
initBindings={'s': tarek})
self.assertEquals(1, len(list(r)), "i was asking only about tarek")
r = g.triples((tarek, likes, pizza))
self.assertEquals(1, len(list(r)), "i was asking only about tarek")
r = g.triples((tarek, likes, cheese))
self.assertEquals(0, len(list(r)), "tarek doesn't like cheese")
g2.add((tarek, likes, pizza))
g.remove((tarek, likes, pizza))
r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
self.assertEquals(1, len(list(r)), "only bob likes pizza")
def testConjunctiveDefault(self):
g = self.graph.get_context(graphuri)
g.add((tarek, likes, pizza))
g2 = self.graph.get_context(othergraphuri)
g2.add((bob, likes, pizza))
g.add((tarek, hates, cheese))
self.assertEquals(2, len(g), 'graph contains 2 triples')
# the following are actually bad tests as they depend on your endpoint,
# as pointed out in the sparqlstore.py code:
#
## For ConjunctiveGraphs, reading is done from the "default graph" Exactly
## what this means depends on your endpoint, because SPARQL does not offer a
## simple way to query the union of all graphs as it would be expected for a
        ## ConjunctiveGraph.
##
## Fuseki/TDB has a flag for specifying that the default graph
## is the union of all graphs (tdb:unionDefaultGraph in the Fuseki config).
self.assertEquals(3, len(self.graph),
'default union graph should contain three triples but contains:\n'
'%s' % list(self.graph))
r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
self.assertEquals(2, len(list(r)), "two people like pizza")
r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
initBindings={'s': tarek})
self.assertEquals(1, len(list(r)), "i was asking only about tarek")
r = self.graph.triples((tarek, likes, pizza))
self.assertEquals(1, len(list(r)), "i was asking only about tarek")
r = self.graph.triples((tarek, likes, cheese))
self.assertEquals(0, len(list(r)), "tarek doesn't like cheese")
g2.remove((bob, likes, pizza))
r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
self.assertEquals(1, len(list(r)), "only tarek likes pizza")
def testUpdate(self):
self.graph.update("INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }")
g = self.graph.get_context(graphuri)
self.assertEquals(1, len(g), 'graph contains 1 triples')
def testUpdateWithInitNs(self):
self.graph.update(
"INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . } }",
initNs={'ns': URIRef('urn:')}
)
g = self.graph.get_context(graphuri)
self.assertEquals(
set(g.triples((None,None,None))),
set([(michel,likes,pizza)]),
'only michel likes pizza'
)
def testUpdateWithInitBindings(self):
self.graph.update(
"INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WherE { }",
initBindings={
'a': URIRef('urn:michel'),
'b': URIRef('urn:likes'),
'c': URIRef('urn:pizza'),
}
)
g = self.graph.get_context(graphuri)
self.assertEquals(
set(g.triples((None,None,None))),
set([(michel,likes,pizza)]),
'only michel likes pizza'
)
def testMultipleUpdateWithInitBindings(self):
self.graph.update(
"INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WHERE { };"
"INSERT { GRAPH <urn:graph> { ?d ?b ?c . } } WHERE { }",
initBindings={
'a': URIRef('urn:michel'),
'b': URIRef('urn:likes'),
'c': URIRef('urn:pizza'),
'd': URIRef('urn:bob'),
}
)
g = self.graph.get_context(graphuri)
self.assertEquals(
set(g.triples((None,None,None))),
set([(michel,likes,pizza), (bob,likes,pizza)]),
'michel and bob like pizza'
)
def testNamedGraphUpdate(self):
g = self.graph.get_context(graphuri)
r1 = "INSERT DATA { <urn:michel> <urn:likes> <urn:pizza> }"
g.update(r1)
self.assertEquals(
set(g.triples((None,None,None))),
set([(michel,likes,pizza)]),
'only michel likes pizza'
)
r2 = "DELETE { <urn:michel> <urn:likes> <urn:pizza> } " + \
"INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}"
g.update(r2)
self.assertEquals(
set(g.triples((None, None, None))),
set([(bob, likes, pizza)]),
'only bob likes pizza'
)
says = URIRef("urn:says")
# Strings with unbalanced curly braces
tricky_strs = ["With an unbalanced curly brace %s " % brace
for brace in ["{", "}"]]
for tricky_str in tricky_strs:
r3 = """INSERT { ?b <urn:says> "%s" }
WHERE { ?b <urn:likes> <urn:pizza>} """ % tricky_str
g.update(r3)
values = set()
for v in g.objects(bob, says):
values.add(str(v))
self.assertEquals(values, set(tricky_strs))
# Complicated Strings
r4strings = []
r4strings.append(ur'''"1: adfk { ' \\\" \" { "''')
r4strings.append(ur'''"2: adfk } <foo> #éï \\"''')
r4strings.append(ur"""'3: adfk { " \\\' \' { '""")
r4strings.append(ur"""'4: adfk } <foo> #éï \\'""")
r4strings.append(ur'''"""5: adfk { ' \\\" \" { """''')
r4strings.append(ur'''"""6: adfk } <foo> #éï \\"""''')
r4strings.append(u'"""7: ad adsfj \n { \n sadfj"""')
r4strings.append(ur"""'''8: adfk { " \\\' \' { '''""")
r4strings.append(ur"""'''9: adfk } <foo> #éï \\'''""")
r4strings.append(u"'''10: ad adsfj \n { \n sadfj'''")
r4 = "\n".join([
u'INSERT DATA { <urn:michel> <urn:says> %s } ;' % s
for s in r4strings
])
g.update(r4)
values = set()
for v in g.objects(michel, says):
values.add(unicode(v))
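        # The expected values are the r4strings with their surrounding quoting
        # (single, double, or triple quotes) stripped and backslash escapes
        # collapsed, mirroring how the store parses the inserted literals.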
self.assertEquals(values, set([re.sub(ur"\\(.)", ur"\1", re.sub(ur"^'''|'''$|^'|'$|" + ur'^"""|"""$|^"|"$', ur"", s)) for s in r4strings]))
# IRI Containing ' or #
# The fragment identifier must not be misinterpreted as a comment
# (commenting out the end of the block).
# The ' must not be interpreted as the start of a string, causing the }
# in the literal to be identified as the end of the block.
r5 = u"""INSERT DATA { <urn:michel> <urn:hates> <urn:foo'bar?baz;a=1&b=2#fragment>, "'}" }"""
g.update(r5)
values = set()
for v in g.objects(michel, hates):
values.add(unicode(v))
self.assertEquals(values, set([u"urn:foo'bar?baz;a=1&b=2#fragment", u"'}"]))
# Comments
r6 = u"""
INSERT DATA {
<urn:bob> <urn:hates> <urn:bob> . # No closing brace: }
<urn:bob> <urn:hates> <urn:michel>.
}
#Final { } comment"""
g.update(r6)
values = set()
for v in g.objects(bob, hates):
values.add(v)
self.assertEquals(values, set([bob, michel]))
def testNamedGraphUpdateWithInitBindings(self):
g = self.graph.get_context(graphuri)
r = "INSERT { ?a ?b ?c } WHERE {}"
g.update(r, initBindings={
'a': michel,
'b': likes,
'c': pizza
})
self.assertEquals(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
'only michel likes pizza'
)
def testEmptyNamedGraph(self):
empty_graph_iri = u"urn:empty-graph-1"
self.graph.update(u"CREATE GRAPH <%s>" % empty_graph_iri)
named_graphs = [unicode(r[0]) for r in self.graph.query(
"SELECT ?name WHERE { GRAPH ?name {} }")]
# Some SPARQL endpoint backends (like TDB) are not able to find empty named graphs
# (at least with this query)
if empty_graph_iri in named_graphs:
self.assertTrue(empty_graph_iri in [unicode(g.identifier)
for g in self.graph.contexts()])
def testEmptyLiteral(self):
# test for https://github.com/RDFLib/rdflib/issues/457
# also see test_issue457.py which is sparql store independent!
g = self.graph.get_context(graphuri)
g.add((
URIRef('http://example.com/s'),
URIRef('http://example.com/p'),
Literal('')))
o = tuple(g)[0][2]
self.assertEquals(o, Literal(''), repr(o))
from nose import SkipTest
import urllib2
try:
assert len(urllib2.urlopen(HOST).read()) > 0
except:
raise SkipTest(HOST + " is unavailable.")
if __name__ == '__main__':
unittest.main()
|
armandobs14/rdflib
|
test/test_sparqlupdatestore.py
|
Python
|
bsd-3-clause
| 11,424
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
# This is an auto-generated file. Do not edit it.
"""
Provides Twisted version information.
"""
from twisted.python import versions
version = versions.Version('twisted.news', 15, 2, 1)
|
bdh1011/wau
|
venv/lib/python2.7/site-packages/twisted/news/_version.py
|
Python
|
mit
| 260
|
"""MiniAEFrame - A minimal AppleEvent Application framework.
There are two classes:
AEServer -- a mixin class offering nice AE handling.
MiniApplication -- a very minimal alternative to FrameWork.py,
only suitable for the simplest of AppleEvent servers.
"""
import sys
import traceback
import MacOS
from Carbon import AE
from Carbon.AppleEvents import *
from Carbon import Evt
from Carbon.Events import *
from Carbon import Menu
from Carbon import Win
from Carbon.Windows import *
from Carbon import Qd
import aetools
import EasyDialogs
kHighLevelEvent = 23 # Not defined anywhere for Python yet?
class MiniApplication:
"""A minimal FrameWork.Application-like class"""
def __init__(self):
self.quitting = 0
# Initialize menu
self.appleid = 1
self.quitid = 2
Menu.ClearMenuBar()
self.applemenu = applemenu = Menu.NewMenu(self.appleid, "\024")
applemenu.AppendMenu("%s;(-" % self.getaboutmenutext())
if MacOS.runtimemodel == 'ppc':
applemenu.AppendResMenu('DRVR')
applemenu.InsertMenu(0)
self.quitmenu = Menu.NewMenu(self.quitid, "File")
self.quitmenu.AppendMenu("Quit")
self.quitmenu.SetItemCmd(1, ord("Q"))
self.quitmenu.InsertMenu(0)
Menu.DrawMenuBar()
def __del__(self):
self.close()
def close(self):
pass
def mainloop(self, mask = everyEvent, timeout = 60*60):
while not self.quitting:
self.dooneevent(mask, timeout)
def _quit(self):
self.quitting = 1
def dooneevent(self, mask = everyEvent, timeout = 60*60):
got, event = Evt.WaitNextEvent(mask, timeout)
if got:
self.lowlevelhandler(event)
def lowlevelhandler(self, event):
what, message, when, where, modifiers = event
h, v = where
if what == kHighLevelEvent:
msg = "High Level Event: %r %r" % (code(message), code(h | (v<<16)))
try:
AE.AEProcessAppleEvent(event)
except AE.Error, err:
print 'AE error: ', err
print 'in', msg
traceback.print_exc()
return
elif what == keyDown:
c = chr(message & charCodeMask)
if modifiers & cmdKey:
if c == '.':
raise KeyboardInterrupt, "Command-period"
if c == 'q':
if hasattr(MacOS, 'OutputSeen'):
MacOS.OutputSeen()
self.quitting = 1
return
elif what == mouseDown:
partcode, window = Win.FindWindow(where)
if partcode == inMenuBar:
result = Menu.MenuSelect(where)
id = (result>>16) & 0xffff # Hi word
item = result & 0xffff # Lo word
if id == self.appleid:
if item == 1:
EasyDialogs.Message(self.getabouttext())
elif item > 1 and hasattr(Menu, 'OpenDeskAcc'):
name = self.applemenu.GetMenuItemText(item)
Menu.OpenDeskAcc(name)
elif id == self.quitid and item == 1:
if hasattr(MacOS, 'OutputSeen'):
MacOS.OutputSeen()
self.quitting = 1
Menu.HiliteMenu(0)
return
# Anything not handled is passed to Python/SIOUX
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
else:
print "Unhandled event:", event
def getabouttext(self):
return self.__class__.__name__
def getaboutmenutext(self):
return "About %s\311" % self.__class__.__name__
class AEServer:
def __init__(self):
self.ae_handlers = {}
def installaehandler(self, classe, type, callback):
AE.AEInstallEventHandler(classe, type, self.callback_wrapper)
self.ae_handlers[(classe, type)] = callback
def close(self):
for classe, type in self.ae_handlers.keys():
AE.AERemoveEventHandler(classe, type)
def callback_wrapper(self, _request, _reply):
_parameters, _attributes = aetools.unpackevent(_request)
_class = _attributes['evcl'].type
_type = _attributes['evid'].type
if self.ae_handlers.has_key((_class, _type)):
_function = self.ae_handlers[(_class, _type)]
elif self.ae_handlers.has_key((_class, '****')):
_function = self.ae_handlers[(_class, '****')]
elif self.ae_handlers.has_key(('****', '****')):
_function = self.ae_handlers[('****', '****')]
else:
            raise RuntimeError('Cannot happen: AE callback without handler', (_class, _type))
# XXXX Do key-to-name mapping here
_parameters['_attributes'] = _attributes
_parameters['_class'] = _class
_parameters['_type'] = _type
if _parameters.has_key('----'):
_object = _parameters['----']
del _parameters['----']
# The try/except that used to be here can mask programmer errors.
# Let the program crash, the programmer can always add a **args
# to the formal parameter list.
rv = _function(_object, **_parameters)
else:
#Same try/except comment as above
rv = _function(**_parameters)
if rv == None:
aetools.packevent(_reply, {})
else:
aetools.packevent(_reply, {'----':rv})
def code(x):
"Convert a long int to the 4-character code it really is"
s = ''
for i in range(4):
x, c = divmod(x, 256)
s = chr(c) + s
return s
class _Test(AEServer, MiniApplication):
"""Mini test application, handles required events"""
def __init__(self):
MiniApplication.__init__(self)
AEServer.__init__(self)
self.installaehandler('aevt', 'oapp', self.open_app)
self.installaehandler('aevt', 'quit', self.quit)
self.installaehandler('****', '****', self.other)
self.mainloop()
def quit(self, **args):
self._quit()
def open_app(self, **args):
pass
def other(self, _object=None, _class=None, _type=None, **args):
print 'AppleEvent', (_class, _type), 'for', _object, 'Other args:', args
if __name__ == '__main__':
_Test()
|
xbmc/atv2
|
xbmc/lib/libPython/Python/Lib/plat-mac/MiniAEFrame.py
|
Python
|
gpl-2.0
| 6,454
|
#!/usr/bin/env python
# encoding: utf-8
#
# This file is part of BeRTOS.
#
# Bertos is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# As a special exception, you may use this file as part of a free software
# library without restriction. Specifically, if other files instantiate
# templates or use macros or inline functions from this file, or you compile
# this file and link it with other files to produce an executable, this
# file does not by itself cause the resulting executable to be covered by
# the GNU General Public License. This exception does not however
# invalidate any other reasons why the executable file might be covered by
# the GNU General Public License.
#
# Copyright 2008 Develer S.r.l. (http://www.develer.com/)
#
#
# Author: Lorenzo Berni <duplo@develer.com>
#
import sys
import os
import traceback
from PyQt4.QtCore import *
from PyQt4.QtGui import *
def _excepthook(exc_type, exc_value, exc_traceback):
project_dir = QApplication.instance().project.info("PROJECT_PATH")
if not project_dir:
project_dir = os.getcwd()
file_name = os.path.join(project_dir, "wizard_error.log")
if os.path.exists(file_name):
content = open(file_name, "r").read()
else:
content = ""
if not os.path.exists(os.path.dirname(file_name)):
os.makedirs(os.path.dirname(file_name))
f = open(file_name, "w")
message = "\n".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
f.write(message)
f.write(">"*80 + "\n")
f.write(content)
f.close()
print>>sys.stderr, message
QMessageBox.critical(
None,
"Exception occurred",
"An exception is occurred. Please attach the '%s' file to the support request." %os.path.abspath(file_name),
)
QApplication.instance().quit()
sys.excepthook = _excepthook
|
dereks/bertos
|
wizard/exception_handler.py
|
Python
|
gpl-2.0
| 2,479
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import Transformer
from guessit.matcher import GuessFinder, found_property, found_guess
from guessit.containers import PropertiesContainer
from guessit.patterns import sep
from guessit.guess import Guess
from guessit.textutils import strip_brackets
class GuessReleaseGroup(Transformer):
def __init__(self):
Transformer.__init__(self, -190)
self.container = PropertiesContainer(canonical_from_pattern=False)
self._allowed_groupname_pattern = '[\w@#€£$&]'
self._forbidden_groupname_lambda = [lambda elt: elt in ['rip', 'by', 'for', 'par', 'pour', 'bonus'],
lambda elt: self._is_number(elt),
]
        # If the previous property is in this list, the match is considered safe
        # and the group name may contain a separator.
self.previous_safe_properties = ['videoCodec', 'format', 'videoApi', 'audioCodec', 'audioProfile', 'videoProfile', 'audioChannels']
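        # Illustrative example (not from the original source): in a name like
        # "Show.S01E01.720p.x264-GRP", "x264" is matched as videoCodec, so the
        # following "GRP" token is accepted as the releaseGroup; confidence is
        # highest when the two are separated by a single '-'.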
self.container.sep_replace_char = '-'
self.container.canonical_from_pattern = False
self.container.enhance = True
self.container.register_property('releaseGroup', self._allowed_groupname_pattern + '+')
self.container.register_property('releaseGroup', self._allowed_groupname_pattern + '+-' + self._allowed_groupname_pattern + '+')
def supported_properties(self):
return self.container.get_supported_properties()
def _is_number(self, s):
try:
int(s)
return True
except ValueError:
return False
def validate_group_name(self, guess):
val = guess['releaseGroup']
if len(val) >= 2:
if '-' in val:
checked_val = ""
for elt in val.split('-'):
forbidden = False
for forbidden_lambda in self._forbidden_groupname_lambda:
forbidden = forbidden_lambda(elt.lower())
if forbidden:
break
if not forbidden:
if checked_val:
checked_val += '-'
checked_val += elt
else:
break
val = checked_val
if not val:
return False
guess['releaseGroup'] = val
forbidden = False
for forbidden_lambda in self._forbidden_groupname_lambda:
forbidden = forbidden_lambda(val.lower())
if forbidden:
break
if not forbidden:
return True
return False
def is_leaf_previous(self, leaf, node):
if leaf.span[1] <= node.span[0]:
for idx in range(leaf.span[1], node.span[0]):
if not leaf.root.value[idx] in sep:
return False
return True
return False
def guess_release_group(self, string, node=None, options=None):
found = self.container.find_properties(string, node, 'releaseGroup')
guess = self.container.as_guess(found, string, self.validate_group_name, sep_replacement='-')
validated_guess = None
if guess:
explicit_group_node = node.group_node()
if explicit_group_node:
for leaf in explicit_group_node.leaves_containing(self.previous_safe_properties):
if self.is_leaf_previous(leaf, node):
if leaf.root.value[leaf.span[1]] == '-':
guess.metadata().confidence = 1
else:
guess.metadata().confidence = 0.7
validated_guess = guess
if not validated_guess:
# If previous group last leaf is identified as a safe property,
# consider the raw value as a releaseGroup
previous_group_node = node.previous_group_node()
if previous_group_node:
for leaf in previous_group_node.leaves_containing(self.previous_safe_properties):
if self.is_leaf_previous(leaf, node):
guess = Guess({'releaseGroup': node.value}, confidence=1, input=node.value, span=(0, len(node.value)))
if self.validate_group_name(guess):
node.guess = guess
validated_guess = guess
if validated_guess:
# If following group nodes have only one unidentified leaf, it belongs to the release group
next_group_node = node
while True:
next_group_node = next_group_node.next_group_node()
if next_group_node:
leaves = next_group_node.leaves()
if len(leaves) == 1 and not leaves[0].guess:
validated_guess['releaseGroup'] = validated_guess['releaseGroup'] + leaves[0].value
leaves[0].guess = validated_guess
else:
break
else:
break
if validated_guess:
# Strip brackets
validated_guess['releaseGroup'] = strip_brackets(validated_guess['releaseGroup'])
return validated_guess
def process(self, mtree, options=None):
GuessFinder(self.guess_release_group, None, self.log, options).process_nodes(mtree.unidentified_leaves())
|
Hellowlol/PyTunes
|
libs/guessit/transfo/guess_release_group.py
|
Python
|
gpl-3.0
| 6,537
|
#!/usr/bin/env python
# DExTer : Debugging Experience Tester
# ~~~~~~ ~ ~~ ~ ~~
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""DExTer entry point. This is the only non-module file."""
import sys
if sys.version_info < (3, 6, 0):
sys.stderr.write("You need python 3.6 or later to run DExTer\n")
# Equivalent to sys.exit(ReturnCode._ERROR).
sys.exit(1)
from dex.tools import main
if __name__ == '__main__':
return_code = main()
sys.exit(return_code.value)
|
google/llvm-propeller
|
debuginfo-tests/dexter/dexter.py
|
Python
|
apache-2.0
| 653
|
from UnitTest import UnitTest
from random import randrange, shuffle
class PassThru(Exception):
pass
def check_pass_thru():
raise PassThru
yield 1
class BadCmp:
def __hash__(self):
return 1
def __cmp__(self, other):
raise RuntimeError
class ReprWrapper:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
class HashCountingInt(int):
'int-like object that counts the number of times __hash__ is called'
def __init__(self, *args):
self.hash_count = 0
def __hash__(self):
self.hash_count += 1
return int.__hash__(self)
class TestJointOps(UnitTest):
# Tests common to both set and frozenset
def setUp(self, *args, **kwargs):
self.word = word = 'simsalabim'
self.otherword = 'madagascar'
self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
self.assertRaises(TypeError, self.thetype, [[]])
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
self.assertRaises(TypeError, self.s.__contains__, [[]])
s = self.thetype([frozenset(self.letters)])
self.assert_(self.thetype(self.letters) in s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
# type(x) not supported in pyjamas
#self.assertEqual(type(u), self.thetype)
self.assertRaises(PassThru, self.s.union, check_pass_thru())
self.assertRaises(TypeError, self.s.union, [[]])
#for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
self.assertEqual(self.thetype('abcba').union(C('efgfe')),
set('abcefg'))
self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
# Py2.6:
#self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
v = self.s | self.otherword
except TypeError:
pass
else:
self.fail("s|t did not screen-out general iterables")
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
#self.assertEqual(type(i), self.thetype)
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
#for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').intersection(C('cdc')),
set('cc'))
self.assertEqual(self.thetype('abcba').intersection(C('efgfe')),
set(''))
self.assertEqual(self.thetype('abcba').intersection(C('ccb')),
set('bc'))
self.assertEqual(self.thetype('abcba').intersection(C('ef')),
set(''))
# Py2.6:
#self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
s = self.thetype('abcba')
# Py2.6:
#z = s.intersection()
#if self.thetype == frozenset():
# self.assertEqual(id(s), id(z))
#else:
# self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
#if not hasattr(self.thetype, 'isdisjoint'):
if not hasattr(set, 'isdisjoint'):
self.fail("isdisjoint not supported")
return
def f(s1, s2):
'Pure python equivalent of isdisjoint()'
return not set(s1).intersection(s2)
for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
s1 = self.thetype(larg)
for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
#for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assert_(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
v = self.s & self.otherword
except TypeError:
pass
else:
self.fail("s&t did not screen-out general iterables")
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
#self.assertEqual(type(i), self.thetype)
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
self.assertRaises(TypeError, self.s.difference, [[]])
#for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').difference(C('cdc')),
set('ab'))
self.assertEqual(self.thetype('abcba').difference(C('efgfe')),
set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('ccb')),
set('a'))
self.assertEqual(self.thetype('abcba').difference(C('ef')),
set('abc'))
# Py2.6:
#self.assertEqual(self.thetype('abcba').difference(), set('abc'))
#self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
#def test_sub(self):
# i = self.s.difference(self.otherword)
# self.assertEqual(self.s - set(self.otherword), i)
# self.assertEqual(self.s - frozenset(self.otherword), i)
# try:
# v = self.s - self.otherword
# except TypeError:
# pass
# else:
# self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
#self.assertEqual(type(i), self.thetype)
self.assertRaises(PassThru, self.s.symmetric_difference,
check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
#for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(
self.thetype('abcba').symmetric_difference(C('cdc')),
set('abd'))
self.assertEqual(
self.thetype('abcba').symmetric_difference(C('efgfe')),
set('abcefg'))
self.assertEqual(
self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef'))
, set('abcef'))
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
v = self.s ^ self.otherword
except TypeError:
pass
else:
self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, set(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, set(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ['abcdef', 'bcd', 'bdcb', 'fed', 'fedccba'])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_compare(self):
self.assertRaises(TypeError, self.s.__cmp__, self.s)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ['ab', 'abcde', 'def'])
self.assert_(p < q)
self.assert_(p <= q)
self.assert_(q <= q)
self.assert_(q > p)
self.assert_(q >= p)
self.failIf(q < r)
self.failIf(q <= r)
self.failIf(q > r)
self.failIf(q >= r)
self.assert_(set('a').issubset('abc'))
self.assert_(set('abc').issuperset('a'))
self.failIf(set('a').issubset('cbs'))
self.failIf(set('cbs').issuperset('a'))
#def test_subclass_with_custom_hash(self):
# # Bug #1257731
# class H(self.thetype):
# def __hash__(self):
# return int(id(self) & 0x7fffffff)
# s=H()
# f=set()
# f.add(s)
# self.assert_(s in f)
# f.remove(s)
# f.add(s)
# f.discard(s)
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
try:
s = self.thetype([BadCmp(), BadCmp()])
self.fail(
"Issue #571 Set allows adding objects with bad comparison methods")
except RuntimeError:
pass
#self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
#if hasattr(s, 'add'):
# self.assertRaises(RuntimeError, s.add, BadCmp())
# self.assertRaises(RuntimeError, s.discard, BadCmp())
# self.assertRaises(RuntimeError, s.remove, BadCmp())
#def test_cyclical_repr(self):
# w = ReprWrapper()
# s = self.thetype([w])
# w.value = s
# name = repr(s).partition('(')[0] # strip class name from repr string
# self.assertEqual(repr(s), '%s([%s(...)])' % (name, name))
#def test_do_not_rehash_dict_keys(self):
# n = 10
# d = dict.fromkeys(map(HashCountingInt, xrange(n)))
# self.assertEqual(sum(elem.hash_count for elem in d), n)
# s = self.thetype(d)
# self.assertEqual(sum(elem.hash_count for elem in d), n)
# s.difference(d)
# self.assertEqual(sum(elem.hash_count for elem in d), n)
# if hasattr(s, 'symmetric_difference_update'):
# s.symmetric_difference_update(d)
# self.assertEqual(sum(elem.hash_count for elem in d), n)
# d2 = dict.fromkeys(set(d))
# self.assertEqual(sum(elem.hash_count for elem in d), n)
# d3 = dict.fromkeys(frozenset(d))
# self.assertEqual(sum(elem.hash_count for elem in d), n)
# d3 = dict.fromkeys(frozenset(d), 123)
# self.assertEqual(sum(elem.hash_count for elem in d), n)
# self.assertEqual(d3, dict.fromkeys(d, 123))
class SetTest(TestJointOps):
thetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
self.assertRaises(TypeError, s.__init__, s, 2)
self.assertRaises(TypeError, s.__init__, 1)
#def test_constructor_identity(self):
# s = self.thetype(range(3))
# t = self.thetype(s)
# self.assertNotEqual(id(s), id(t))
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
#self.assertNotEqual(id(self.s), id(dup))
def test_add(self):
self.s.add('Q')
self.assert_('Q' in self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assert_('a' not in self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
self.assertRaises(TypeError, self.s.remove, [])
s = self.thetype([frozenset(self.word)])
self.assert_(self.thetype(self.word) in s)
s.remove(self.thetype(self.word))
self.assert_(self.thetype(self.word) not in s)
self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError, e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError, e:
self.assertTrue(e[0] is key,
"KeyError should be %s, not %s" % (key, e[0]))
else:
self.fail()
def test_discard(self):
self.s.discard('a')
self.assert_('a' not in self.s)
self.s.discard('Q')
self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
self.assert_(self.thetype(self.word) in s)
s.discard(self.thetype(self.word))
self.assert_(self.thetype(self.word) not in s)
s.discard(self.thetype(self.word))
def test_pop(self):
for i in xrange(len(self.s)):
elem = self.s.pop()
self.assert_(elem not in self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
#for c in (self.word + self.otherword):
t = self.word + self.otherword
for c in t:
self.assert_(c in self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, [[]])
for p, q in (
('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
#for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
# Py2.6:
return
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
#for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
#for c in (self.word + self.otherword):
t = self.word + self.otherword
if isinstance(self.s, set):
for c in t:
self.assert_(c in self.s)
else:
self.fail("|= result not a set")
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
#for c in (self.word + self.otherword):
t = self.word + self.otherword
for c in t:
if c in self.otherword and c in self.word:
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
self.assertRaises(PassThru, self.s.intersection_update,
check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, [[]])
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
#for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
# Py2.6:
#self.assertEqual(s.intersection_update(C(p), C(t)), None)
#self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
#for c in (self.word + self.otherword):
t = self.word + self.otherword
if isinstance(self.s, set):
for c in t:
if c in self.otherword and c in self.word:
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
else:
self.fail("&= result not a set")
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
#for c in (self.word + self.otherword):
t = self.word + self.otherword
for c in t:
if c in self.word and c not in self.otherword:
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (
('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
#for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
# Py2.6:
#s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
# Py2.6:
#s = self.thetype('abcdefghih')
#s.difference_update(C('cdc'), C('aba'))
#self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
#for c in (self.word + self.otherword):
t = self.word + self.otherword
for c in t:
if c in self.word and c not in self.otherword:
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
#for c in (self.word + self.otherword):
t = self.word + self.otherword
for c in t:
if (c in self.word) ^ (c in self.otherword):
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update,
check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (
('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
#for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
#for c in (self.word + self.otherword):
t = self.word + self.otherword
if isinstance(self.s, set):
for c in t:
if (c in self.word) ^ (c in self.otherword):
self.assert_(c in self.s)
else:
self.assert_(c not in self.s)
else:
self.fail("^= result not a set")
def test_inplace_on_self(self):
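        # note: the early return below skips the in-place-on-self checks that follow.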
return
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
class SetSubclass(set):
pass
class TestSetSubclass(SetTest):
thetype = SetSubclass
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
class TestSetSubclassWithKeywordArgs(SetTest):
def test_keywords_in_subclass(self):
'SF bug #1486663 -- this used to erroneously raise a TypeError'
SetSubclassWithKeywordArgs(newarg=1)
class FrozenSetTest(TestJointOps):
thetype = frozenset
def test_init(self):
s = self.thetype(self.word)
s.__init__(self.otherword)
self.assertEqual(s, set(self.word))
def test_singleton_empty_frozenset(self):
f = frozenset()
efs = [frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(xrange(0)), frozenset(frozenset()),
frozenset(f), f]
# All of the empty frozensets should have just one id()
#self.assertEqual(len(set(map(id, efs))), 1)
self.assertEqual(len(set(map(hash, efs))), 1)
#def test_constructor_identity(self):
# s = self.thetype(range(3))
# t = self.thetype(s)
# self.assertEqual(id(s), id(t))
def test_hash(self):
self.assertEqual(hash(self.thetype('abcdeb')),
hash(self.thetype('ebecda')))
# make sure that all permutations give the same hash value
n = 100
seq = [randrange(n) for i in xrange(n)]
results = set()
for i in xrange(200):
shuffle(seq)
results.add(hash(self.thetype(seq)))
self.assertEqual(len(results), 1)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
#self.assertEqual(id(self.s), id(dup))
def test_frozen_as_dictkey(self):
seq = range(10) + list('abcdefg') + ['apple']
key1 = self.thetype(seq)
key2 = self.thetype(reversed(seq))
self.assertEqual(key1, key2)
#self.assertNotEqual(id(key1), id(key2))
d = {}
d[key1] = 42
self.assertEqual(d[key2], 42)
def test_hash_caching(self):
f = self.thetype('abcdcda')
self.assertEqual(hash(f), hash(f))
def test_hash_effectiveness(self):
n = 13
hashvalues = set()
addhashvalue = hashvalues.add
#elemmasks = [(i+1, 1<<i) for i in range(n)]
elemmasks = [(1, 1), (2, 2), (3, 4), (4, 8), (5, 16), (6, 32), (7, 64),
(8, 128), (9, 256), (10, 512), (11, 1024), (12, 2048),
(13, 4096)]
for i in xrange(2 ** n):
addhashvalue(hash(frozenset([e for e, m in elemmasks if m & i])))
self.assertEqual(len(hashvalues), 2 ** n)
class FrozenSetSubclass(frozenset):
pass
class FrozenSetSubclassTest(FrozenSetTest):
thetype = FrozenSetSubclass
#def test_constructor_identity(self):
# s = self.thetype(range(3))
# t = self.thetype(s)
# self.assertNotEqual(id(s), id(t))
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
#self.assertNotEqual(id(self.s), id(dup))
def test_nested_empty_constructor(self):
s = self.thetype()
t = self.thetype(s)
self.assertEqual(s, t)
def test_singleton_empty_frozenset(self):
Frozenset = self.thetype
f = frozenset()
F = Frozenset()
efs = [Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(xrange(0)), Frozenset(Frozenset()),
Frozenset(frozenset()), f, F, Frozenset(f), Frozenset(F)]
# All empty frozenset subclass instances should have different ids
#self.assertEqual(len(set(map(id, efs))), len(efs))
self.assertEqual(len(set(map(hash, efs))), len(efs))
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
class TestBasicOps(UnitTest):
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
def checkempty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assert_(v in self.values)
setiter = iter(self.set)
# note: __length_hint__ is an internal undocumented API,
# don't rely on it in your own programs
self.assertEqual(setiter.__length_hint__(), len(self.set))
#------------------------------------------------------------------------------
class TestBasicOpsEmpty(TestBasicOps):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "set([])"
#------------------------------------------------------------------------------
class TestBasicOpsSingleton(TestBasicOps):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "set([3])"
def test_in(self):
self.failUnless(3 in self.set)
def test_not_in(self):
self.failUnless(2 not in self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTuple(TestBasicOps):
def setUp(self):
self.case = "unit set (tuple)"
self.values = [(0, "zero")]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "set([(0, 'zero')])"
def test_in(self):
self.failUnless((0, "zero") in self.set)
def test_not_in(self):
self.failUnless(9 not in self.set)
#------------------------------------------------------------------------------
#==============================================================================
def baditer():
raise TypeError
yield True
def gooditer():
yield True
class TestExceptionPropagation(UnitTest):
"""SF 628246: Set constructor should not trap iterator TypeErrors"""
def test_instanceWithException(self):
self.assertRaises(TypeError, set, baditer())
def test_instancesWithoutException(self):
# All of these iterables should load without exception.
set([1, 2, 3])
set((1, 2, 3))
set({'one': 1, 'two': 2, 'three': 3})
set(xrange(3))
set('abc')
set(gooditer())
def test_changingSizeWhileIterating(self):
s = set([1, 2, 3])
try:
for i in s:
s.update([4])
except RuntimeError:
pass
else:
self.fail("no exception when changing size during iteration")
#==============================================================================
class TestSetOfSets(UnitTest):
def test_constructor(self):
inner = frozenset([1])
outer = set([inner])
element = outer.pop()
#self.assertEqual(type(element), frozenset)
outer.add(inner) # Rebuild set of sets with .add method
outer.remove(inner)
self.assertEqual(outer, set()) # Verify that remove worked
outer.discard(inner) # Absence of KeyError indicates working fine
#==============================================================================
class TestBinaryOps(UnitTest):
def setUp(self):
self.set = set((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2: 1, 4: 3, 6: 5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_cmp(self):
a, b = set('a'), set('b')
self.assertRaises(TypeError, cmp, a, b)
# You can view this as a buglet: cmp(a, a) does not raise TypeError,
# because __eq__ is tried before __cmp__, and a.__eq__(a) returns True,
# which Python thinks is good enough to synthesize a cmp() result
# without calling __cmp__.
self.assertEqual(cmp(a, a), 0)
self.assertRaises(TypeError, cmp, a, 12)
self.assertRaises(TypeError, cmp, "abc", a)
#==============================================================================
class TestUpdateOps(UnitTest):
def setUp(self):
self.set = set((2, 4, 6))
def test_union_subset(self):
self.set |= set([2])
self.assertEqual(self.set, set((2, 4, 6)))
def test_union_superset(self):
self.set |= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_overlap(self):
self.set |= set([3, 4, 5])
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
self.set |= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_method_call(self):
self.set.update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_intersection_subset(self):
self.set &= set((2, 4))
self.assertEqual(self.set, set((2, 4)))
def test_intersection_superset(self):
self.set &= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_intersection_overlap(self):
self.set &= set([3, 4, 5])
self.assertEqual(self.set, set([4]))
def test_intersection_non_overlap(self):
self.set &= set([8])
self.assertEqual(self.set, empty_set)
def test_intersection_method_call(self):
self.set.intersection_update(set([3, 4, 5]))
self.assertEqual(self.set, set([4]))
def test_sym_difference_subset(self):
self.set ^= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_sym_difference_superset(self):
self.set ^= set((2, 4, 6, 8))
self.assertEqual(self.set, set([8]))
def test_sym_difference_overlap(self):
self.set ^= set((3, 4, 5))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
self.set ^= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_sym_difference_method_call(self):
self.set.symmetric_difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_difference_subset(self):
self.set -= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_difference_superset(self):
self.set -= set((2, 4, 6, 8))
self.assertEqual(self.set, set([]))
def test_difference_overlap(self):
self.set -= set((3, 4, 5))
self.assertEqual(self.set, set([2, 6]))
def test_difference_non_overlap(self):
self.set -= set([8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_difference_method_call(self):
self.set.difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 6]))
#==============================================================================
class TestMutate(UnitTest):
def setUp(self):
self.values = ["a", "b", "c"]
self.set = set(self.values)
def test_add_present(self):
self.set.add("c")
self.assertEqual(self.set, set("abc"))
def test_add_absent(self):
self.set.add("d")
self.assertEqual(self.set, set("abcd"))
def test_add_until_full(self):
tmp = set()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.set)
def test_remove_present(self):
self.set.remove("b")
self.assertEqual(self.set, set("ac"))
def test_remove_absent(self):
try:
self.set.remove("d")
self.fail("Removing missing element should have raised LookupError")
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.set)
for v in self.values:
self.set.remove(v)
expected_len -= 1
self.assertEqual(len(self.set), expected_len)
def test_discard_present(self):
self.set.discard("c")
self.assertEqual(self.set, set("ab"))
def test_discard_absent(self):
self.set.discard("d")
self.assertEqual(self.set, set("abc"))
def test_clear(self):
self.set.clear()
self.assertEqual(len(self.set), 0)
def test_pop(self):
popped = {}
while self.set:
popped[self.set.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.failUnless(v in popped)
def test_update_empty_tuple(self):
self.set.update(())
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_overlap(self):
self.set.update(("a",))
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_non_overlap(self):
self.set.update(("a", "z"))
self.assertEqual(self.set, set(self.values + ["z"]))
#==============================================================================
class TestSubsets(UnitTest):
case2method = {"<=": "issubset",
">=": "issuperset",
}
reverse = {"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
def test_issubset(self):
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
#------------------------------------------------------------------------------
class TestSubsetEqualEmpty(TestSubsets):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEqualNonEmpty(TestSubsets):
left = set([1, 2])
right = set([1, 2])
name = "equal pair"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEmptyNonEmpty(TestSubsets):
left = set()
right = set([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetPartial(TestSubsets):
left = set([1])
right = set([1, 2])
name = "one a non-empty proper subset of other"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetNonOverlap(TestSubsets):
left = set([1])
right = set([2])
name = "neither empty, neither contains"
cases = "!="
#==============================================================================
class TestOnlySetsInBinaryOps(UnitTest):
def test_eq_ne(self):
# Unlike the others, this is testing that == and != *are* allowed.
self.assertEqual(self.other == self.set, False)
self.assertEqual(self.set == self.other, False)
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
def test_ge_gt_le_lt(self):
self.assertRaises(TypeError, lambda: self.set < self.other)
self.assertRaises(TypeError, lambda: self.set <= self.other)
self.assertRaises(TypeError, lambda: self.set > self.other)
self.assertRaises(TypeError, lambda: self.set >= self.other)
self.assertRaises(TypeError, lambda: self.other < self.set)
self.assertRaises(TypeError, lambda: self.other <= self.set)
self.assertRaises(TypeError, lambda: self.other > self.set)
self.assertRaises(TypeError, lambda: self.other >= self.set)
def test_update_operator(self):
try:
self.set |= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_update(self):
if self.otherIsIterable:
self.set.update(self.other)
else:
self.assertRaises(TypeError, self.set.update, self.other)
def test_union(self):
self.assertRaises(TypeError, lambda: self.set | self.other)
self.assertRaises(TypeError, lambda: self.other | self.set)
if self.otherIsIterable:
self.set.union(self.other)
else:
self.assertRaises(TypeError, self.set.union, self.other)
def test_intersection_update_operator(self):
try:
self.set &= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_intersection_update(self):
if self.otherIsIterable:
self.set.intersection_update(self.other)
else:
self.assertRaises(TypeError,
self.set.intersection_update,
self.other)
def test_intersection(self):
self.assertRaises(TypeError, lambda: self.set & self.other)
self.assertRaises(TypeError, lambda: self.other & self.set)
if self.otherIsIterable:
self.set.intersection(self.other)
else:
self.assertRaises(TypeError, self.set.intersection, self.other)
def test_sym_difference_update_operator(self):
try:
self.set ^= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_sym_difference_update(self):
if self.otherIsIterable:
self.set.symmetric_difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.symmetric_difference_update,
self.other)
def test_sym_difference(self):
self.assertRaises(TypeError, lambda: self.set ^ self.other)
self.assertRaises(TypeError, lambda: self.other ^ self.set)
if self.otherIsIterable:
self.set.symmetric_difference(self.other)
else:
self.assertRaises(TypeError, self.set.symmetric_difference,
self.other)
def test_difference_update_operator(self):
try:
self.set -= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_difference_update(self):
if self.otherIsIterable:
self.set.difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.difference_update,
self.other)
def test_difference(self):
self.assertRaises(TypeError, lambda: self.set - self.other)
self.assertRaises(TypeError, lambda: self.other - self.set)
if self.otherIsIterable:
self.set.difference(self.other)
else:
self.assertRaises(TypeError, self.set.difference, self.other)
#------------------------------------------------------------------------------
class TestOnlySetsNumeric(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 19
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsDict(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = {1: 2, 3: 4}
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsOperator(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = operator.add
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsTuple(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = (2, 4, 6)
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsString(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 'abc'
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsGenerator(TestOnlySetsInBinaryOps):
def setUp(self):
def gen():
for i in xrange(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
#==============================================================================
class TestCopying(UnitTest):
def test_copy(self):
dup = self.set.copy()
dup_list = list(dup);
dup_list.sort()
set_list = list(self.set);
set_list.sort()
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.failUnless(dup_list[i] is set_list[i])
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
##print type(dup), repr(dup)
dup_list = list(dup);
dup_list.sort()
set_list = list(self.set);
set_list.sort()
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertEqual(dup_list[i], set_list[i])
#------------------------------------------------------------------------------
class TestCopyingEmpty(TestCopying):
def setUp(self):
self.set = set()
#------------------------------------------------------------------------------
class TestCopyingSingleton(TestCopying):
def setUp(self):
self.set = set(["hello"])
#------------------------------------------------------------------------------
class TestCopyingTriple(TestCopying):
def setUp(self):
self.set = set(["zero", 0, None])
#------------------------------------------------------------------------------
class TestCopyingTuple(TestCopying):
def setUp(self):
self.set = set([(1, 2)])
#------------------------------------------------------------------------------
class TestCopyingNested(TestCopying):
def setUp(self):
self.set = set([((1, 2), (3, 4))])
#==============================================================================
class TestIdentities(UnitTest):
def setUp(self):
self.a = set('abracadabra')
self.b = set('alacazam')
def test_binopsVsSubsets(self):
a, b = self.a, self.b
self.assert_(a - b < a)
self.assert_(b - a < b)
self.assert_(a & b < a)
self.assert_(a & b < b)
self.assert_(a | b > a)
self.assert_(a | b > b)
self.assert_(a ^ b < a | b)
def test_commutativity(self):
a, b = self.a, self.b
self.assertEqual(a & b, b & a)
self.assertEqual(a | b, b | a)
self.assertEqual(a ^ b, b ^ a)
if a != b:
self.assertNotEqual(a - b, b - a)
def test_summations(self):
# check that sums of parts equal the whole
a, b = self.a, self.b
self.assertEqual((a - b) | (a & b) | (b - a), a | b)
self.assertEqual((a & b) | (a ^ b), a | b)
self.assertEqual(a | (b - a), a | b)
self.assertEqual((a - b) | b, a | b)
self.assertEqual((a - b) | (a & b), a)
self.assertEqual((b - a) | (a & b), b)
self.assertEqual((a - b) | (b - a), a ^ b)
def test_exclusion(self):
# check that inverse operations show non-overlap
a, b, zero = self.a, self.b, set()
self.assertEqual((a - b) & b, zero)
self.assertEqual((b - a) & a, zero)
self.assertEqual((a & b) & (a ^ b), zero)
# Tests derived from test_itertools.py =======================================
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
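            # deliberately divide by zero so iterating E raises ZeroDivisionError,
            # which the tests below expect to propagate out of set constructors
            # and methods.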
t = 3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def next(self):
raise StopIteration
class TestVariousIteratorArgs(UnitTest):
def test_constructor(self):
for cons in (set, frozenset):
for s in (
"123", "", range(1000), ('do', 1.2), xrange(2000, 2200, 5)):
for g in (G, I, Ig, S, R):
self.assertEqual(sorted(cons(g(s))), sorted(g(s)))
self.assertRaises(TypeError, cons, X(s))
self.assertRaises(TypeError, cons, N(s))
self.assertRaises(ZeroDivisionError, cons, E(s))
def test_inline_methods(self):
s = set('november')
for data in (
"123", "", range(1000), ('do', 1.2), xrange(2000, 2200, 5), 'december'):
for meth in (
s.union, s.intersection, s.difference, s.symmetric_difference,
s.isdisjoint):
for g in (G, I, Ig, R):
expected = meth(data)
actual = meth(G(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertEqual(sorted(actual), sorted(expected))
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in (
"123", "", range(1000), ('do', 1.2), xrange(2000, 2200, 5), 'december'):
for methname in ('update', 'intersection_update',
'difference_update',
'symmetric_difference_update'):
for g in (G, I, Ig, S, R):
s = set('january')
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertEqual(sorted(s), sorted(t))
self.assertRaises(TypeError, getattr(set('january'), methname),
X(data))
self.assertRaises(TypeError, getattr(set('january'), methname),
N(data))
self.assertRaises(ZeroDivisionError,
getattr(set('january'), methname), E(data))
|
Hasimir/pyjs
|
examples/libtest/SetTest.py
|
Python
|
apache-2.0
| 54895
|
"""one spout bolt multi tasks integration test topology"""
__all__ = ['one_spout_bolt_multi_tasks']
from .one_spout_bolt_multi_tasks import one_spout_bolt_multi_tasks_builder
|
objmagic/heron
|
integration-test/src/python/integration_test/topology/one_spout_bolt_multi_tasks/__init__.py
|
Python
|
apache-2.0
| 176
|
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch, QuerySet
from django.db.models.query import get_prefetcher
from django.test import TestCase, override_settings
from django.test.utils import CaptureQueriesContext
from .models import (
Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book, Bookmark,
BookReview, BookWithYear, Comment, Department, Employee, FavoriteAuthors,
House, LessonEntry, ModelIterableSubclass, Person, Qualification, Reader,
Room, TaggedItem, Teacher, WordEntry,
)
class PrefetchRelatedTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Poems')
cls.book2 = Book.objects.create(title='Jane Eyre')
cls.book3 = Book.objects.create(title='Wuthering Heights')
cls.book4 = Book.objects.create(title='Sense and Sensibility')
cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name='Amy')
cls.reader2 = Reader.objects.create(name='Belinda')
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
def assertWhereContains(self, sql, needle):
where_idx = sql.index('WHERE')
self.assertEqual(
sql.count(str(needle), where_idx), 1,
msg="WHERE clause doesn't contain %s, actual SQL: %s" % (needle, sql[where_idx:])
)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors')]
self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related('bookwithyear').all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_onetoone_reverse_with_to_field_pk(self):
"""
A model (Bio) with a OneToOneField primary key (author) that references
a non-pk field (name) on the related model (Author) is prefetchable.
"""
Bio.objects.bulk_create([
Bio(author=self.author1),
Bio(author=self.author2),
Bio(author=self.author3),
])
authors = Author.objects.filter(
name__in=[self.author1, self.author2, self.author3],
).prefetch_related('bio')
with self.assertNumQueries(2):
for author in authors:
self.assertEqual(author.name, author.bio.author.name)
def test_survives_clone(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related('first_time_authors')
self.assertIn(qs[0], qs)
def test_clear(self):
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""A m2m can be followed through another m2m."""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books', 'books__read_by')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by', 'books')
lists = [[[str(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists, [
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_get(self):
"""
Objects retrieved with .get() get the prefetch behavior.
"""
        # Need a double-level lookup ('books__read_by') so the nested prefetch
        # is exercised via .get().
with self.assertNumQueries(3):
author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
lists = [[str(r) for r in b.read_by.all()] for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
A m2m relation can be followed after a relation like ForeignKey that
doesn't have many objects.
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
lists = [[str(r) for r in a.first_book.read_by.all()]
for a in qs]
self.assertEqual(lists, [["Amy"], ["Amy"], ["Amy"], ["Amy", "Belinda"]])
def test_reverse_one_to_one_then_m2m(self):
"""
        A m2m relation can be followed after going through the select_related
reverse of an o2o.
"""
qs = Author.objects.prefetch_related('bio__books').select_related('bio')
with self.assertNumQueries(1):
list(qs.all())
Bio.objects.create(author=self.author1)
with self.assertNumQueries(2):
list(qs.all())
def test_attribute_error(self):
qs = Reader.objects.all().prefetch_related('books_read__xyz')
msg = (
"Cannot find 'xyz' on Book object, 'books_read__xyz' "
"is an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related('authors__name')
msg = (
"'authors__name' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to prefetch_related()."
)
with self.assertRaisesMessage(ValueError, msg) as cm:
list(qs)
self.assertIn('prefetch_related', str(cm.exception))
self.assertIn("name", str(cm.exception))
def test_forward_m2m_to_attr_conflict(self):
msg = 'to_attr=authors conflicts with a field on the Book model.'
authors = Author.objects.all()
with self.assertRaisesMessage(ValueError, msg):
list(Book.objects.prefetch_related(
Prefetch('authors', queryset=authors, to_attr='authors'),
))
# Without the ValueError, an author was deleted due to the implicit
# save of the relation assignment.
self.assertEqual(self.book1.authors.count(), 3)
def test_reverse_m2m_to_attr_conflict(self):
msg = 'to_attr=books conflicts with a field on the Author model.'
poems = Book.objects.filter(title='Poems')
with self.assertRaisesMessage(ValueError, msg):
list(Author.objects.prefetch_related(
Prefetch('books', queryset=poems, to_attr='books'),
))
# Without the ValueError, a book was deleted due to the implicit
# save of reverse relation assignment.
self.assertEqual(self.author1.books.count(), 2)
def test_m2m_then_reverse_fk_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__addresses'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_m2m_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__favorite_authors'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.name)
def test_m2m_then_reverse_one_to_one_object_ids(self):
with CaptureQueriesContext(connection) as queries:
list(Book.objects.prefetch_related('authors__authorwithage'))
sql = queries[-1]['sql']
self.assertWhereContains(sql, self.author1.id)
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
        Helper method that walks obj_iter and, for each object, recursively
        traverses the given path of attribute names, returning a list of
        (object, related_objects) pairs for the objects it finds.
"""
ret_val = []
if hasattr(obj_iter, 'all'):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
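    # For example, traverse_qs(persons, [['houses', 'rooms']]) returns entries of
    # the form (person, [(house, [(room, []), ...]), ...]) -- nested
    # (object, related objects) pairs mirroring the traversal path.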
@classmethod
def setUpTestData(cls):
cls.person1 = Person.objects.create(name='Joe')
cls.person2 = Person.objects.create(name='Mary')
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
cls.house1 = House.objects.create(name='House 1', address='123 Main St', owner=cls.person1)
cls.room1_1 = Room.objects.create(name='Dining room', house=cls.house1)
cls.room1_2 = Room.objects.create(name='Lounge', house=cls.house1)
cls.room1_3 = Room.objects.create(name='Kitchen', house=cls.house1)
cls.house1.main_room = cls.room1_1
cls.house1.save()
cls.person1.houses.add(cls.house1)
cls.house2 = House.objects.create(name='House 2', address='45 Side St', owner=cls.person1)
cls.room2_1 = Room.objects.create(name='Dining room', house=cls.house2)
cls.room2_2 = Room.objects.create(name='Lounge', house=cls.house2)
cls.room2_3 = Room.objects.create(name='Kitchen', house=cls.house2)
cls.house2.main_room = cls.room2_1
cls.house2.save()
cls.person1.houses.add(cls.house2)
cls.house3 = House.objects.create(name='House 3', address='6 Downing St', owner=cls.person2)
cls.room3_1 = Room.objects.create(name='Dining room', house=cls.house3)
cls.room3_2 = Room.objects.create(name='Lounge', house=cls.house3)
cls.room3_3 = Room.objects.create(name='Kitchen', house=cls.house3)
cls.house3.main_room = cls.room3_1
cls.house3.save()
cls.person2.houses.add(cls.house3)
cls.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=cls.person2)
cls.room4_1 = Room.objects.create(name='Dining room', house=cls.house4)
cls.room4_2 = Room.objects.create(name='Lounge', house=cls.house4)
cls.room4_3 = Room.objects.create(name='Kitchen', house=cls.house4)
cls.house4.main_room = cls.room4_1
cls.house4.save()
cls.person2.houses.add(cls.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related('houses')
related_objs_normal = [list(p.houses.all()) for p in qs],
related_objs_from_traverse = [[inner[0] for inner in o[1]]
for o in self.traverse_qs(qs, [['houses']])]
self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
msg = (
"'houses' lookup was already seen with a different queryset. You "
"may need to adjust the ordering of your lookups."
)
with self.assertRaisesMessage(ValueError, msg):
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all())),
[['houses', 'rooms']]
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
msg = (
"Cannot find 'houses_lst' on Person object, 'houses_lst__rooms' is "
"an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
'houses_lst__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', 'houses'),
[['houses', 'rooms']]
)
self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')
),
[['houses', 'rooms']]
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses'),
[['houses']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses')),
[['houses']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
[['houses_lst']]
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related('occupants'),
[['occupants']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants')),
[['occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
[['occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related('house__occupants'),
[['house', 'occupants']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants')),
[['house', 'occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
[['house', 'occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
[['content_object', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch('content_object'),
Prefetch('content_object__rooms', to_attr='rooms_lst')
),
[['content_object', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses', 'houses__rooms'),
[['houses', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
[['houses_lst', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch('houses', to_attr='houses_lst'),
Prefetch('houses_lst__rooms', to_attr='rooms_lst')
),
[['houses_lst', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
[['tags', 'content_object'], ['favorite_tags']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch('tags', to_attr='tags_lst'),
Prefetch('tags_lst__content_object'),
Prefetch('favorite_tags'),
),
[['tags_lst', 'content_object'], ['favorite_tags']]
)
self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
# Control lookups.
with self.assertNumQueries(5):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
'primary_house__occupants__houses',
),
[['primary_house', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(5):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses__rooms',
Prefetch('primary_house__occupants', to_attr='occupants_lst'),
'primary_house__occupants_lst__houses',
),
[['primary_house', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
'all_houses__occupants__houses',
),
[['all_houses', 'occupants', 'houses']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
'houses',
Prefetch('all_houses__occupants', to_attr='occupants_lst'),
'all_houses__occupants_lst__houses',
),
[['all_houses', 'occupants_lst', 'houses']]
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related('houses'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses']]),
self.traverse_qs(lst2, [['houses_lst']])
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
'houses',
queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]),
to_attr='houses_lst',
)
)
)
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__rooms'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'rooms']]),
self.traverse_qs(lst2, [['houses', 'rooms_lst']])
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__owner'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.select_related('owner'))))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'owner']]),
self.traverse_qs(lst2, [['houses', 'owner']])
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
with self.assertNumQueries(4):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'),
Prefetch('houses_lst__rooms_lst__main_room_of')
))
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ForwardManyToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('house')
lst1 = self.traverse_qs(rooms, [['house', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['house', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related('owner')
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'house')
room = Room.objects.all().prefetch_related(
Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')
).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
with self.assertNumQueries(2):
getattr(rooms.first().house, 'name')
with self.assertNumQueries(3):
getattr(rooms.first().house, 'address')
# Test ReverseOneToOneDescriptor.
houses = House.objects.select_related('owner')
with self.assertNumQueries(6):
rooms = Room.objects.all().prefetch_related('main_room_of')
lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
with self.assertNumQueries(2):
rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(
Room.objects.all().prefetch_related(
Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')
)
)
lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
self.assertEqual(lst1, lst2)
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, 'main_room_of')
room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(
Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')
).first()
self.assertIsNone(room.main_room_of_attr)
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
person = Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.filter(name='House 1')),
).get(pk=self.person1.pk)
self.assertEqual(
list(person.houses.all()),
list(person.houses.all().all()),
)
def test_nested_prefetch_related_are_not_overwritten(self):
# Regression test for #24873
houses_2 = House.objects.prefetch_related(Prefetch('rooms'))
persons = Person.objects.prefetch_related(Prefetch('houses', queryset=houses_2))
houses = House.objects.prefetch_related(Prefetch('occupants', queryset=persons))
list(houses) # queryset must be evaluated once to reproduce the bug.
self.assertEqual(
houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
self.room2_1
)
def test_values_queryset(self):
with self.assertRaisesMessage(ValueError, 'Prefetch querysets cannot use values().'):
Prefetch('houses', House.objects.values('pk'))
# That error doesn't affect managers with custom ModelIterable subclasses
self.assertIs(Teacher.objects_custom.all()._iterable_class, ModelIterableSubclass)
Prefetch('teachers', Teacher.objects_custom.all())
def test_to_attr_doesnt_cache_through_attr_as_list(self):
house = House.objects.prefetch_related(
Prefetch('rooms', queryset=Room.objects.all(), to_attr='to_rooms'),
).get(pk=self.house3.pk)
self.assertIsInstance(house.rooms.all(), QuerySet)
def test_to_attr_cached_property(self):
persons = Person.objects.prefetch_related(
Prefetch('houses', House.objects.all(), to_attr='cached_all_houses'),
)
for person in persons:
# To bypass caching at the related descriptor level, don't use
# person.houses.all() here.
all_houses = list(House.objects.filter(occupants=person))
with self.assertNumQueries(0):
self.assertEqual(person.cached_all_houses, all_houses)
class DefaultManagerTests(TestCase):
def setUp(self):
self.qual1 = Qualification.objects.create(name="BA")
self.qual2 = Qualification.objects.create(name="BSci")
self.qual3 = Qualification.objects.create(name="MA")
self.qual4 = Qualification.objects.create(name="PhD")
self.teacher1 = Teacher.objects.create(name="Mr Cleese")
self.teacher2 = Teacher.objects.create(name="Mr Idle")
self.teacher3 = Teacher.objects.create(name="Mr Chapman")
self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
self.teacher2.qualifications.add(self.qual1)
self.teacher3.qualifications.add(self.qual2)
self.dept1 = Department.objects.create(name="English")
self.dept2 = Department.objects.create(name="Physics")
self.dept1.teachers.add(self.teacher1, self.teacher2)
self.dept2.teachers.add(self.teacher1, self.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related('teachers')
depts = "".join("%s department: %s\n" %
(dept.name, ", ".join(str(t) for t in dept.teachers.all()))
for dept in qs)
self.assertEqual(depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
cls.book1, cls.book2, cls.book3 = book1, book2, book3
cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related('content_object')
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related('content_object')
[c.content_object for c in qs]
def test_traverse_GFK(self):
"""
        A 'content_object' can be traversed with prefetch_related() and used
        to reach related objects on the other side (assuming the queryset is
        suitably filtered).
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
readers_of_awesome_books = {r.name for tag in qs
for r in tag.content_object.read_by.all()}
self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
def test_nullable_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1,
created_by=self.reader1)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
self.assertEqual(result,
[t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
for t in b.tags.all()]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=b, tag='django')
TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
self.assertEqual(sorted(i.tag for i in bookmark.tags.all()), ["django", "python"])
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
def test_custom_queryset(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
django_tag = TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
bookmark = Bookmark.objects.prefetch_related(
Prefetch('tags', TaggedItem.objects.filter(tag='django')),
).get()
with self.assertNumQueries(0):
self.assertEqual(list(bookmark.tags.all()), [django_tag])
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))
class MultiTableInheritanceTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = BookWithYear.objects.create(title='Poems', published_year=2010)
cls.book2 = BookWithYear.objects.create(title='More poems', published_year=2011)
cls.author1 = AuthorWithAge.objects.create(name='Jane', first_book=cls.book1, age=50)
cls.author2 = AuthorWithAge.objects.create(name='Tom', first_book=cls.book1, age=49)
cls.author3 = AuthorWithAge.objects.create(name='Robert', first_book=cls.book2, age=48)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
cls.book2.aged_authors.add(cls.author2, cls.author3)
cls.br1 = BookReview.objects.create(book=cls.book1, notes='review book1')
cls.br2 = BookReview.objects.create(book=cls.book2, notes='review book2')
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()] for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related('book')
titles = [obj.book.title for obj in qs]
self.assertEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related('books_with_year')
with self.assertNumQueries(2):
lst = [[str(book) for book in author.books_with_year.all()] for author in qs]
qs = AuthorWithAge.objects.all()
lst2 = [[str(book) for book in author.books_with_year.all()] for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related('aged_authors')
with self.assertNumQueries(2):
lst = [[str(author) for author in book.aged_authors.all()] for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[str(author) for author in book.aged_authors.all()] for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related('author')]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
authors = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
self.assertIn(' IN ', connection.queries[-1]['sql'])
self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.book = Book.objects.create(title='Poems')
cls.author1 = Author.objects.create(name='Jane', first_book=cls.book)
cls.author2 = Author.objects.create(name='Tom', first_book=cls.book)
cls.author3 = Author.objects.create(name='Robert', first_book=cls.book)
cls.author_address = AuthorAddress.objects.create(author=cls.author1, address='SomeStreet 1')
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
FavoriteAuthors.objects.create(author=cls.author2, likes_author=cls.author3)
FavoriteAuthors.objects.create(author=cls.author3, likes_author=cls.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related('addresses')
addresses = [[str(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[str(self.author_address)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
favorites = [(
[str(i_like) for i_like in author.favorite_authors.all()],
[str(likes_me) for likes_me in author.favors_me.all()]
) for author in qs]
self.assertEqual(
favorites,
[
([str(self.author2)], [str(self.author3)]),
([str(self.author3)], [str(self.author1)]),
([str(self.author1)], [str(self.author2)])
]
)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
self.house1 = House.objects.create(address="123 Main St")
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.house1.main_room = self.room1_1
self.house1.save()
self.person1.houses.add(self.house1)
self.house2 = House.objects.create(address="45 Side St")
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.house2.main_room = self.room2_1
self.house2.save()
self.person1.houses.add(self.house2)
self.house3 = House.objects.create(address="6 Downing St")
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.house3.main_room = self.room3_1
self.house3.save()
self.person2.houses.add(self.house3)
self.house4 = House.objects.create(address="7 Regents St")
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.house4.main_room = self.room4_1
self.house4.save()
self.person2.houses.add(self.house4)
def test_order(self):
with self.assertNumQueries(4):
# The following two queries must be done in the same order as written,
# otherwise 'primary_house' will cause non-prefetched lookups
qs = Person.objects.prefetch_related('houses__rooms',
'primary_house__occupants')
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
@classmethod
def setUpTestData(cls):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
# Because we use select_related() for 'boss', it doesn't need to be
# prefetched, but we can still traverse it although it contains some nulls
with self.assertNumQueries(2):
qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.select_related('boss')
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.all()
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
        in_bulk() correctly prefetches related objects because it doesn't
        use .iterator() directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
multi_db = True
def test_using_is_honored_m2m(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related('authors')
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1)
self.assertEqual(books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n")
# Reverse
qs2 = A.prefetch_related('books')
with self.assertNumQueries(2, using='other'):
authors = "".join("%s: %s\n" %
(author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2)
self.assertEqual(authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n")
def test_using_is_honored_fkey(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using='other'):
books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related('first_time_authors'))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using('other')
A = AuthorWithAge.objects.using('other')
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name='Jane', first_book=book1, age=50)
A.create(name='Tom', first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using='other'):
authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using='other'):
ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on the same db.
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on a different db.
with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems ()\n"
"Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
def setUp(self):
for id, name1, name2 in [
(1, 'einfach', 'simple'),
(2, 'schwierig', 'difficult'),
]:
LessonEntry.objects.create(id=id, name1=name1, name2=name2)
for id, lesson_entry_id, name in [
(1, 1, 'einfach'),
(2, 1, 'simple'),
(3, 2, 'schwierig'),
(4, 2, 'difficult'),
]:
WordEntry.objects.create(id=id, lesson_entry_id=lesson_entry_id, name=name)
def test_bug(self):
list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author2.objects.create(name="Charlotte", first_book=self.book1)
self.author2 = Author2.objects.create(name="Anne", first_book=self.book1)
self.author3 = Author2.objects.create(name="Emily", first_book=self.book1)
self.author4 = Author2.objects.create(name="Jane", first_book=self.book4)
self.author1.favorite_books.add(self.book1, self.book2, self.book3)
self.author2.favorite_books.add(self.book1)
self.author3.favorite_books.add(self.book2)
self.author4.favorite_books.add(self.book3)
def test_bug(self):
list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
def setUp(self):
self.rooms = []
for _ in range(3):
house = House.objects.create()
for _ in range(3):
self.rooms.append(Room.objects.create(house=house))
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
house.main_room = self.rooms[-3]
house.save()
def test_bug(self):
prefetcher = get_prefetcher(self.rooms[0], 'house', 'house')[0]
queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
self.assertNotIn(' JOIN ', str(queryset.query))
class DirectPrefetchedObjectCacheReuseTests(TestCase):
"""
prefetch_related() reuses objects fetched in _prefetched_objects_cache.
When objects are prefetched and not stored as an instance attribute (often
intermediary relationships), they are saved to the
_prefetched_objects_cache attribute. prefetch_related() takes
_prefetched_objects_cache into account when determining whether an object
has been fetched[1] and retrieves results from it when it is populated [2].
[1]: #25546 (duplicate queries on nested Prefetch)
[2]: #27554 (queryset evaluation fails with a mix of nested and flattened
prefetches)
"""
@classmethod
def setUpTestData(cls):
cls.book1, cls.book2 = [
Book.objects.create(title='book1'),
Book.objects.create(title='book2'),
]
cls.author11, cls.author12, cls.author21 = [
Author.objects.create(first_book=cls.book1, name='Author11'),
Author.objects.create(first_book=cls.book1, name='Author12'),
Author.objects.create(first_book=cls.book2, name='Author21'),
]
cls.author1_address1, cls.author1_address2, cls.author2_address1 = [
AuthorAddress.objects.create(author=cls.author11, address='Happy place'),
AuthorAddress.objects.create(author=cls.author12, address='Haunted house'),
AuthorAddress.objects.create(author=cls.author21, address='Happy place'),
]
def test_detect_is_fetched(self):
"""
Nested prefetch_related() shouldn't trigger duplicate queries for the same
lookup.
"""
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
)
),
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertSequenceEqual(book1.first_time_authors.all(), [self.author11, self.author12])
self.assertSequenceEqual(book2.first_time_authors.all(), [self.author21])
self.assertSequenceEqual(book1.first_time_authors.all()[0].addresses.all(), [self.author1_address1])
self.assertSequenceEqual(book1.first_time_authors.all()[1].addresses.all(), [])
self.assertSequenceEqual(book2.first_time_authors.all()[0].addresses.all(), [self.author2_address1])
self.assertEqual(
list(book1.first_time_authors.all()), list(book1.first_time_authors.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()), list(book2.first_time_authors.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[0].addresses.all()),
list(book1.first_time_authors.all()[0].addresses.all().all())
)
self.assertEqual(
list(book1.first_time_authors.all()[1].addresses.all()),
list(book1.first_time_authors.all()[1].addresses.all().all())
)
self.assertEqual(
list(book2.first_time_authors.all()[0].addresses.all()),
list(book2.first_time_authors.all()[0].addresses.all().all())
)
def test_detect_is_fetched_with_to_attr(self):
with self.assertNumQueries(3):
books = Book.objects.filter(
title__in=['book1', 'book2'],
).prefetch_related(
Prefetch(
'first_time_authors',
Author.objects.prefetch_related(
Prefetch(
'addresses',
AuthorAddress.objects.filter(address='Happy place'),
to_attr='happy_place',
)
),
to_attr='first_authors',
),
)
book1, book2 = list(books)
with self.assertNumQueries(0):
self.assertEqual(book1.first_authors, [self.author11, self.author12])
self.assertEqual(book2.first_authors, [self.author21])
self.assertEqual(book1.first_authors[0].happy_place, [self.author1_address1])
self.assertEqual(book1.first_authors[1].happy_place, [])
self.assertEqual(book2.first_authors[0].happy_place, [self.author2_address1])
class ReadPrefetchedObjectsCacheTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Les confessions Volume I')
cls.book2 = Book.objects.create(title='Candide')
cls.author1 = AuthorWithAge.objects.create(name='Rousseau', first_book=cls.book1, age=70)
cls.author2 = AuthorWithAge.objects.create(name='Voltaire', first_book=cls.book2, age=65)
cls.book1.authors.add(cls.author1)
cls.book2.authors.add(cls.author2)
FavoriteAuthors.objects.create(author=cls.author1, likes_author=cls.author2)
def test_retrieves_results_from_prefetched_objects_cache(self):
"""
When intermediary results are prefetched without a destination
attribute, they are saved in the RelatedManager's cache
(_prefetched_objects_cache). prefetch_related() uses this cache
(#27554).
"""
authors = AuthorWithAge.objects.prefetch_related(
Prefetch(
'author',
queryset=Author.objects.prefetch_related(
# Results are saved in the RelatedManager's cache
# (_prefetched_objects_cache) and do not replace the
# RelatedManager on Author instances (favorite_authors)
Prefetch('favorite_authors__first_book'),
),
),
)
with self.assertNumQueries(4):
# AuthorWithAge -> Author -> FavoriteAuthors, Book
self.assertQuerysetEqual(authors, ['<AuthorWithAge: Rousseau>', '<AuthorWithAge: Voltaire>'])
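# --- Editor's illustrative sketch (not part of the original test module) ---
# Restates the to_attr behaviour exercised in the tests above: Prefetch(...,
# to_attr=...) stores the prefetched rows as a plain list on each instance
# instead of filling the related manager's cache, so house.rooms.all() would
# still query the database. Model names reuse those from this test suite.
def _example_prefetch_to_attr():
    houses = House.objects.prefetch_related(
        Prefetch('rooms', queryset=Room.objects.all(), to_attr='room_list'),
    )
    for house in houses:
        assert isinstance(house.room_list, list)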
|
tomchristie/django
|
tests/prefetch_related/tests.py
|
Python
|
bsd-3-clause
| 62,680
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` that wraps a [batch] matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
__all__ = ["LinearOperatorMatrix"]
class LinearOperatorMatrix(linear_operator.LinearOperator):
"""`LinearOperator` that wraps a [batch] matrix.
This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape
`[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
  batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, :, :]` is
an `M x N` matrix.
```python
# Create a 2 x 2 linear operator.
matrix = [[1., 2.], [3., 4.]]
operator = LinearOperatorMatrix(matrix)
operator.to_dense()
==> [[1., 2.]
[3., 4.]]
operator.shape
==> [2, 2]
  operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.apply(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
matrix = tf.random_normal(shape=[2, 3, 4, 4])
operator = LinearOperatorMatrix(matrix)
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `apply` and `solve` if
```
operator.shape = [B1,...,Bb] + [M, N], with b >= 0
x.shape = [B1,...,Bb] + [N, R], with R >= 0.
```
#### Performance
`LinearOperatorMatrix` has exactly the same performance as would be achieved
by using standard `TensorFlow` matrix ops. Intelligent choices are made
based on the following initialization hints.
* If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a
Cholesky factorization is used for the determinant and solve.
In all cases, suppose `operator` is a `LinearOperatorMatrix` of shape
`[M, N]`, and `x.shape = [N, R]`. Then
* `operator.apply(x)` is `O(M * N * R)`.
* If `M=N`, `operator.solve(x)` is `O(N^3 * R)`.
* If `M=N`, `operator.determinant()` is `O(N^3)`.
If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite`.
These have the following meaning
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
matrix,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
name="LinearOperatorMatrix"):
"""Initialize a `LinearOperatorMatrix`.
Args:
matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.
Allowed dtypes: `float32`, `float64`, `complex64`, `complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the real part of all eigenvalues is positive. We do not require
the operator to be self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix
#Extension_for_non_symmetric_matrices
name: A name for this `LinearOperator`.
Raises:
      TypeError: If `matrix.dtype` is not an allowed type.
"""
allowed_dtypes = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
with ops.name_scope(name, values=[matrix]):
self._matrix = ops.convert_to_tensor(matrix, name="matrix")
dtype = self._matrix.dtype
if dtype not in allowed_dtypes:
raise TypeError(
"Argument matrix must have dtype in %s. Found: %s"
% (allowed_dtypes, dtype))
# Special treatment for (real) Symmetric Positive Definite.
self._is_spd = (
(not dtype.is_complex) and is_self_adjoint and is_positive_definite)
if self._is_spd:
self._chol = linalg_ops.cholesky(self._matrix)
super(LinearOperatorMatrix, self).__init__(
dtype=self._matrix.dtype,
graph_parents=[self._matrix],
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
name=name)
def _shape(self):
return self._matrix.get_shape()
def _shape_dynamic(self):
return array_ops.shape(self._matrix)
def _apply(self, x, adjoint=False):
return math_ops.matmul(self._matrix, x, adjoint_a=adjoint)
def _determinant(self):
if self._is_spd:
return math_ops.exp(self.log_abs_determinant())
return linalg_ops.matrix_determinant(self._matrix)
def _log_abs_determinant(self):
if self._is_spd:
diag = array_ops.matrix_diag_part(self._chol)
return 2 * math_ops.reduce_sum(math_ops.log(diag), reduction_indices=[-1])
abs_det = math_ops.abs(self.determinant())
return math_ops.log(abs_det)
def _solve(self, rhs, adjoint=False):
if self._is_spd:
return linalg_ops.cholesky_solve(self._chol, rhs)
return linalg_ops.matrix_solve(self._matrix, rhs, adjoint=adjoint)
def _to_dense(self):
return self._matrix
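# --- Editor's illustrative sketch (not part of the original module) ---
# Illustrates the "Performance" notes in the class docstring: for a real
# matrix declared self-adjoint and positive definite, the operator builds a
# Cholesky factor at construction time and routes determinant() and solve()
# through it (see _determinant() and _solve() above). Assumes the public
# determinant()/solve() wrappers provided by the LinearOperator base class.
def _example_spd_hints():
  spd = [[4., 1.], [1., 3.]]
  operator = LinearOperatorMatrix(
      spd, is_self_adjoint=True, is_positive_definite=True)
  return operator.determinant(), operator.solve([[1.], [0.]])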
|
jjas0nn/solvem
|
tensorflow/lib/python2.7/site-packages/tensorflow/contrib/linalg/python/ops/linear_operator_matrix.py
|
Python
|
mit
| 6,593
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.conf import settings
from ...topic.models import Topic
from ...category.models import Category
from ...comment.models import Comment
from ...topic.private.models import TopicPrivate
User = get_user_model()
def create_user(**kwargs):
if 'username' not in kwargs:
kwargs['username'] = "user_foo%d" % User.objects.all().count()
if 'email' not in kwargs:
kwargs['email'] = "%s@bar.com" % kwargs['username']
if 'password' not in kwargs:
kwargs['password'] = "bar"
return User.objects.create_user(**kwargs)
def create_topic(category, **kwargs):
if 'user' not in kwargs:
kwargs['user'] = create_user()
if 'title' not in kwargs:
kwargs['title'] = "topic_foo%d" % Topic.objects.all().count()
return Topic.objects.create(category=category, **kwargs)
def create_private_topic(**kwargs):
assert 'category' not in kwargs, "do not pass category param"
category = Category.objects.get(pk=settings.ST_TOPIC_PRIVATE_CATEGORY_PK)
topic = create_topic(category=category, **kwargs)
return TopicPrivate.objects.create(topic=topic, user=topic.user)
def create_category(**kwargs):
if 'title' not in kwargs:
kwargs['title'] = "category_foo%d" % Category.objects.all().count()
return Category.objects.create(**kwargs)
def create_subcategory(category, **kwargs):
if 'title' not in kwargs:
kwargs['title'] = "subcategory_foo%d" % Category.objects.all().count()
return Category.objects.create(parent=category, **kwargs)
def create_comment(**kwargs):
if 'comment' not in kwargs:
kwargs['comment'] = "comment_foobar%d" % Comment.objects.all().count()
if 'comment_html' not in kwargs:
kwargs['comment_html'] = kwargs['comment']
if 'user' not in kwargs:
kwargs['user'] = create_user()
return Comment.objects.create(**kwargs)
def login(test_case_instance, user=None, password=None):
user = user or test_case_instance.user
password = password or "bar"
login_successful = test_case_instance.client.login(username=user.username, password=password)
test_case_instance.assertTrue(login_successful)
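# --- Editor's note (not part of the original module) ---
# Typical composition of the factories above inside a Spirit test case,
# assuming a django.test.TestCase instance with the usual self.client:
#
#   user = create_user()
#   category = create_category()
#   topic = create_topic(category=category, user=user)
#   login(self, user)  # the password defaults to "bar", matching create_user()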
|
ramaseshan/Spirit
|
spirit/core/tests/utils.py
|
Python
|
mit
| 2,284
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""A simple Set class."""
class Set(object):
"""A simple set class.
Sets are not in Python until 2.3, and rdata are not immutable so
    we cannot use sets.Set anyway. This class implements a subset of
the 2.3 Set interface using a list as the container.
@ivar items: A list of the items which are in the set
@type items: list"""
__slots__ = ['items']
def __init__(self, items=None):
"""Initialize the set.
@param items: the initial set of items
@type items: any iterable or None
"""
self.items = []
        if items is not None:
for item in items:
self.add(item)
def __repr__(self):
return "dns.simpleset.Set(%s)" % repr(self.items)
def add(self, item):
"""Add an item to the set."""
if not item in self.items:
self.items.append(item)
def remove(self, item):
"""Remove an item from the set."""
self.items.remove(item)
def discard(self, item):
"""Remove an item from the set if present."""
try:
self.items.remove(item)
except ValueError:
pass
def _clone(self):
"""Make a (shallow) copy of the set.
There is a 'clone protocol' that subclasses of this class
should use. To make a copy, first call your super's _clone()
method, and use the object returned as the new instance. Then
make shallow copies of the attributes defined in the subclass.
This protocol allows us to write the set algorithms that
return new instances (e.g. union) once, and keep using them in
subclasses.
"""
cls = self.__class__
obj = cls.__new__(cls)
obj.items = list(self.items)
return obj
def __copy__(self):
"""Make a (shallow) copy of the set."""
return self._clone()
def copy(self):
"""Make a (shallow) copy of the set."""
return self._clone()
def union_update(self, other):
"""Update the set, adding any elements from other which are not
already in the set.
@param other: the collection of items with which to update the set
@type other: Set object
"""
if not isinstance(other, Set):
raise ValueError('other must be a Set instance')
if self is other:
return
for item in other.items:
self.add(item)
def intersection_update(self, other):
"""Update the set, removing any elements from other which are not
in both sets.
@param other: the collection of items with which to update the set
@type other: Set object
"""
if not isinstance(other, Set):
raise ValueError('other must be a Set instance')
if self is other:
return
# we make a copy of the list so that we can remove items from
# the list without breaking the iterator.
for item in list(self.items):
if item not in other.items:
self.items.remove(item)
def difference_update(self, other):
"""Update the set, removing any elements from other which are in
the set.
@param other: the collection of items with which to update the set
@type other: Set object
"""
if not isinstance(other, Set):
raise ValueError('other must be a Set instance')
if self is other:
self.items = []
else:
for item in other.items:
self.discard(item)
def union(self, other):
"""Return a new set which is the union of I{self} and I{other}.
@param other: the other set
@type other: Set object
@rtype: the same type as I{self}
"""
obj = self._clone()
obj.union_update(other)
return obj
def intersection(self, other):
"""Return a new set which is the intersection of I{self} and I{other}.
@param other: the other set
@type other: Set object
@rtype: the same type as I{self}
"""
obj = self._clone()
obj.intersection_update(other)
return obj
def difference(self, other):
"""Return a new set which I{self} - I{other}, i.e. the items
in I{self} which are not also in I{other}.
@param other: the other set
@type other: Set object
@rtype: the same type as I{self}
"""
obj = self._clone()
obj.difference_update(other)
return obj
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersection(other)
def __add__(self, other):
return self.union(other)
def __sub__(self, other):
return self.difference(other)
def __ior__(self, other):
self.union_update(other)
return self
def __iand__(self, other):
self.intersection_update(other)
return self
def __iadd__(self, other):
self.union_update(other)
return self
def __isub__(self, other):
self.difference_update(other)
return self
def update(self, other):
"""Update the set, adding any elements from other which are not
already in the set.
@param other: the collection of items with which to update the set
@type other: any iterable type"""
for item in other:
self.add(item)
def clear(self):
"""Make the set empty."""
self.items = []
def __hash__(self):
        # lists are not hashable; hash a tuple snapshot of the items instead
        return hash(tuple(self.items))
def __eq__(self, other):
# Yes, this is inefficient but the sets we're dealing with are
# usually quite small, so it shouldn't hurt too much.
for item in self.items:
if not item in other.items:
return False
for item in other.items:
if not item in self.items:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self.items)
def __iter__(self):
return iter(self.items)
def __getitem__(self, i):
return self.items[i]
def __delitem__(self, i):
del self.items[i]
def __getslice__(self, i, j):
return self.items[i:j]
def __delslice__(self, i, j):
del self.items[i:j]
def issubset(self, other):
"""Is I{self} a subset of I{other}?
@rtype: bool
"""
if not isinstance(other, Set):
raise ValueError('other must be a Set instance')
for item in self.items:
if not item in other.items:
return False
return True
def issuperset(self, other):
"""Is I{self} a superset of I{other}?
@rtype: bool
"""
if not isinstance(other, Set):
raise ValueError('other must be a Set instance')
for item in other.items:
if not item in self.items:
return False
return True
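# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal example of the 'clone protocol' described in _clone() above: a
# hypothetical subclass delegates to the superclass _clone() and then shallow
# copies its own attribute, so union()/intersection()/difference() keep
# returning instances of the subclass.
class _TaggedSet(Set):
    def __init__(self, items=None, tag=None):
        super(_TaggedSet, self).__init__(items)
        self.tag = tag
    def _clone(self):
        obj = super(_TaggedSet, self)._clone()
        obj.tag = self.tag
        return obj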
|
terryyin/linkchecker
|
third_party/dnspython/dns/set.py
|
Python
|
gpl-2.0
| 7,899
|
import json
import unittest
from app import current_app as app
from test_post_api_auth import PostApiAuthTestCase
from tests.unittests.api.utils import create_event, get_path
from tests.unittests.auth_helper import register
from tests.unittests.setup_database import Setup
from tests.unittests.utils import OpenEventTestCase
# Successful Post API permissions are already covered, more or less,
# in test_post_api_auth
class TestPostApiPermissionDenied(PostApiAuthTestCase, OpenEventTestCase):
"""
Test 403 permission denied in Post API
"""
def setUp(self):
self.app = Setup.create_app()
with app.test_request_context():
register(self.app, u'myemail@gmail.com', u'test')
create_event() # no creator_email, no organizer is set
# logout(self.app) # no need for this test
def _test_model(self, name, data):
if name == 'event':
return
with app.test_request_context():
path = get_path() if name == 'event' else get_path(1, name + 's')
response = self.app.post(
path,
data=json.dumps(data),
headers={
'content-type': 'application/json'
}
)
self.assertEqual(response.status_code, 403) # permission denied
if __name__ == '__main__':
unittest.main()
|
Achint08/open-event-orga-server
|
tests/unittests/api/test_post_api_permissions.py
|
Python
|
gpl-3.0
| 1,382
|
#!/usr/bin/env python3
# Copyright (C) 2018 Freie Universitat Berlin
# Copyright (C) 2018 Inria
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
#
# @author Koen Zandberg <koen@bergzand.net>
import os
import os.path
import sys
import shutil
import argparse
import git
from agithub.GitHub import GitHub
ORG = "RIOT-OS"
REPO = "RIOT"
GITHUBTOKEN_FILE = ".riotgithubtoken"
WORKTREE_SUBDIR = "backport_temp"
RELEASE_PREFIX = ""
RELEASE_SUFFIX = "-branch"
LABELS_REMOVE = ['Process: needs backport', 'Reviewed: ']
LABELS_ADD = ['Process: release backport']
BACKPORT_BRANCH = 'backport/{release}/{origbranch}'
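# With the default format above, backporting a branch named "fix/foo" onto the
# 2018.10 release yields the branch name "backport/2018.10/fix/foo".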
def _get_labels(pr):
"""
>>> _get_labels({'labels': [{'name': 'test'}, {'name': 'abcd'}]})
['Process: release backport', 'abcd', 'test']
>>> _get_labels({'labels': [{'name': 'Reviewed: what'}, {'name': 'Reviewed: 3-testing'}]})
['Process: release backport']
>>> _get_labels({'labels': [{'name': 'Process: release backport'}]})
['Process: release backport']
>>> _get_labels({'labels': [{'name': 'Process: needs backport'}]})
['Process: release backport']
"""
labels = set(label['name'] for label in pr['labels']
if all(not label['name'].startswith(remove)
for remove in LABELS_REMOVE))
labels.update(LABELS_ADD)
return sorted(list(labels))
def _branch_name_strip(branch_name, prefix=RELEASE_PREFIX,
suffix=RELEASE_SUFFIX):
"""Strip suffix and prefix.
>>> _branch_name_strip('2018.10-branch')
'2018.10'
"""
if (branch_name.startswith(prefix) and
branch_name.endswith(suffix)):
        if prefix:
            # keep the part after the prefix (index 1), not the empty part before it
            branch_name = branch_name.split(prefix, maxsplit=1)[1]
if suffix:
branch_name = branch_name.rsplit(suffix, maxsplit=1)[0]
return branch_name
def _get_latest_release(branches):
version_latest = 0
release_fullname = ''
release_short = ''
for branch in branches:
branch_name = _branch_name_strip(branch['name'])
branch_num = 0
try:
branch_num = int(''.join(branch_name.split('.')))
except ValueError:
pass
if branch_num > version_latest:
version_latest = branch_num
release_short = branch_name
release_fullname = branch['name']
return (release_short, release_fullname)
def _find_remote(repo, user, repo_name):
for remote in repo.remotes:
if (remote.url.endswith("{}/{}.git".format(user, repo_name)) or
remote.url.endswith("{}/{}".format(user, repo_name))):
return remote
raise ValueError("Could not find remote with URL ending in {}/{}.git"
.format(user, repo_name))
def _get_upstream(repo):
return _find_remote(repo, ORG, REPO)
def _delete_worktree(repo, workdir):
shutil.rmtree(workdir)
repo.git.worktree('prune')
def main():
keyfile = os.path.join(os.environ['HOME'], GITHUBTOKEN_FILE)
parser = argparse.ArgumentParser()
parser.add_argument("-k", "--keyfile", type=argparse.FileType('r'),
default=keyfile,
help="File containing github token")
parser.add_argument("-c", "--comment", action="store_true",
help="Put a comment with a reference under"
"the original PR")
parser.add_argument("-n", "--noop", action="store_true",
help="Limited noop mode, creates branch, but doesn't"
"push and create the PR")
parser.add_argument("-r", "--release-branch", type=str,
help="Base the backport on this branch, "
"default is the latest")
parser.add_argument("--backport-branch-fmt", type=str,
default=BACKPORT_BRANCH,
help="Backport branch format. "
"Fields '{release}' and '{origbranch} will be "
"replaced by the release name and remote branch "
"name.")
parser.add_argument('-d', '--gitdir', type=str, default=os.getcwd(),
help="Base git repo to work from")
parser.add_argument("PR", type=int, help="Pull request number to backport")
args = parser.parse_args()
gittoken = args.keyfile.read().strip()
g = GitHub(token=gittoken)
# TODO: exception handling
status, user = g.user.get()
if status != 200:
print("Could not retrieve user: {}".format(user['message']))
exit(1)
username = user['login']
status, pulldata = g.repos[ORG][REPO].pulls[args.PR].get()
if status != 200:
print("Commit #{} not found: {}".format(args.PR, pulldata['message']))
sys.exit(2)
if not pulldata['merged']:
print("Original PR not yet merged")
exit(0)
print("Fetching for commit: #{}: {}".format(args.PR, pulldata['title']))
orig_branch = pulldata['head']['ref']
status, commits = g.repos[ORG][REPO].pulls[args.PR].commits.get()
if status != 200:
print("No commits found for #{}: {}".format(args.PR,
commits['message']))
sys.exit(3)
for commit in commits:
print("found {} : {}".format(commit['sha'],
commit['commit']['message']))
# Find latest release branch
if args.release_branch:
release_fullname = args.release_branch
release_shortname = _branch_name_strip(args.release_branch)
else:
status, branches = g.repos[ORG][REPO].branches.get()
if status != 200:
print("Could not retrieve branches for {}/{}: {}"
.format(ORG,
REPO,
branches['message']))
sys.exit(4)
release_shortname, release_fullname = _get_latest_release(branches)
if not release_fullname:
print("No release branch found, exiting")
sys.exit(5)
print("Backport based on branch {}".format(release_fullname))
repo = git.Repo(args.gitdir)
# Fetch current upstream
upstream_remote = _get_upstream(repo)
if not upstream_remote:
print("No upstream remote found, can't fetch")
exit(6)
print("Fetching {} remote".format(upstream_remote))
upstream_remote.fetch()
# Build topic branch in temp dir
new_branch = args.backport_branch_fmt.format(release=release_shortname,
origbranch=orig_branch)
if new_branch in repo.branches:
print("ERROR: Branch {} already exists".format(new_branch))
sys.exit(1)
worktree_dir = os.path.join(args.gitdir, WORKTREE_SUBDIR)
repo.git.worktree("add", "-b",
new_branch,
WORKTREE_SUBDIR,
"{}/{}".format(upstream_remote, release_fullname))
# transform branch name into Head object for later configuring
new_branch = repo.branches[new_branch]
try:
bp_repo = git.Repo(worktree_dir)
# Apply commits
for commit in commits:
bp_repo.git.cherry_pick('-x', commit['sha'])
# Push to github
origin = _find_remote(repo, username, REPO)
print("Pushing branch {} to {}".format(new_branch, origin))
if not args.noop:
push_info = origin.push('{0}:{0}'.format(new_branch))
new_branch.set_tracking_branch(push_info[0].remote_ref)
except Exception as exc:
# Delete worktree
print("Pruning temporary workdir at {}".format(worktree_dir))
_delete_worktree(repo, worktree_dir)
# also delete branch created by worktree; this is only possible after
# the worktree was deleted
repo.delete_head(new_branch)
raise exc
else:
# Delete worktree
print("Pruning temporary workdir at {}".format(worktree_dir))
_delete_worktree(repo, worktree_dir)
labels = _get_labels(pulldata)
merger = pulldata['merged_by']['login']
if not args.noop:
# Open new PR on github
pr = {
'title': "{} [backport {}]".format(pulldata['title'],
release_shortname),
'head': '{}:{}'.format(username, new_branch),
'base': release_fullname,
'body': "# Backport of #{}\n\n{}".format(args.PR,
pulldata['body']),
'maintainer_can_modify': True,
}
status, new_pr = g.repos[ORG][REPO].pulls.post(body=pr)
if status != 201:
print("Error creating the new pr: \"{}\". Is \"Public Repo\""
" access enabled for the token"
.format(new_pr['message']))
pr_number = new_pr['number']
print("Create PR number #{} for backport".format(pr_number))
g.repos[ORG][REPO].issues[pr_number].labels.post(body=labels)
review_request = {"reviewers": [merger]}
g.repos[ORG][REPO].pulls[pr_number].\
requested_reviewers.post(body=review_request)
# Put commit under old PR
if args.comment and not args.noop:
comment = {"body": "Backport provided in #{}".format(pr_number)}
status, res = g.repos[ORG][REPO].\
issues[args.PR].comments.post(body=comment)
if status != 201:
print("Something went wrong adding the comment: {}"
.format(res['message']))
print("Added comment to #{}".format(args.PR))
if __name__ == "__main__":
main()
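# --- Editor's usage note (not part of the original script) ---
# Example invocation: backport merged PR 1234 onto the latest release branch
# and leave a cross-reference comment on the original PR (requires a GitHub
# token in ~/.riotgithubtoken):
#
#   ./backport_pr.py --comment 1234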
|
mtausig/RIOT
|
dist/tools/backport_pr/backport_pr.py
|
Python
|
lgpl-2.1
| 9,797
|
# Natural Language Toolkit: Finite State Automata
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Authors: Steven Bird <sb@ldc.upenn.edu>
# Rob Speer <rspeer@mit.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
A module for finite state automata.
Operations are based on Aho, Sethi & Ullman (1986) Chapter 3.
"""
from nltk import tokenize, Tree, cfg
from nltk.parse import pchart
import yaml
epsilon = None
# some helper functions
# TODO - check that parse was complete, and report error otherwise
class FSA(yaml.YAMLObject):
"""
A class for finite state automata. In general, it represents
    nondeterministic finite state automata, with DFAs being a special case.
"""
yaml_tag = '!FSA'
def __init__(self, sigma='', transitions=None, start=0, finals=None):
"""Set up the FSA.
@param sigma: the alphabet of the FSA
@type sigma: sequence
@param transitions: A dictionary representing the states and
transitions in the FSA. The keys are state identifiers (any hashable
object), and the values are dictionaries that map input symbols to the
sets of states they lead to.
@type transitions: dict
@param start: The identifier of the start state
@type start: hashable object
@param finals: The identifiers of the accept states
@type finals: sequence
"""
self._transitions = transitions or {0: {}}
self._start = start
self._reverse = {}
self._build_reverse_transitions()
if finals: self._finals = set(finals)
else: self._finals = set([0])
self._sigma = set(sigma)
assert isinstance(self._transitions, dict)
self._next_state_num = 0
def _build_reverse_transitions(self):
for state in self._transitions:
self._reverse.setdefault(state, {})
for (state, symbol, target) in self.generate_transitions():
self._add_transition(self._reverse, target, symbol, state)
def generate_transitions(self):
"""
A generator that yields each transition arrow in the FSA in the form
(source, label, target).
"""
for (state, map) in self._transitions.items():
for (symbol, targets) in map.items():
for target in targets:
yield (state, symbol, target)
def labels(self, s1, s2):
"""
A generator for all possible labels taking state s1 to state s2.
"""
map = self._transitions.get(s1, {})
for (symbol, targets) in map.items():
if s2 in targets: yield symbol
def sigma(self):
"The alphabet of the FSA."
return self._sigma
alphabet = sigma
def check_in_sigma(self, label):
"Check whether a given object is in the alphabet."
if label and label not in self._sigma:
raise ValueError('Label "%s" not in alphabet: %s' % (label, str(self._sigma)))
def __len__(self):
"The number of states in the FSA."
return len(self._transitions)
def new_state(self):
"""
Add a new state to the FSA.
@returns: the ID of the new state (a sequentially-assigned number).
@rtype: int
"""
while self._next_state_num in self._transitions:
self._next_state_num += 1
self._transitions[self._next_state_num] = {}
self._reverse[self._next_state_num] = {}
return self._next_state_num
def add_state(self, name):
self._transitions[name] = {}
self._reverse[name] = {}
return name
def start(self):
"""
@returns: the ID of the FSA's start state.
"""
return self._start
def finals(self):
"""
@returns: the IDs of all accept states.
@rtype: set
"""
# was a tuple before
return self._finals
def states(self):
"""
@returns: a list of all states in the FSA.
@rtype: list
"""
return self._transitions.keys()
def add_final(self, state):
"""
Make a state into an accept state.
"""
self._finals.add(state)
def delete_final(self, state):
"""
Make an accept state no longer be an accept state.
"""
self._finals = self._finals.difference(set([state]))
# del self._finals[state]
def set_final(self, states):
"""
Set the list of accept states.
"""
self._finals = set(states)
def set_start(self, start):
"""
Set the start state of the FSA.
"""
self._start = start
def in_finals(self, list):
"""
Check whether a sequence contains any final states.
"""
return [state for state in list
if state in self.finals()] != []
def insert_safe(self, s1, label, s2):
if s1 not in self.states():
self.add_state(s1)
if s2 not in self.states():
self.add_state(s2)
self.insert(s1, label, s2)
def insert(self, s1, label, s2):
"""
Add a new transition to the FSA.
@param s1: the source of the transition
@param label: the element of the alphabet that labels the transition
@param s2: the destination of the transition
"""
if s1 not in self.states():
raise ValueError, "State %s does not exist" % s1
if s2 not in self.states():
raise ValueError, "State %s does not exist" % s1
self._add_transition(self._transitions, s1, label, s2)
self._add_transition(self._reverse, s2, label, s1)
def _add_transition(self, map, s1, label, s2):
mapping = map[s1]
targets = mapping.setdefault(label, set())
targets.add(s2)
def _del_transition(self, map, s1, label, s2):
mapping = map[s1]
targets = mapping.setdefault(label, set())
targets.remove(s2)
if len(targets) == 0: del mapping[label]
def delete(self, s1, label, s2):
"""
Removes a transition from the FSA.
@param s1: the source of the transition
@param label: the element of the alphabet that labels the transition
@param s2: the destination of the transition
"""
if s1 not in self.states():
raise ValueError, "State %s does not exist" % s1
if s2 not in self.states():
raise ValueError, "State %s does not exist" % s1
self._del_transition(self._transitions, s1, label, s2)
self._del_transition(self._reverse, s2, label, s1)
def delete_state(self, state):
"Removes a state and all its transitions from the FSA."
if state not in self.states():
raise ValueError, "State %s does not exist" % state
for (s1, label, s2) in self.incident_transitions(state):
self.delete(s1, label, s2)
del self._transitions[state]
del self._reverse[state]
def incident_transitions(self, state):
"""
@returns: a set of transitions into or out of a state.
@rtype: set
"""
result = set()
forward = self._transitions[state]
backward = self._reverse[state]
for label, targets in forward.items():
for target in targets:
result.add((state, label, target))
for label, targets in backward.items():
for target in targets:
result.add((target, label, state))
return result
def relabel_state(self, old, new):
"""
Assigns a state a new identifier.
"""
if old not in self.states():
raise ValueError, "State %s does not exist" % old
if new in self.states():
raise ValueError, "State %s already exists" % new
changes = []
for (s1, symbol, s2) in self.generate_transitions():
if s1 == old and s2 == old:
changes.append((s1, symbol, s2, new, symbol, new))
elif s1 == old:
changes.append((s1, symbol, s2, new, symbol, s2))
elif s2 == old:
changes.append((s1, symbol, s2, s1, symbol, new))
for (leftstate, symbol, rightstate, newleft, newsym, newright)\
in changes:
            self.delete(leftstate, symbol, rightstate)
self.insert(newleft, newsym, newright)
del self._transitions[old]
del self._reverse[old]
def next(self, state, symbol):
"The set of states reached from a certain state via a given symbol."
return self.e_closure(self._transitions[state].get(symbol, set()))
nextStates = next
def move(self, states, symbol):
"The set of states reached from a set of states via a given symbol."
result = set()
for state in states:
result = result.union(self.next(state, symbol))
return self.e_closure(result)
def is_deterministic(self):
"""
Return whether this is a DFA
(every symbol leads from a state to at most one target state).
"""
for map in self._transitions.values():
for targets in map.values():
if len(targets) > 1: return False
return True
def nextState(self, state, symbol):
"""
The single state reached from a state via a given symbol.
If there is more than one such state, raises a ValueError.
If there is no such state, returns None.
"""
next = self.next(state, symbol)
if len(next) > 1:
raise ValueError, "This FSA is nondeterministic -- use nextStates instead."
elif len(next) == 1: return list(next)[0]
else: return None
def forward_traverse(self, state):
"All states reachable by following transitions from a given state."
result = set()
for (symbol, targets) in self._transitions[state].items():
result = result.union(targets)
return result
def reverse_traverse(self, state):
"""All states from which a given state is reachable by following
transitions."""
result = set()
for (symbol, targets) in self._reverse[state].items():
result = result.union(targets)
return result
def _forward_accessible(self, s1, visited):
for s2 in self.forward_traverse(s1):
if not s2 in visited:
visited.add(s2)
self._forward_accessible(s2, visited)
return visited
def _reverse_accessible(self, s1, visited):
for s2 in self.reverse_traverse(s1):
if not s2 in visited:
visited.add(s2)
self._reverse_accessible(s2, visited)
return visited
# delete inaccessible nodes and unused transitions
def prune(self):
"""
Modifies an FSA to remove inaccessible states and unused transitions.
"""
acc = self.accessible()
for state in self.states():
if state not in acc:
self.delete_state(state)
else:
self._clean_map(self._transitions[state])
self._clean_map(self._reverse[state])
def _clean_map(self, map):
for (key, value) in map.items():
if len(value) == 0:
del map[key]
# mark accessible nodes
def accessible(self):
acc = set()
for final in self.finals():
reverse_acc = set([final])
self._reverse_accessible(final, reverse_acc)
acc = acc.union(reverse_acc)
forward_acc = set([self.start()])
self._forward_accessible(self.start(), forward_acc)
acc = acc.intersection(forward_acc)
return acc
def e_closure(self, states):
"""
Given a set of states, return the set of states reachable from
those states by following epsilon transitions.
@param states: the initial set of states
@type states: sequence
@returns: a superset of the given states, reachable by epsilon
transitions
@rtype: set
"""
stack = list(states)
closure = list(states)
while stack:
s1 = stack.pop()
for s2 in self.next(s1, epsilon):
if s2 not in closure:
closure.append(s2)
stack.append(s2)
return set(closure)
# return the corresponding DFA using subset construction (ASU p118)
# NB representation of (a*) still isn't minimal; should have 1 state not 2
def dfa(self):
"Return a DFA that is equivalent to this FSA."
dfa = FSA(self.sigma())
dfa_initial = dfa.start()
nfa_initial = tuple(self.e_closure((self.start(),)))
map = {}
map[dfa_initial] = nfa_initial
map[nfa_initial] = dfa_initial
if nfa_initial in self.finals():
dfa.add_final(dfa_initial)
unmarked = [dfa_initial]
marked = []
while unmarked:
dfa_state = unmarked.pop()
marked.append(dfa_state)
# is a final state accessible via epsilon transitions?
if self.in_finals(self.e_closure(map[dfa_state])):
dfa.add_final(dfa_state)
for label in self.sigma():
nfa_next = tuple(self.e_closure(self.move(map[dfa_state],
label)))
if map.has_key(nfa_next):
dfa_next = map[nfa_next]
else:
dfa_next = dfa.new_state()
map[dfa_next] = nfa_next
map[nfa_next] = dfa_next
if self.in_finals(nfa_next):
dfa.add_final(dfa_next)
unmarked.append(dfa_next)
dfa.insert(dfa_state, label, dfa_next)
return dfa
def generate(self, maxlen, state=0, prefix=""):
"Generate all accepting sequences of length at most maxlen."
if maxlen > 0:
if state in self._finals:
print prefix
            # iterate this state's outgoing transitions directly; the original
            # called a nonexistent outgoing_transitions() helper here
            for (label, targets) in self._transitions.get(state, {}).items():
                for s2 in targets:
                    self.generate(maxlen-1, s2, prefix+(label or ""))
def pp(self):
"""
Print a representation of this FSA (in human-readable YAML format).
"""
print yaml.dump(self)
@classmethod
def from_yaml(cls, loader, node):
map = loader.construct_mapping(node)
        result = cls(map.get('sigma', []), {}, map.get('start', 0),
                     map.get('finals', []))
for (s1, map1) in map['transitions'].items():
for (symbol, targets) in map1.items():
for s2 in targets:
result.insert(s1, symbol, s2)
return result
@classmethod
def to_yaml(cls, dumper, data):
sigma = data.sigma()
transitions = {}
for (s1, symbol, s2) in data.generate_transitions():
map1 = transitions.setdefault(s1, {})
map2 = map1.setdefault(symbol, [])
map2.append(s2)
try: sigma = "".join(sigma)
except TypeError: sigma = list(sigma)
node = dumper.represent_mapping(cls.yaml_tag, dict(
sigma = sigma,
finals = list(data.finals()),
start = data._start,
transitions = transitions))
return node
def __str__(self):
return yaml.dump(self)
### FUNCTIONS TO BUILD FSA FROM REGEXP
# the grammar of regular expressions
# (probabilities ensure that unary operators
# bind more tightly than juxtaposition)
def grammar(terminals):
(S, Expr, Star, Plus, Qmk, Paren) = [cfg.Nonterminal(s) for s in 'SE*+?(']
rules = [cfg.WeightedProduction(Expr, [Star], prob=0.2),
cfg.WeightedProduction(Expr, [Plus], prob=0.2),
cfg.WeightedProduction(Expr, [Qmk], prob=0.2),
cfg.WeightedProduction(Expr, [Paren], prob=0.2),
cfg.WeightedProduction(S, [Expr], prob=0.5),
cfg.WeightedProduction(S, [S, Expr], prob=0.5),
cfg.WeightedProduction(Star, [Expr, '*'], prob=1),
cfg.WeightedProduction(Plus, [Expr, '+'], prob=1),
cfg.WeightedProduction(Qmk, [Expr, '?'], prob=1),
cfg.WeightedProduction(Paren, ['(', S, ')'], prob=1)]
prob_term = 0.2/len(terminals) # divide remaining pr. mass
for terminal in terminals:
rules.append(cfg.WeightedProduction(Expr, [terminal], prob=prob_term))
return cfg.WeightedGrammar(S, rules)
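# Illustrative parse (hypothetical): under this grammar "ab*" is expected to
# parse roughly as S(S(E(a)), E(*(E(b) '*'))), i.e. the '*' attaches to 'b'
# alone and the two units are then juxtaposed.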
_parser = pchart.InsideParser(grammar('abcde'))
# create NFA from regexp (Thompson's construction)
# assumes unique start and final states
def re2nfa(fsa, re):
tokens = tokenize.regexp(re, pattern=r'.')
tree = _parser.parse(tokens)
if tree is None: raise ValueError('Bad Regexp')
state = re2nfa_build(fsa, fsa.start(), tree)
fsa.set_final([state])
# fsa.minimize()
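# Typical use (mirrors demo() below, names hypothetical):
# fsa = FSA("ab")
# re2nfa(fsa, "ab*")
# leaves `fsa` holding the NFA built by Thompson's construction below.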
def re2nfa_build(fsa, node, tree):
# Terminals.
if not isinstance(tree, Tree):
return re2nfa_char(fsa, node, tree)
elif len(tree) == 1:
return re2nfa_build(fsa, node, tree[0])
elif tree.node == '(':
return re2nfa_build(fsa, node, tree[1])
elif tree.node == '*': return re2nfa_star(fsa, node, tree[0])
elif tree.node == '+': return re2nfa_plus(fsa, node, tree[0])
elif tree.node == '?': return re2nfa_qmk(fsa, node, tree[0])
else:
node = re2nfa_build(fsa, node, tree[0])
return re2nfa_build(fsa, node, tree[1])
def re2nfa_char(fsa, node, char):
new = fsa.new_state()
fsa.insert(node, char, new)
return new
def re2nfa_qmk(fsa, node, tree):
node1 = fsa.new_state()
node2 = re2nfa_build(fsa, node1, tree)
node3 = fsa.new_state()
fsa.insert(node, epsilon, node1)
fsa.insert(node, epsilon, node3)
fsa.insert(node2, epsilon, node3)
return node3
def re2nfa_plus(fsa, node, tree):
node1 = re2nfa_build(fsa, node, tree[0])
fsa.insert(node1, epsilon, node)
return node1
def re2nfa_star(fsa, node, tree):
node1 = fsa.new_state()
node2 = re2nfa_build(fsa, node1, tree)
node3 = fsa.new_state()
fsa.insert(node, epsilon, node1)
fsa.insert(node, epsilon, node3)
fsa.insert(node2, epsilon, node1)
fsa.insert(node2, epsilon, node3)
return node3
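# Sketch of the star construction above (hypothetical node numbers):
#
# node -eps-> node1 -[expr]-> node2 -eps-> node3
# node ---------------eps---------------> node3 (skip the expression)
# node2 -eps-> node1 (repeat the expression)
#
# node3 is returned as the new current state, so zero or more copies of the
# sub-expression are matched between node and node3.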
#################################################################
# Demonstration
#################################################################
def demo():
"""
A demonstration showing how FSAs can be created and used.
"""
# Define an alphabet.
alphabet = "abcd"
# Create a new FSA.
fsa = FSA(alphabet)
# Use a regular expression to initialize the FSA.
re = 'abcd'
print 'Regular Expression:', re
re2nfa(fsa, re)
print "NFA:"
fsa.pp()
# Convert the (nondeterministic) FSA to a deterministic FSA.
dfa = fsa.dfa()
print "DFA:"
dfa.pp()
# Prune the DFA
dfa.prune()
print "PRUNED DFA:"
dfa.pp()
# Use the FSA to generate all strings of length less than 3
# (broken)
#fsa.generate(3)
if __name__ == '__main__': demo()
|
hectormartinez/rougexstem
|
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/fsa.py
|
Python
|
apache-2.0
| 19,337
|
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(3, java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_hhp_2_fvec(self):
# NAs cause the confusion matrix (CM) to zero out.. don't run for now
csvPathname = 'hhp_9_17_12.predict.data.gz'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put', timeoutSecs=30)
h2o_cmd.runRF(parseResult=parseResult, trees=6, timeoutSecs=300)
if __name__ == '__main__':
h2o.unit_main()
|
vbelakov/h2o
|
py/testdir_multi_jvm/test_rf_hhp_2_fvec.py
|
Python
|
apache-2.0
| 723
|
##########################################################################
#
# Copyright (c) 2008-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os.path
import maya.cmds as cmds
import maya.OpenMaya as OpenMaya
import IECore
import IECoreMaya
import sys
class TestParameterisedHolder( IECoreMaya.TestCase ) :
def __checkAllParameterPlugs( self, fnOH, parameter=None ) :
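# Recursively checks that every parameter beneath `parameter` (or beneath the
# holder's whole parameter tree when parameter is None) has a corresponding
# maya plug.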
if parameter is not None :
plug = fnOH.parameterPlug( parameter )
self.failIf( plug.isNull() )
else :
parameter = fnOH.getParameterised()[0].parameters()
if parameter.isInstanceOf( IECore.CompoundParameter.staticTypeId() ) :
for p in parameter.values() :
self.__checkAllParameterPlugs( fnOH, p )
def testNode( self ):
""" Test ParameterisedHolderNode """
n = cmds.createNode( "ieParameterisedHolderNode" )
h = IECoreMaya.FnParameterisedHolder( str(n) )
self.assert_( h )
p = IECore.ParticleMeshOp()
h.setParameterised( p )
p.parameters()["filename"] = "testValue"
h.setNodeValue( p.parameters()["filename"] )
pl = h.parameterPlug( p.parameters()["filename"] )
v = IECoreMaya.FromMayaPlugConverter.create( pl, IECore.TypeId.StringData ).convert()
self.assertEqual( v.value, "testValue" )
cmds.setAttr( pl.name(), "testValue2", typ="string" )
h.setParameterisedValue( p.parameters()["filename"] )
self.assertEqual( p.parameters()["filename"].getValue().value, "testValue2" )
def testParameterisedHolderSetReference( self ):
""" Test multiple references to ieParameterisedHolderSet nodes """
nodeType = "ieParameterisedHolderSet"
nodeName = cmds.createNode( nodeType )
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "reference.ma" ) )
scene = cmds.file( force = True, type = "mayaAscii", save = True )
cmds.file( new = True, force = True )
cmds.file( scene, reference = True, namespace = "ns1" )
cmds.file( scene, reference = True, namespace = "ns2" )
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "referenceMaster.ma" ) )
masterScene = cmds.file( force = True, type = "mayaAscii", save = True )
cmds.file( masterScene, force = True, open = True )
nodeName1 = "ns1:" + nodeName
nodeName2 = "ns2:" + nodeName
l = OpenMaya.MSelectionList()
l.add( nodeName1 )
l.add( nodeName2 )
node1 = OpenMaya.MObject()
l.getDependNode( 0, node1 )
node2 = OpenMaya.MObject()
l.getDependNode( 1, node2 )
fn1 = OpenMaya.MFnDependencyNode( node1 )
fn2 = OpenMaya.MFnDependencyNode( node2 )
self.assert_( fn1.userNode() )
self.assert_( fn2.userNode() )
# This failure is due to a Maya bug. When referencing the same scene twice,
# as an optimisation Maya will duplicate existing nodes instead of creating
# new ones. There is a bug in MPxObjectSet::copy() which gets exercised
# here. Setting the environment variable MAYA_FORCE_REF_READ to 1 will
# disable this optimisation, however.
def testChangeDefault( self ) :
""" Test that changing parameter defaults is correctly reflected in Maya attributes """
def makeOp( defaultValue ) :
class TestOp( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self, "Tests stuff",
IECore.IntParameter(
name = "result",
description = "",
defaultValue = 0
)
)
self.parameters().addParameters(
[
IECore.Color3fParameter(
name = "c",
description = "",
defaultValue = defaultValue
),
]
)
return TestOp()
n = cmds.createNode( "ieParameterisedHolderNode" )
h = IECoreMaya.FnParameterisedHolder( str(n) )
self.assert_( h )
p = makeOp( IECore.Color3f( 0, 0, 0 ) )
h.setParameterised( p )
dv = cmds.attributeQuery ( "parm_c", node = n, listDefault = True )
self.assertEqual( dv, [ 0, 0, 0 ] )
p = makeOp( IECore.Color3f( 1, 1, 1 ) )
h.setParameterised( p )
dv = cmds.attributeQuery ( "parm_c", node = n, listDefault = True )
self.assertEqual( dv, [ 1, 1, 1 ] )
def testDirectSettingOfOp( self ) :
class TestOp( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self,
"",
IECore.FloatParameter(
"result",
"",
0.0
),
)
self.parameters().addParameter(
IECore.FloatParameter(
"a",
"",
0.0
)
)
def doOperation( self, operands ) :
return IECore.FloatData( operands["a"].value )
node = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnParameterisedHolder( str( node ) )
op = TestOp()
fnOH.setParameterised( op )
self.failUnless( cmds.objExists( node + ".result" ) )
aAttr = fnOH.parameterPlugPath( op["a"] )
cmds.setAttr( aAttr, 10 )
self.assertEqual( cmds.getAttr( node + ".result" ), 10 )
cmds.setAttr( aAttr, 20 )
self.assertEqual( cmds.getAttr( node + ".result" ), 20 )
def testLazySettingFromCompoundPlugs( self ) :
class TestProcedural( IECore.ParameterisedProcedural ) :
def __init__( self ) :
IECore.ParameterisedProcedural.__init__( self, "" )
self.parameters().addParameter(
IECore.V3fParameter(
"halfSize",
"",
IECore.V3f( 0 )
)
)
def doBound( self, args ) :
return IECore.Box3f( -args["halfSize"].value, args["halfSize"].value )
def doRenderState( self, args ) :
pass
def doRender( self, args ) :
pass
node = cmds.createNode( "ieProceduralHolder" )
fnPH = IECoreMaya.FnParameterisedHolder( str( node ) )
p = TestProcedural()
fnPH.setParameterised( p )
self.assertEqual( cmds.getAttr( node + ".boundingBoxMin" ), [( 0, 0, 0 )] )
cmds.setAttr( fnPH.parameterPlugPath( p["halfSize"] ), 1, 2, 3 )
self.assertEqual( cmds.getAttr( node + ".boundingBoxMin" ), [( -1, -2, -3 )] )
def testLazySettingFromArrayPlugs( self ) :
class TestProcedural( IECore.ParameterisedProcedural ) :
def __init__( self ) :
IECore.ParameterisedProcedural.__init__( self, "" )
self.parameters().addParameter(
IECore.SplineffParameter(
"spline",
"",
defaultValue = IECore.SplineffData(
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
(
( 0, 1 ),
( 0, 1 ),
( 1, 0 ),
( 1, 0 ),
),
),
),
),
)
def doBound( self, args ) :
v = args["spline"].value.points()[0][1]
return IECore.Box3f( IECore.V3f( -v ), IECore.V3f( v ) )
def doRenderState( self, args ) :
pass
def doRender( self, args ) :
pass
node = cmds.createNode( "ieProceduralHolder" )
fnPH = IECoreMaya.FnParameterisedHolder( str( node ) )
p = TestProcedural()
fnPH.setParameterised( p )
self.assertEqual( cmds.getAttr( node + ".boundingBoxMin" ), [( -1, -1, -1 )] )
plugPath = fnPH.parameterPlugPath( p["spline"] )
plugName = plugPath.partition( "." )[2]
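# Build the path to the first spline point's y-value; the child plug name is
# presumed to follow the "<plugName>_FloatValue" convention used by the
# spline attribute.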
pointValuePlugPath = plugPath + "[0]." + plugName + "_FloatValue"
cmds.setAttr( pointValuePlugPath, 2 )
self.assertEqual( cmds.getAttr( node + ".boundingBoxMin" ), [( -2, -2, -2 )] )
def testObjectParameterIOProblem( self ) :
fnPH = IECoreMaya.FnProceduralHolder.create( "procedural", "image", 1 )
p = fnPH.getProcedural()
w = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 255 ) )
image = IECore.ImagePrimitive( w, w )
image.createFloatChannel( "Y" )
image.createFloatChannel( "A" )
p.parameters()["image"].setValue( image )
fnPH.setNodeValues()
cmds.file( rename = os.getcwd() + "/test/IECoreMaya/objectParameterIO.ma" )
scene = cmds.file( force = True, type = "mayaAscii", save = True )
cmds.file( new = True, force = True )
cmds.file( scene, open = True )
fnPH = IECoreMaya.FnProceduralHolder( "proceduralShape" )
fnPH.setParameterisedValues()
p = fnPH.getProcedural()
i2 = p.parameters()["image"].getValue()
self.assertEqual( i2, image )
def testObjectMFnDataParameterIOProblem( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "opHolder", "matrixParameter", 1 )
op = fnOH.getOp()
locator = cmds.spaceLocator()[0]
parameterPlugPath = fnOH.parameterPlugPath( op.parameters()['matrix'] )
attrPlugPath = '%s.worldMatrix' % ( locator )
cmds.connectAttr( attrPlugPath, parameterPlugPath )
cmds.file( rename = os.getcwd() + "/test/IECoreMaya/objectMFnDataParameterIO.ma" )
scene = cmds.file( force = True, type = "mayaAscii", save = True )
cmds.file( new = True, force = True )
cmds.file( scene, open = True )
connections = cmds.listConnections( parameterPlugPath, plugs=True, connections=True ) or []
self.failUnless( attrPlugPath in connections )
self.failUnless( parameterPlugPath in connections )
def testNonStorableObjectParameter( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "opHolder", "unstorable", 1 )
op = fnOH.getOp()
node = fnOH.fullPathName()
testObj = IECore.CompoundObject( { "someData" : IECore.BoolData( False ) } )
with fnOH.parameterModificationContext() :
op["input"].setValue( testObj )
self.assertEqual( op["input"].getValue(), testObj )
cmds.file( rename = os.getcwd() + "/test/IECoreMaya/nonStorableObjectParameter.ma" )
scene = cmds.file( force = True, type = "mayaAscii", save = True )
cmds.file( new = True, force = True )
cmds.file( scene, open = True )
fnPH = IECoreMaya.FnParameterisedHolder( node )
fnPH.setParameterisedValues()
op = fnPH.getParameterised()[0]
self.assertEqual( op["input"].getValue(), op["input"].defaultValue )
def testMeshParameterIOProblem( self ) :
fnOP = IECoreMaya.FnOpHolder.create( "merge", "meshMerge", 1 )
op = fnOP.getOp()
mesh = IECore.MeshPrimitive.createBox( IECore.Box3f( IECore.V3f( -2, -2, -2 ), IECore.V3f( 2, 3, 4 ) ) )
op.parameters()["input"].setValue( mesh )
fnOP.setNodeValues()
cmds.file( rename = os.getcwd() + "/test/IECoreMaya/meshParameterIO.ma" )
scene = cmds.file( force = True, type = "mayaAscii", save = True )
cmds.file( new = True, force = True )
cmds.file( scene, open = True )
fnOP = IECoreMaya.FnOpHolder( "merge" )
fnOP.setParameterisedValues()
op = fnOP.getOp()
mesh2 = op.parameters()["input"].getValue()
self.failUnless( mesh2.arePrimitiveVariablesValid() )
del mesh2["N"] # the round trip presumably adds default normals; drop them before comparing
self.assertEqual( mesh2, mesh )
def testOpHolder( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "opHolder", "maths/multiply", 2 )
op = fnOH.getOp()
self.assertEqual( cmds.attributeQuery( "result", node="opHolder", storable=True ), False )
self.assertEqual( cmds.attributeQuery( "result", node="opHolder", writable=True ), False )
aPlug = fnOH.parameterPlugPath( op["a"] )
bPlug = fnOH.parameterPlugPath( op["b"] )
cmds.setAttr( aPlug, 20 )
cmds.setAttr( bPlug, 100 )
self.assertEqual( cmds.getAttr( "opHolder.result" ), 2000 )
def testParameterTypes( self ) :
node = cmds.createNode( "ieOpHolderNode" )
fnPH = IECoreMaya.FnParameterisedHolder( node )
op = IECore.ClassLoader.defaultOpLoader().load( "parameterTypes", 1 )()
op.parameters().removeParameter( "m" ) # no color4f support in maya
fnPH.setParameterised( op )
for parameter in op.parameters().values() :
self.failUnless( cmds.objExists( fnPH.parameterPlugPath( parameter ) ) )
def testCompoundObjectConnections( self ) :
fnOHA = IECoreMaya.FnOpHolder.create( "opA", "compoundObjectInOut", 1 )
fnOHB = IECoreMaya.FnOpHolder.create( "opB", "compoundObjectInOut", 1 )
opB = fnOHB.getOp()
inputPlug = fnOHB.parameterPlugPath( opB["input"] )
cmds.connectAttr( "opA.result", inputPlug )
self.assertEqual( cmds.listConnections( inputPlug, source=True, destination=False, plugs=True ), [ "opA.result" ] )
self.assertEqual( cmds.listConnections( inputPlug, source=False, destination=True, plugs=True ), None )
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "compoundObjectConnections.ma" ) )
scene = cmds.file( force = True, type = "mayaAscii", save = True )
cmds.file( new = True, force = True )
cmds.file( scene, open = True )
self.assertEqual( cmds.listConnections( inputPlug, source=True, destination=False, plugs=True ), [ "opA.result" ] )
self.assertEqual( cmds.listConnections( inputPlug, source=False, destination=True, plugs=True ), None )
def testDefaultConnections( self ) :
# make an opholder for an op with default connections
# and make sure they are made.
fnOH = IECoreMaya.FnOpHolder.create( "opA", "mayaUserData", 1 )
op = fnOH.getOp()
tPlug = fnOH.parameterPlugPath( op["t"] )
self.assertEqual( cmds.listConnections( tPlug, source=True, destination=False, plugs=True, skipConversionNodes=True ), [ "time1.outTime" ] )
self.assertEqual( cmds.listConnections( tPlug, source=False, destination=True, plugs=True ), None )
ePlug = fnOH.parameterPlugPath( op["e"] )
eInputPlugs = cmds.listConnections( ePlug, source=True, destination=False, plugs=True )
eInputNodes = cmds.listConnections( ePlug, source=True, destination=False )
self.assertEqual( len( eInputNodes ), 1 )
self.assertEqual( cmds.nodeType( eInputNodes[0] ), "expression" )
# save the file
cmds.file( rename = os.getcwd() + "/test/IECoreMaya/defaultConnections.ma" )
scene = cmds.file( force = True, type = "mayaAscii", save = True )
# load it again and check the connections are still there
cmds.file( new = True, force = True )
cmds.file( scene, open = True )
self.assertEqual( cmds.listConnections( tPlug, source=True, destination=False, plugs=True, skipConversionNodes=True ), [ "time1.outTime" ] )
self.assertEqual( cmds.listConnections( tPlug, source=False, destination=True, plugs=True ), None )
eInputNodes = cmds.listConnections( ePlug, source=True, destination=False )
self.assertEqual( len( eInputNodes ), 1 )
self.assertEqual( cmds.nodeType( eInputNodes[0] ), "expression" )
# remove the connections and save
cmds.disconnectAttr( "time1.outTime", tPlug )
cmds.disconnectAttr( eInputPlugs[0], ePlug )
self.assertEqual( cmds.listConnections( tPlug, source=True, destination=False, plugs=True, skipConversionNodes=True ), None )
self.assertEqual( cmds.listConnections( ePlug, source=True, destination=False, plugs=True, skipConversionNodes=True ), None )
scene = cmds.file( force = True, type = "mayaAscii", save = True )
# load again and check they remain disconnected
cmds.file( new = True, force = True )
cmds.file( scene, open = True )
self.assertEqual( cmds.listConnections( tPlug, source=True, destination=False, plugs=True, skipConversionNodes=True ), None )
self.assertEqual( cmds.listConnections( ePlug, source=True, destination=False, plugs=True, skipConversionNodes=True ), None )
def testConnectedNodeNameValueProvider( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "opA", "mayaUserData", 1 )
op = fnOH.getOp()
fnOH.setParameterisedValues()
self.assertEqual( op["s"].getTypedValue(), "" )
sPlug = fnOH.parameterPlugPath( op["s"] )
cmds.connectAttr( "time1.outTime", sPlug )
fnOH.setParameterisedValues()
self.assertEqual( op["s"].getTypedValue(), "time1" )
def testReferencedConnectedNodeNameValueProvider( self ) :
# Create a scene with a ClassVector parameter containing a StringParameter
# that uses the "connectedNodeName" value provider, and hook it up.
##########################################################################
fnOH = IECoreMaya.FnOpHolder.create( "node", "classVectorParameterTest", 2 )
op = fnOH.getOp()
c = op["cv"]
with fnOH.parameterModificationContext() :
c.setClasses(
[
( "mud", "mayaUserData", 1 ),
]
)
plugPath = fnOH.parameterPlugPath( c["mud"]["s"] )
camera = cmds.createNode( "camera" )
cmds.connectAttr( "%s.message" % camera, plugPath )
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "connectedNodeReference.ma" ) )
referenceScene = cmds.file( force = True, type = "mayaAscii", save = True )
# Reference this scene into a new one, and add another class.
#############################################################
cmds.file( new = True, force = True )
cmds.file( referenceScene, reference = True, namespace = "ns1" )
fnOH = IECoreMaya.FnOpHolder( "ns1:node" )
op = fnOH.getOp()
c = op["cv"]
with fnOH.parameterModificationContext() :
c.setClasses(
[
( "mud", "mayaUserData", 1 ),
( "maths", "maths/multiply", 1 )
]
)
plugPath = fnOH.parameterPlugPath( c["mud"]["s"] )
self.failUnless( cmds.isConnected( "ns1:%s.message" % camera, plugPath ) )
# Save, and re-open scene, and make sure that the message connection survived
#############################################################################
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "connectedNodeReference2.ma" ) )
thisScene = cmds.file( force = True, type = "mayaAscii", save = True )
cmds.file( thisScene, open = True, force = True )
self.failUnless( cmds.isConnected( "ns1:%s.message" % camera, plugPath ) )
def testClassParameter( self ) :
class TestOp( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self,
"",
IECore.FloatParameter(
"result",
"",
0.0
),
)
self.parameters().addParameter(
IECore.ClassParameter(
"cp",
"",
"IECORE_OP_PATHS"
)
)
def doOperation( self, operands ) :
return IECore.FloatData( 1 )
node = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnParameterisedHolder( str( node ) )
op = TestOp()
fnOH.setParameterised( op )
with fnOH.parameterModificationContext() :
op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" )
aPlugPath = fnOH.parameterPlugPath( op["cp"]["a"] )
bPlugPath = fnOH.parameterPlugPath( op["cp"]["b"] )
self.assertEqual( cmds.getAttr( aPlugPath ), 1 )
self.assertEqual( cmds.getAttr( bPlugPath ), 2 )
with fnOH.parameterModificationContext() :
op["cp"].setClass( "stringParsing", 1, "IECORE_OP_PATHS" )
self.failIf( cmds.objExists( aPlugPath ) )
self.failIf( cmds.objExists( bPlugPath ) )
emptyStringPlugPath = fnOH.parameterPlugPath( op["cp"]["emptyString"] )
self.assertEqual( cmds.getAttr( emptyStringPlugPath ), "notEmpty" )
def testClassParameterSaveAndLoad( self ) :
# make an opholder with a ClassParameter, and set the held class
####################################################################
fnOH = IECoreMaya.FnOpHolder.create( "node", "classParameterTest", 1 )
op = fnOH.getOp()
with fnOH.parameterModificationContext() :
op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" )
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "multiply" )
self.assertEqual( className, "maths/multiply" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
# check that maya has appropriate attributes for the held class,
# and that the held class hasn't changed in the process.
####################################################################
heldClass2, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "multiply" )
self.assertEqual( className, "maths/multiply" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
self.failUnless( heldClass is heldClass2 )
# change some parameter values and push them into maya.
####################################################################
op["cp"]["a"].setNumericValue( 10 )
op["cp"]["b"].setNumericValue( 20 )
fnOH.setNodeValues()
self.assertEqual( cmds.getAttr( fnOH.parameterPlugPath( op["cp"]["a"] ) ), 10 )
self.assertEqual( cmds.getAttr( fnOH.parameterPlugPath( op["cp"]["b"] ) ), 20 )
# save the scene
####################################################################
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "classParameter.ma" ) )
scene = cmds.file( force = True, type = "mayaAscii", save = True )
cmds.file( new = True, force = True )
# reload it and check we have the expected class and attributes
####################################################################
cmds.file( scene, open = True )
fnOH = IECoreMaya.FnOpHolder( "node" )
op = fnOH.getOp()
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "multiply" )
self.assertEqual( className, "maths/multiply" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
self.assertEqual( cmds.getAttr( fnOH.parameterPlugPath( op["cp"]["a"] ) ), 10 )
self.assertEqual( cmds.getAttr( fnOH.parameterPlugPath( op["cp"]["b"] ) ), 20 )
def testClassParameterUndo( self ) :
# make an opholder with a ClassParameter, and check that there is
# no class loaded
####################################################################
fnOH = IECoreMaya.FnOpHolder.create( "node", "classParameterTest", 1 )
op = fnOH.getOp()
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass, None )
self.assertEqual( className, "" )
self.assertEqual( classVersion, 0 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
# check that undo is enabled
####################################################################
self.assert_( cmds.undoInfo( query=True, state=True ) )
# set the class and verify it worked
####################################################################
with fnOH.parameterModificationContext() :
op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" )
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "multiply" )
self.assertEqual( className, "maths/multiply" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
aPlugPath = fnOH.parameterPlugPath( heldClass["a"] )
bPlugPath = fnOH.parameterPlugPath( heldClass["b"] )
self.assertEqual( cmds.getAttr( aPlugPath ), 1 )
self.assertEqual( cmds.getAttr( bPlugPath ), 2 )
# undo and check the class is unset
#####################################################################
cmds.undo()
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass, None )
self.assertEqual( className, "" )
self.assertEqual( classVersion, 0 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
self.failIf( cmds.objExists( aPlugPath ) )
self.failIf( cmds.objExists( bPlugPath ) )
def testClassParameterUndoWithPreviousValues( self ) :
# make an opholder with a ClassParameter, and check that there is
# no class loaded
####################################################################
fnOH = IECoreMaya.FnOpHolder.create( "node", "classParameterTest", 1 )
op = fnOH.getOp()
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass, None )
self.assertEqual( className, "" )
self.assertEqual( classVersion, 0 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
# set the class and check it worked
####################################################################
with fnOH.parameterModificationContext() :
op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" )
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "multiply" )
self.assertEqual( className, "maths/multiply" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
aPlugPath = fnOH.parameterPlugPath( heldClass["a"] )
bPlugPath = fnOH.parameterPlugPath( heldClass["b"] )
self.assertEqual( cmds.getAttr( aPlugPath ), 1 )
self.assertEqual( cmds.getAttr( bPlugPath ), 2 )
# change some attribute values
####################################################################
cmds.setAttr( aPlugPath, 10 )
cmds.setAttr( bPlugPath, 20 )
self.assertEqual( cmds.getAttr( aPlugPath ), 10 )
self.assertEqual( cmds.getAttr( bPlugPath ), 20 )
# check that undo is enabled
####################################################################
self.assert_( cmds.undoInfo( query=True, state=True ) )
# change the class to something else and check it worked
####################################################################
with fnOH.parameterModificationContext() :
op["cp"].setClass( "stringParsing", 1, "IECORE_OP_PATHS" )
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "stringParsing" )
self.assertEqual( className, "stringParsing" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
plugPaths = []
for p in heldClass.parameters().values() :
plugPath = fnOH.parameterPlugPath( p )
self.failUnless( cmds.objExists( plugPath ) )
plugPaths.append( plugPath )
self.failIf( cmds.objExists( aPlugPath ) )
self.failIf( cmds.objExists( bPlugPath ) )
# undo and check the previous class reappears, along with the
# previous attribute values
#####################################################################
cmds.undo()
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "multiply" )
self.assertEqual( className, "maths/multiply" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
aPlugPath = fnOH.parameterPlugPath( heldClass["a"] )
bPlugPath = fnOH.parameterPlugPath( heldClass["b"] )
self.assertEqual( cmds.getAttr( aPlugPath ), 10 )
self.assertEqual( cmds.getAttr( bPlugPath ), 20 )
for p in plugPaths :
self.failIf( cmds.objExists( p ) )
def testClassParameterRemovalUndoWithChildren( self ) :
# make an opholder with a ClassParameter, and check that there is
# no class loaded
####################################################################
fnOH = IECoreMaya.FnOpHolder.create( "node", "classParameterTest", 1 )
op = fnOH.getOp()
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass, None )
self.assertEqual( className, "" )
self.assertEqual( classVersion, 0 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
# set the class and check it worked
####################################################################
with fnOH.parameterModificationContext() :
op["cp"].setClass( "classParameterTest", 1, "IECORE_OP_PATHS" )
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "classParameterTest" )
self.assertEqual( className, "classParameterTest" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
# put a class inside the class and check it worked
####################################################################
with fnOH.parameterModificationContext() :
op["cp"]["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" )
aPlugPath = fnOH.parameterPlugPath( op["cp"]["cp"]["a"] )
bPlugPath = fnOH.parameterPlugPath( op["cp"]["cp"]["b"] )
self.assertEqual( cmds.getAttr( aPlugPath ), 1 )
self.assertEqual( cmds.getAttr( bPlugPath ), 2 )
# change some attribute values
####################################################################
cmds.setAttr( aPlugPath, 10 )
cmds.setAttr( bPlugPath, 20 )
self.assertEqual( cmds.getAttr( aPlugPath ), 10 )
self.assertEqual( cmds.getAttr( bPlugPath ), 20 )
# check that undo is enabled
####################################################################
self.assert_( cmds.undoInfo( query=True, state=True ) )
# remove the top level class
####################################################################
with fnOH.parameterModificationContext() :
op["cp"].setClass( "", -1, "IECORE_OP_PATHS" )
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass, None )
self.assertEqual( className, "" )
self.assertEqual( classVersion, -1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
self.failIf( cmds.objExists( aPlugPath ) )
self.failIf( cmds.objExists( bPlugPath ) )
# undo and check the previous class reappears, along with the child
# class and previous attribute values
#####################################################################
cmds.undo()
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "classParameterTest" )
self.assertEqual( className, "classParameterTest" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
childClass, childClassName, childClassVersion, childSearchPath = heldClass["cp"].getClass( True )
self.assertEqual( childClass.typeName(), "multiply" )
self.assertEqual( childClassName, "maths/multiply" )
self.assertEqual( childClassVersion, 1 )
self.assertEqual( childSearchPath, "IECORE_OP_PATHS" )
aPlugPath = fnOH.parameterPlugPath( childClass["a"] )
bPlugPath = fnOH.parameterPlugPath( childClass["b"] )
self.assertEqual( cmds.getAttr( aPlugPath ), 10 )
self.assertEqual( cmds.getAttr( bPlugPath ), 20 )
def testClassParameterReferenceEdits( self ) :
# make a file with a class parameter with no held class
#######################################################################
fnOH = IECoreMaya.FnOpHolder.create( "node", "classParameterTest", 1 )
op = fnOH.getOp()
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass, None )
self.assertEqual( className, "" )
self.assertEqual( classVersion, 0 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "classParameterReference.ma" ) )
referenceScene = cmds.file( force = True, type = "mayaAscii", save = True )
# make a new scene referencing that file
#######################################################################
cmds.file( new = True, force = True )
cmds.file( referenceScene, reference = True, namespace = "ns1" )
# set the held class and change some attribute values
#######################################################################
fnOH = IECoreMaya.FnOpHolder( "ns1:node" )
op = fnOH.getOp()
with fnOH.parameterModificationContext() :
op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" )
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "multiply" )
self.assertEqual( className, "maths/multiply" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
aPlugPath = fnOH.parameterPlugPath( heldClass["a"] )
bPlugPath = fnOH.parameterPlugPath( heldClass["b"] )
cmds.setAttr( aPlugPath, 10 )
cmds.setAttr( bPlugPath, 20 )
# save the scene
#######################################################################
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "classParameterReferencer.ma" ) )
referencerScene = cmds.file( force = True, type = "mayaAscii", save = True )
# reload it and check all is well
#######################################################################
cmds.file( new = True, force = True )
cmds.file( referencerScene, force = True, open = True )
fnOH = IECoreMaya.FnOpHolder( "ns1:node" )
op = fnOH.getOp()
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "multiply" )
self.assertEqual( className, "maths/multiply" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
aPlugPath = fnOH.parameterPlugPath( heldClass["a"] )
bPlugPath = fnOH.parameterPlugPath( heldClass["b"] )
self.assertEqual( cmds.getAttr( aPlugPath ), 10 )
self.assertEqual( cmds.getAttr( bPlugPath ), 20 )
def testClassParameterReferenceEditsWithFloatParameters( self ) :
# make a file with a class parameter with no held class
#######################################################################
fnOH = IECoreMaya.FnOpHolder.create( "node", "classParameterTest", 1 )
op = fnOH.getOp()
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass, None )
self.assertEqual( className, "" )
self.assertEqual( classVersion, 0 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "classParameterReference.ma" ) )
referenceScene = cmds.file( force = True, type = "mayaAscii", save = True )
# make a new scene referencing that file
#######################################################################
cmds.file( new = True, force = True )
cmds.file( referenceScene, reference = True, namespace = "ns1" )
# set the held class and change some attribute values
#######################################################################
fnOH = IECoreMaya.FnOpHolder( "ns1:node" )
op = fnOH.getOp()
with fnOH.parameterModificationContext() :
op["cp"].setClass( "floatParameter", 1, "IECORE_OP_PATHS" )
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "floatParameter" )
self.assertEqual( className, "floatParameter" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
fPlugPath = fnOH.parameterPlugPath( heldClass["f"] )
cmds.setAttr( fPlugPath, -1 )
self.assertEqual( cmds.getAttr( fPlugPath ), -1 )
# save the scene
#######################################################################
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "classParameterReferencer.ma" ) )
referencerScene = cmds.file( force = True, type = "mayaAscii", save = True )
# reload it and check all is well
#######################################################################
cmds.file( new = True, force = True )
cmds.file( referencerScene, force = True, open = True )
fnOH = IECoreMaya.FnOpHolder( "ns1:node" )
op = fnOH.getOp()
heldClass, className, classVersion, searchPath = op["cp"].getClass( True )
self.assertEqual( heldClass.typeName(), "floatParameter" )
self.assertEqual( className, "floatParameter" )
self.assertEqual( classVersion, 1 )
self.assertEqual( searchPath, "IECORE_OP_PATHS" )
fPlugPath = fnOH.parameterPlugPath( heldClass["f"] )
self.assertEqual( cmds.getAttr( fPlugPath ), -1 )
def testClassParameterCompactPlugs( self ) :
class TestOp( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self,
"",
IECore.FloatParameter(
"result",
"",
0.0
),
)
self.parameters().addParameter(
IECore.ClassParameter(
"cp",
"",
"IECORE_OP_PATHS"
)
)
def doOperation( self, operands ) :
return IECore.FloatData( 1 )
node = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnParameterisedHolder( str( node ) )
op = TestOp()
fnOH.setParameterised( op )
with fnOH.parameterModificationContext() :
op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" )
aPlugPath = fnOH.parameterPlugPath( op["cp"]["a"] )
bPlugPath = fnOH.parameterPlugPath( op["cp"]["b"] )
cpPlugPath = fnOH.parameterPlugPath( op["cp"] )
self.assertEqual( cmds.getAttr( cpPlugPath ), [ "maths/multiply", "1", "IECORE_OP_PATHS" ] )
self.failUnless( not cmds.objExists( cpPlugPath + "__className" ) )
self.failUnless( not cmds.objExists( cpPlugPath + "__classVersion" ) )
self.failUnless( not cmds.objExists( cpPlugPath + "__searchPathEnvVar" ) )
self.assertEqual( cmds.getAttr( aPlugPath ), 1 )
self.assertEqual( cmds.getAttr( bPlugPath ), 2 )
def testOpHolderImport( self ) :
# make a file with an op holder in it
#######################################################################
fnOH = IECoreMaya.FnOpHolder.create( "node", "maths/multiply", 2 )
op = fnOH.getOp()
aPlugPath = fnOH.parameterPlugPath( op["a"] )
bPlugPath = fnOH.parameterPlugPath( op["b"] )
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "op.ma" ) )
scene = cmds.file( force = True, type = "mayaAscii", save = True )
# import it into a new scene
#######################################################################
cmds.file( new = True, force = True )
cmds.file( scene, i = True )
cmds.setAttr( aPlugPath, 10 )
cmds.setAttr( bPlugPath, 12 )
self.assertEqual( cmds.getAttr( "node.result" ), 120 )
def testClassVectorParameter( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "node", "classVectorParameterTest", 1 )
op = fnOH.getOp()
c = op["cv"]
self.assertEqual( c.typeName(), "ClassVectorParameter" )
self.assertEqual( len( c.getClasses() ), 0 )
with fnOH.parameterModificationContext() :
c.setClasses(
[
( "mult", "maths/multiply", 1 ),
( "coIO", "compoundObjectInOut", 1 ),
]
)
cl = c.getClasses()
self.failUnless( isinstance( cl, list ) )
self.assertEqual( len( cl ), 2 )
self.assertEqual( cl[0].typeName(), "multiply" )
self.assertEqual( cl[1].typeName(), "compoundObjectInOut" )
self.assertEqual( len( c ), 2 )
self.assertEqual( c.keys(), [ "mult", "coIO" ] )
self.assertEqual( c["mult"].keys(), [ "a", "b" ] )
self.assertEqual( c["coIO"].keys(), [ "input" ] )
cl = c.getClasses( True )
self.failUnless( isinstance( cl, list ) )
self.assertEqual( len( cl ), 2 )
self.assertEqual( cl[0][0].typeName(), "multiply" )
self.assertEqual( cl[1][0].typeName(), "compoundObjectInOut" )
self.assertEqual( cl[0][1], "mult" )
self.assertEqual( cl[1][1], "coIO" )
self.assertEqual( cl[0][2], "maths/multiply" )
self.assertEqual( cl[1][2], "compoundObjectInOut" )
self.assertEqual( cl[0][3], 1 )
self.assertEqual( cl[1][3], 1 )
self.__checkAllParameterPlugs( fnOH, c )
def testClassVectorParameterSaveAndLoad( self ) :
# make an opholder with a ClassVectorParameter, and modify some plug
# values
#####################################################################
fnOH = IECoreMaya.FnOpHolder.create( "node", "classVectorParameterTest", 1 )
op = fnOH.getOp()
c = op["cv"]
self.assertEqual( c.typeName(), "ClassVectorParameter" )
self.assertEqual( len( c.getClasses() ), 0 )
with fnOH.parameterModificationContext() :
c.setClasses(
[
( "mult", "maths/multiply", 1 ),
( "coIO", "compoundObjectInOut", 1 ),
]
)
cl = c.getClasses()
self.failUnless( isinstance( cl, list ) )
self.assertEqual( len( cl ), 2 )
self.assertEqual( cl[0].typeName(), "multiply" )
self.assertEqual( cl[1].typeName(), "compoundObjectInOut" )
self.__checkAllParameterPlugs( fnOH, c )
aPlugPath = fnOH.parameterPlugPath( c["mult"]["a"] )
bPlugPath = fnOH.parameterPlugPath( c["mult"]["b"] )
cmds.setAttr( aPlugPath, 10 )
cmds.setAttr( bPlugPath, 20 )
# save the scene
####################################################################
cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "classVectorParameter.ma" ) )
scene = cmds.file( force = True, type = "mayaAscii", save = True )
# reload it and check we still have what we expect
####################################################################
cmds.file( new = True, force = True )
cmds.file( scene, open = True )
fnOH = IECoreMaya.FnOpHolder( "node" )
op = fnOH.getOp()
cl = op["cv"].getClasses()
self.failUnless( isinstance( cl, list ) )
self.assertEqual( len( cl ), 2 )
self.assertEqual( cl[0].typeName(), "multiply" )
self.assertEqual( cl[1].typeName(), "compoundObjectInOut" )
self.__checkAllParameterPlugs( fnOH, op["cv"] )
self.assertEqual( cmds.getAttr( aPlugPath ), 10 )
self.assertEqual( cmds.getAttr( bPlugPath ), 20 )
def testClassVectorParameterUndo( self ) :
# make an opholder and set a ClassVectorParameter
##########################################################################
fnOH = IECoreMaya.FnOpHolder.create( "node", "classVectorParameterTest", 1 )
op = fnOH.getOp()
c = op["cv"]
self.assertEqual( c.typeName(), "ClassVectorParameter" )
self.assertEqual( len( c.getClasses() ), 0 )
self.assert_( cmds.undoInfo( query=True, state=True ) )
with fnOH.parameterModificationContext() :
c.setClasses(
[
( "mult", "maths/multiply", 1 ),
( "str", "stringParsing", 1 ),
]
)
cl = c.getClasses()
self.failUnless( isinstance( cl, list ) )
self.assertEqual( len( cl ), 2 )
self.assertEqual( cl[0].typeName(), "multiply" )
self.assertEqual( cl[1].typeName(), "stringParsing" )
self.assertEqual( len( c ), 2 )
self.assertEqual( c.keys(), [ "mult", "str" ] )
self.assertEqual( c["mult"].keys(), [ "a", "b" ] )
self.assertEqual( c["str"].keys(), [ "emptyString", "normalString", "stringWithSpace", "stringWithManySpaces" ] )
cl = c.getClasses( True )
self.failUnless( isinstance( cl, list ) )
self.assertEqual( len( cl ), 2 )
self.assertEqual( cl[0][0].typeName(), "multiply" )
self.assertEqual( cl[1][0].typeName(), "stringParsing" )
self.assertEqual( cl[0][1], "mult" )
self.assertEqual( cl[1][1], "str" )
self.assertEqual( cl[0][2], "maths/multiply" )
self.assertEqual( cl[1][2], "stringParsing" )
self.assertEqual( cl[0][3], 1 )
self.assertEqual( cl[1][3], 1 )
self.__checkAllParameterPlugs( fnOH, c )
# undo and check we're back to square one
##########################################################################
cmds.undo()
self.assertEqual( c.getClasses(), [] )
def testClassVectorParameterUndoWithPreviousValues( self ) :
# make an opholder with a ClassVectorParameter, and modify some plug
# values
#####################################################################
fnOH = IECoreMaya.FnOpHolder.create( "node", "classVectorParameterTest", 1 )
op = fnOH.getOp()
c = op["cv"]
self.assertEqual( c.typeName(), "ClassVectorParameter" )
self.assertEqual( len( c.getClasses() ), 0 )
with fnOH.parameterModificationContext() :
c.setClasses(
[
( "mult", "maths/multiply", 1 ),
]
)
cl = c.getClasses( True )
self.failUnless( isinstance( cl, list ) )
self.assertEqual( len( cl ), 1 )
self.assertEqual( cl[0][0].typeName(), "multiply" )
self.assertEqual( cl[0][1], "mult" )
self.assertEqual( cl[0][2], "maths/multiply" )
self.assertEqual( cl[0][3], 1 )
self.__checkAllParameterPlugs( fnOH, c )
aPlugPath = fnOH.parameterPlugPath( c["mult"]["a"] )
bPlugPath = fnOH.parameterPlugPath( c["mult"]["b"] )
cmds.setAttr( aPlugPath, 10 )
cmds.setAttr( bPlugPath, 20 )
self.assertEqual( cmds.getAttr( aPlugPath ), 10 )
self.assertEqual( cmds.getAttr( bPlugPath ), 20 )
# change set of held classes to something totally different
# and check that it worked
#####################################################################
with fnOH.parameterModificationContext() :
c.setClasses(
[
( "str", "stringParsing", 1 ),
( "spl", "splineInput", 1 ),
]
)
cl = c.getClasses( True )
self.failUnless( isinstance( cl, list ) )
self.assertEqual( len( cl ), 2 )
self.assertEqual( cl[0][0].typeName(), "stringParsing" )
self.assertEqual( cl[1][0].typeName(), "splineInput" )
self.assertEqual( cl[0][1], "str" )
self.assertEqual( cl[1][1], "spl" )
self.assertEqual( cl[0][2], "stringParsing" )
self.assertEqual( cl[1][2], "splineInput" )
self.assertEqual( cl[0][3], 1 )
self.assertEqual( cl[1][3], 1 )
self.__checkAllParameterPlugs( fnOH, c )
# undo and check we're back where we want to be
#####################################################################
cmds.undo()
cl = c.getClasses( True )
self.failUnless( isinstance( cl, list ) )
self.assertEqual( len( cl ), 1 )
self.assertEqual( cl[0][0].typeName(), "multiply" )
self.assertEqual( cl[0][1], "mult" )
self.assertEqual( cl[0][2], "maths/multiply" )
self.assertEqual( cl[0][3], 1 )
self.__checkAllParameterPlugs( fnOH, c )
self.assertEqual( cmds.getAttr( aPlugPath ), 10 )
self.assertEqual( cmds.getAttr( bPlugPath ), 20 )
def testSetParameterisedUndo( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "opHolder", "stringParsing", 1 )
op = fnOH.getOp()
self.assertEqual( op.typeName(), "stringParsing" )
self.__checkAllParameterPlugs( fnOH )
self.assert_( cmds.undoInfo( query=True, state=True ) )
fnOH.setOp( "maths/multiply", 1 )
op = fnOH.getOp()
self.assertEqual( op.typeName(), "multiply" )
self.__checkAllParameterPlugs( fnOH )
cmds.undo()
op = fnOH.getOp()
self.assertEqual( op.typeName(), "stringParsing" )
self.__checkAllParameterPlugs( fnOH )
cmds.redo()
op = fnOH.getOp()
self.assertEqual( op.typeName(), "multiply" )
self.__checkAllParameterPlugs( fnOH )
def testCreateOpHolderUndo( self ) :
self.assert_( cmds.undoInfo( query=True, state=True ) )
fnOH = IECoreMaya.FnOpHolder.create( "opHolder", "stringParsing", 1 )
self.failUnless( cmds.objExists( "opHolder" ) )
cmds.undo()
self.failIf( cmds.objExists( "opHolder" ) )
def testCreateParameterisedHolderSetUndo( self ) :
self.assert_( cmds.undoInfo( query=True, state=True ) )
fnOH = IECoreMaya.FnParameterisedHolderSet.create( "mySet", "stringParsing", 1, "IECORE_OP_PATHS" )
self.failUnless( cmds.objExists( "mySet" ) )
cmds.undo()
self.failIf( cmds.objExists( "mySet" ) )
def testSetParameterisedCallbacks( self ) :
self.__numCallbacks = 0
def c( fnPH ) :
self.assertEqual( fnPH.fullPathName(), "opHolder" )
self.__numCallbacks += 1
IECoreMaya.FnParameterisedHolder.addSetParameterisedCallback( c )
fnOH = IECoreMaya.FnOpHolder.create( "opHolder", "stringParsing", 1 )
self.assertEqual( self.__numCallbacks, 1 )
fnOH.setOp( "maths/multiply", 1 )
self.assertEqual( self.__numCallbacks, 2 )
cmds.undo()
self.assertEqual( self.__numCallbacks, 3 )
cmds.redo()
self.assertEqual( self.__numCallbacks, 4 )
IECoreMaya.FnParameterisedHolder.removeSetParameterisedCallback( c )
def testSetParameterisedAndUndoOnEmptyHolder( self ) :
n = cmds.createNode( "ieProceduralHolder" )
fnPh = IECoreMaya.FnParameterisedHolder( n )
self.assertEqual( fnPh.getParameterised()[0], None )
fnPh.setParameterised( "read", "-1", "IECORE_PROCEDURAL_PATHS" )
self.assertEqual( fnPh.getParameterised()[1], "read" )
cmds.undo()
self.assertEqual( fnPh.getParameterised()[0], None )
def testEditClassParameters( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "opHolder", "classParameterTest", 1 )
op = fnOH.getOp()
with fnOH.parameterModificationContext() :
op["cp"].setClass( "classParameterTest", 1 )
op["cp"]["cp"].setClass( "maths/multiply", 1 )
self.__checkAllParameterPlugs( fnOH )
aPlugPath = fnOH.parameterPlugPath( op["cp"]["cp"]["a"] )
self.failUnless( cmds.objExists( aPlugPath ) )
cmds.undo()
self.__checkAllParameterPlugs( fnOH )
self.assertEqual( op["cp"].getClass(), None )
self.failIf( cmds.objExists( aPlugPath ) )
cmds.redo()
self.__checkAllParameterPlugs( fnOH )
aPlugPath = fnOH.parameterPlugPath( op["cp"]["cp"]["a"] )
self.failUnless( cmds.objExists( aPlugPath ) )
def testChangeClassAndRevertToClassWithClassParameters( self ) :
## create a holder and put an op using ClassParameter in there
fnOH = IECoreMaya.FnOpHolder.create( "opHolder", "classParameterTest", 1 )
op = fnOH.getOp()
with fnOH.parameterModificationContext() :
op["cp"].setClass( "classParameterTest", 1 )
op["cp"]["cp"].setClass( "maths/multiply", 1 )
self.__checkAllParameterPlugs( fnOH )
aPlugPath = fnOH.parameterPlugPath( op["cp"]["cp"]["a"] )
self.failUnless( cmds.objExists( aPlugPath ) )
## change the values being held
cmds.setAttr( aPlugPath, 123 )
## change the op to be something simple
fnOH.setOp( "maths/multiply", 1 )
self.failIf( cmds.objExists( aPlugPath ) )
## undo, and check we get all the original held classes and values back
cmds.undo()
op = fnOH.getOp()
self.assertEqual( op["cp"]["cp"].getClass( True )[1:3], ( "maths/multiply", 1 ) )
self.assertEqual( op["cp"]["cp"]["a"].getNumericValue(), 123 )
aPlugPath = fnOH.parameterPlugPath( op["cp"]["cp"]["a"] )
self.assertEqual( cmds.getAttr( aPlugPath ), 123 )
def testUpgradeClassWithClassVectorParameter( self ) :
# create a holder with a ClassVectorParameter with some classes
fnOH = IECoreMaya.FnOpHolder.create( "opHolder", "classVectorParameterTest", 1 )
op = fnOH.getOp()
with fnOH.parameterModificationContext() :
op["cv"].setClasses( [
( "m", "maths/multiply", 1 ),
( "n", "maths/multiply", 1 ),
] )
c = op["cv"].getClasses( True )
self.assertEqual( len( c ), 2 )
self.assertEqual( c[0][1:], ( "m", "maths/multiply", 1 ) )
self.assertEqual( c[1][1:], ( "n", "maths/multiply", 1 ) )
aPlugPath = fnOH.parameterPlugPath( op["cv"]["m"]["a"] )
# upgrade the parameterised class
fnOH.setOp( "classVectorParameterTest", 2 )
self.assertEqual( fnOH.getParameterised()[1:-1], ( "classVectorParameterTest", 2 ) )
# and check the classes are still intact
op = fnOH.getOp()
c = op["cv"].getClasses( True )
self.assertEqual( len( c ), 2 )
self.assertEqual( c[0][1:], ( "m", "maths/multiply", 1 ) )
self.assertEqual( c[1][1:], ( "n", "maths/multiply", 1 ) )
# undo the upgrade
cmds.undo()
self.assertEqual( fnOH.getParameterised()[1:-1], ( "classVectorParameterTest", 1 ) )
# and check the classes are still intact again
op = fnOH.getOp()
c = op["cv"].getClasses( True )
self.assertEqual( len( c ), 2 )
self.assertEqual( c[0][1:], ( "m", "maths/multiply", 1 ) )
self.assertEqual( c[1][1:], ( "n", "maths/multiply", 1 ) )
def testClassParameterCallbacks( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "opHolder", "classParameterTest", 1 )
op = fnOH.getOp()
self.__numCallbacks = 0
def c( fnPH, parameter ) :
self.assertEqual( fnPH.fullPathName(), "opHolder" )
self.assertEqual( parameter.name, "cp" )
self.__numCallbacks += 1
IECoreMaya.FnParameterisedHolder.addSetClassParameterClassCallback( c )
with fnOH.parameterModificationContext() :
op["cp"].setClass( "maths/multiply", 1 )
self.assertEqual( self.__numCallbacks, 1 )
cmds.undo()
self.assertEqual( self.__numCallbacks, 2 )
cmds.redo()
self.assertEqual( self.__numCallbacks, 3 )
# setting the class to the same thing it already is should have
# no effect.
with fnOH.parameterModificationContext() :
op["cp"].setClass( "maths/multiply", 1 )
self.assertEqual( self.__numCallbacks, 3 )
IECoreMaya.FnParameterisedHolder.removeSetClassParameterClassCallback( c )
def testClassVectorParameterCallbacks( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "opHolder", "classVectorParameterTest", 1 )
op = fnOH.getOp()
self.__numCallbacks = 0
def c( fnPH, parameter ) :
self.assertEqual( fnPH.fullPathName(), "opHolder" )
self.assertEqual( parameter.name, "cv" )
self.__numCallbacks += 1
IECoreMaya.FnParameterisedHolder.addSetClassVectorParameterClassesCallback( c )
with fnOH.parameterModificationContext() :
op["cv"].setClasses( [
( "m", "maths/multiply", 1 ),
( "n", "maths/multiply", 1 ),
] )
self.assertEqual( self.__numCallbacks, 1 )
cmds.undo()
self.assertEqual( self.__numCallbacks, 2 )
cmds.redo()
self.assertEqual( self.__numCallbacks, 3 )
# setting the class to the same thing it already is should have
# no effect.
with fnOH.parameterModificationContext() :
op["cv"].setClasses( [
( "m", "maths/multiply", 1 ),
( "n", "maths/multiply", 1 ),
] )
self.assertEqual( self.__numCallbacks, 3 )
IECoreMaya.FnParameterisedHolder.removeSetClassVectorParameterClassesCallback( c )
def testClassVectorParameterCompactPlugs( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "node", "classVectorParameterTest", 1 )
op = fnOH.getOp()
c = op["cv"]
self.assertEqual( c.typeName(), "ClassVectorParameter" )
self.assertEqual( len( c.getClasses() ), 0 )
with fnOH.parameterModificationContext() :
c.setClasses( [
( "mult", "maths/multiply", 1 ),
( "coIO", "compoundObjectInOut", 1 ),
] )
cPlugPath = fnOH.parameterPlugPath( c )
self.assertEqual( cmds.getAttr( cPlugPath ), [ "mult", "maths/multiply", "1", "coIO", "compoundObjectInOut", "1" ] )
self.failUnless( not cmds.objExists( cPlugPath + "__parameterNames" ) )
self.failUnless( not cmds.objExists( cPlugPath + "__classNames" ) )
self.failUnless( not cmds.objExists( cPlugPath + "__classVersions" ) )
def testNumericParameterMinMax( self ) :
# test no range
op = IECore.Op( "", IECore.IntParameter( "result", "", 0 ) )
op.parameters().addParameter(
IECore.IntParameter(
"i",
"d",
0
)
)
opNode = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnOpHolder( opNode )
fnOH.setParameterised( op )
iPlugPath = fnOH.parameterPlugPath( op["i"] )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], minExists=True, node=opNode ), False )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], maxExists=True, node=opNode ), False )
# test min only
op = IECore.Op( "", IECore.IntParameter( "result", "", 0 ) )
op.parameters().addParameter(
IECore.IntParameter(
"i",
"d",
0,
minValue = -10
)
)
opNode = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnOpHolder( opNode )
fnOH.setParameterised( op )
iPlugPath = fnOH.parameterPlugPath( op["i"] )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], minExists=True, node=opNode ), True )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], minimum=True, node=opNode )[0], -10 )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], maxExists=True, node=opNode ), False )
# test min and max
op = IECore.Op( "", IECore.IntParameter( "result", "", 0 ) )
op.parameters().addParameter(
IECore.IntParameter(
"i",
"d",
0,
minValue = -10,
maxValue = 10
)
)
opNode = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnOpHolder( opNode )
fnOH.setParameterised( op )
iPlugPath = fnOH.parameterPlugPath( op["i"] )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], minExists=True, node=opNode ), True )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], minimum=True, node=opNode )[0], -10 )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], maxExists=True, node=opNode ), True )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], maximum=True, node=opNode )[0], 10 )
def testNumericParameterRangeAdded( self ) :
op = IECore.Op( "", IECore.IntParameter( "result", "", 0 ) )
op.parameters().addParameter(
IECore.IntParameter(
"i",
"d",
0
)
)
opNode = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnOpHolder( opNode )
fnOH.setParameterised( op )
iPlugPath = fnOH.parameterPlugPath( op["i"] )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], minExists=True, node=opNode ), False )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], maxExists=True, node=opNode ), False )
op = IECore.Op( "", IECore.IntParameter( "result", "", 0 ) )
op.parameters().addParameter(
IECore.IntParameter(
"i",
"d",
0,
minValue = -2,
maxValue = 2,
)
)
fnOH.setParameterised( op )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], minExists=True, node=opNode ), True )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], minimum=True, node=opNode )[0], -2 )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], maxExists=True, node=opNode ), True )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], maximum=True, node=opNode )[0], 2 )
def testNumericParameterRangeRemoved( self ) :
op = IECore.Op( "", IECore.IntParameter( "result", "", 0 ) )
op.parameters().addParameter(
IECore.IntParameter(
"i",
"d",
0,
minValue = -2,
maxValue = 2,
)
)
opNode = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnOpHolder( opNode )
fnOH.setParameterised( op )
iPlugPath = fnOH.parameterPlugPath( op["i"] )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], minExists=True, node=opNode ), True )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], minimum=True, node=opNode )[0], -2 )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], maxExists=True, node=opNode ), True )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], maximum=True, node=opNode )[0], 2 )
op = IECore.Op( "", IECore.IntParameter( "result", "", 0 ) )
op.parameters().addParameter(
IECore.IntParameter(
"i",
"d",
0
)
)
fnOH.setParameterised( op )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], minExists=True, node=opNode ), False )
self.assertEqual( cmds.attributeQuery( iPlugPath.rpartition( "." )[-1], maxExists=True, node=opNode ), False )
def testParameterTypeChanges( self ) :
"""Test maya attribute type with changing parameters types."""
n = cmds.createNode( 'ieParameterisedHolderNode' )
a = IECore.Parameterised( "a" )
a.parameters().addParameter( IECore.IntParameter( "theParameter", "", 1 ) )
b = IECore.Parameterised( "b" )
b.parameters().addParameter( IECore.StringParameter( "theParameter", "", "" ) )
c = IECore.Parameterised( "c" )
c.parameters().addParameter( IECore.FloatParameter( "theParameter", "", 1.0 ) )
fnPH = IECoreMaya.FnParameterisedHolder( n )
fnPH.setParameterised( a )
fnPH.setNodeValues()
# Check the Maya attribute holds an int.
plugPath = fnPH.parameterPlugPath( a["theParameter"] )
cmds.setAttr( plugPath, 2.75 )
self.assertEqual( cmds.getAttr(plugPath), 3 )
fnPH.setParameterised( b )
fnPH.setNodeValues()
# Should be a string now
plugPath = fnPH.parameterPlugPath( b["theParameter"] )
cmds.setAttr( plugPath, "test", type="string" )
self.assertEqual( cmds.getAttr(plugPath), "test" )
fnPH.setParameterised( c )
fnPH.setNodeValues()
# Should be a float now
plugPath = fnPH.parameterPlugPath( c["theParameter"] )
cmds.setAttr( plugPath, 3.75 )
self.assertEqual( cmds.getAttr(plugPath), 3.75 )
fnPH.setParameterised( a )
fnPH.setNodeValues()
# Should be an int again
plugPath = fnPH.parameterPlugPath( a["theParameter"] )
cmds.setAttr( plugPath, 4.75 )
self.assertEqual( cmds.getAttr(plugPath), 5 )
def testRemoveLockedAttributes( self ) :
op = IECore.Op( "", IECore.IntParameter( "result", "", 0 ) )
op.parameters().addParameter(
IECore.IntParameter(
"i",
"d",
0
)
)
opNode = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnOpHolder( opNode )
fnOH.setParameterised( op )
iPlugPath = fnOH.parameterPlugPath( op["i"] )
cmds.setAttr( iPlugPath, lock=True )
del op.parameters()["i"]
fnOH.setParameterised( op )
self.failIf( cmds.objExists( iPlugPath ) )
def testRemoveLockedChildAttributes( self ) :
op = IECore.Op( "", IECore.IntParameter( "result", "", 0 ) )
op.parameters().addParameter(
IECore.V3fParameter(
"v",
"d",
IECore.V3f( 0 ),
)
)
opNode = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnOpHolder( opNode )
fnOH.setParameterised( op )
vPlugPath = fnOH.parameterPlugPath( op["v"] )
cmds.setAttr( vPlugPath + "X", lock=True )
del op.parameters()["v"]
fnOH.setParameterised( op )
self.failIf( cmds.objExists( vPlugPath ) )
def testStorable( self ) :
op = IECore.Op( "", IECore.IntParameter( "result", "", 0 ) )
op.parameters().addParameters( [
IECore.BoolParameter(
name = "a",
description = "",
defaultValue = True,
),
IECore.IntParameter(
name = "b",
description = "",
defaultValue = 1,
userData = IECore.CompoundObject( { "maya" : { "storable" : IECore.BoolData( False ) } } )
),
IECore.StringParameter(
name = "c",
description = "",
defaultValue = "",
userData = IECore.CompoundObject( { "maya" : { "storable" : IECore.BoolData( True ) } } )
),
] )
opNode = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnOpHolder( opNode )
fnOH.setParameterised( op )
self.assertEqual( cmds.attributeQuery( fnOH.parameterPlugPath( op["a"] ).split( "." )[-1], storable=True, node=opNode ), True )
self.assertEqual( cmds.attributeQuery( fnOH.parameterPlugPath( op["b"] ).split( "." )[-1], storable=True, node=opNode ), False )
self.assertEqual( cmds.attributeQuery( fnOH.parameterPlugPath( op["c"] ).split( "." )[-1], storable=True, node=opNode ), True )
with fnOH.parameterModificationContext() :
op["a"].userData()["maya"] = IECore.CompoundObject( { "storable" : IECore.BoolData( False ) } )
op["b"].userData()["maya"]["storable"] = IECore.BoolData( True )
self.assertEqual( cmds.attributeQuery( fnOH.parameterPlugPath( op["a"] ).split( "." )[-1], storable=True, node=opNode ), False )
self.assertEqual( cmds.attributeQuery( fnOH.parameterPlugPath( op["b"] ).split( "." )[-1], storable=True, node=opNode ), True )
def testBadArgsDoNotSegFault( self ) :
opNode = cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnOpHolder( opNode )
self.assertRaises( RuntimeError, IECore.curry( fnOH.setOp, "fake", -1 ) )
def testShouldSave( self ) :
class TestProcedural( IECore.ParameterisedProcedural ) :
def __init__( self ) :
IECore.ParameterisedProcedural.__init__( self, "" )
self.parameters().addParameter(
IECore.V3fParameter(
"halfSize",
"",
IECore.V3f( 0 )
)
)
def doBound( self, args ) :
return IECore.Box3f( -args["halfSize"].value, args["halfSize"].value )
def doRenderState( self, args ) :
pass
def doRender( self, args ) :
pass
node = cmds.createNode( "ieProceduralHolder" )
fnPH = IECoreMaya.FnParameterisedHolder( str( node ) )
p = TestProcedural()
fnPH.setParameterised( p )
cmds.setAttr( node + ".nodeState", 4 )
# Save the scene out so we can reference it
filename = os.path.join( os.getcwd(), "test", "IECoreMaya", "shouldSaveAttributes.ma")
cmds.file( rename = filename )
referenceScene = cmds.file( force = True, type = "mayaAscii", save = True )
mayaFile = open( filename, 'r' )
setAttrs = mayaFile.read().partition("createNode ieProceduralHolder")[2].partition("createNode")[0].split("\n")[1:]
splitAttrs = [i.split('"') for i in setAttrs if "setAttr" in i]
savedAttrNames = [ i[1] for i in splitAttrs if len(i) >= 2]
mayaFile.close()
self.assertTrue( ".nds" in savedAttrNames ) # Check that the nodeState attr we changed has been written
self.assertTrue( not ".ihi" in savedAttrNames ) # Check that the isHistoricallyInteresting parm that is left default is not exported
# Parm parameters are always saved, even when left default ( for backwards compatibility reasons )
self.assertTrue( ".parm_halfSize" in savedAttrNames, msg = " ".join( savedAttrNames ) ) # This test can be removed if we decide our parameters don't require a special case
def tearDown( self ) :
for f in [
"test/IECoreMaya/op.ma" ,
"test/IECoreMaya/defaultConnections.ma" ,
"test/IECoreMaya/compoundObjectConnections.ma" ,
"test/IECoreMaya/reference.ma" ,
"test/IECoreMaya/referenceMaster.ma",
"test/IECoreMaya/classParameterReference.ma" ,
"test/IECoreMaya/classParameterReferencer.ma" ,
"test/IECoreMaya/objectParameterIO.ma",
"test/IECoreMaya/objectMFnDataParameterIO.ma",
"test/IECoreMaya/imageProcedural.ma",
"test/IECoreMaya/classParameter.ma",
"test/IECoreMaya/classVectorParameter.ma",
"test/IECoreMaya/nonStorableObjectParameter.ma",
"test/IECoreMaya/connectedNodeReference.ma",
"test/IECoreMaya/connectedNodeReference2.ma",
"test/IECoreMaya/meshParameterIO.ma",
"test/IECoreMaya/shouldSaveAttributes.ma",
] :
if os.path.exists( f ) :
os.remove( f )
if __name__ == "__main__":
IECoreMaya.TestProgram( plugins = [ "ieCore" ] )
|
code-google-com/cortex-vfx
|
test/IECoreMaya/ParameterisedHolder.py
|
Python
|
bsd-3-clause
| 67,200
|
from __future__ import absolute_import, print_function
from django.core.urlresolvers import reverse
from exam import fixture
from sentry.models import ProjectKey, ProjectKeyStatus
from sentry.testutils import TestCase
class EnableProjectKeyTest(TestCase):
def setUp(self):
super(EnableProjectKeyTest, self).setUp()
self.key = ProjectKey.objects.create(
project=self.project,
status=ProjectKeyStatus.INACTIVE,
)
@fixture
def path(self):
return reverse('sentry-enable-project-key', args=[self.organization.slug, self.project.slug, self.key.id])
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path, 'POST')
def test_does_enable(self):
self.login_as(self.user)
resp = self.client.post(self.path)
assert resp.status_code == 302
key = ProjectKey.objects.get(id=self.key.id)
assert key.status == ProjectKeyStatus.ACTIVE
|
alexm92/sentry
|
tests/sentry/web/frontend/test_enable_project_key.py
|
Python
|
bsd-3-clause
| 977
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class StudentBatchCreationTool(Document):
def make_batch(self):
if self.academic_year and self.program and self.student_batch_name:
students = frappe.get_list("Program Enrollment", fields=["student", "student_name"],
filters={"academic_year":self.academic_year, "program": self.program, "student_batch_name": self.student_batch_name},
order_by= "student_name")
if students:
student_batch = frappe.new_doc("Student Batch")
student_batch.update({
"academic_year": self.academic_year,
"program": self.program,
"student_batch_name": self.student_batch_name,
"students": students
})
student_batch.save()
frappe.msgprint("Student Batch created.")
else:
frappe.msgprint("No students found.")
|
hassanibi/erpnext
|
erpnext/schools/doctype/student_batch_creation_tool/student_batch_creation_tool.py
|
Python
|
gpl-3.0
| 994
|
from __future__ import unicode_literals
try:
from unittest import skipIf
except ImportError:
from django.utils.unittest.case import skipIf
import django
from django.test import TestCase
from django.test.utils import override_settings
from django.core.exceptions import ValidationError
from django.utils import timezone
from ..models import get_application_model, Grant, AccessToken, RefreshToken
from ..compat import get_user_model
Application = get_application_model()
UserModel = get_user_model()
class TestModels(TestCase):
def setUp(self):
self.user = UserModel.objects.create_user("test_user", "test@user.com", "123456")
def test_allow_scopes(self):
self.client.login(username="test_user", password="123456")
app = Application.objects.create(
name="test_app",
redirect_uris="http://localhost http://example.com http://example.it",
user=self.user,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
)
access_token = AccessToken(
user=self.user,
scope='read write',
expires=0,
token='',
application=app
)
self.assertTrue(access_token.allow_scopes(['read', 'write']))
self.assertTrue(access_token.allow_scopes(['write', 'read']))
self.assertTrue(access_token.allow_scopes(['write', 'read', 'read']))
self.assertTrue(access_token.allow_scopes([]))
self.assertFalse(access_token.allow_scopes(['write', 'destroy']))
def test_grant_authorization_code_redirect_uris(self):
app = Application(
name="test_app",
redirect_uris="",
user=self.user,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
)
self.assertRaises(ValidationError, app.full_clean)
def test_grant_implicit_redirect_uris(self):
app = Application(
name="test_app",
redirect_uris="",
user=self.user,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_IMPLICIT,
)
self.assertRaises(ValidationError, app.full_clean)
def test_str(self):
app = Application(
redirect_uris="",
user=self.user,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_IMPLICIT,
)
self.assertEqual("%s" % app, app.client_id)
app.name = "test_app"
self.assertEqual("%s" % app, "test_app")
@skipIf(django.VERSION < (1, 5), "Behavior is broken on 1.4 and there is no solution")
@override_settings(OAUTH2_PROVIDER_APPLICATION_MODEL='tests.TestApplication')
class TestCustomApplicationModel(TestCase):
def setUp(self):
self.user = UserModel.objects.create_user("test_user", "test@user.com", "123456")
def test_related_objects(self):
"""
If a custom application model is installed, it should be present in
the related objects and not the swapped out one.
See issue #90 (https://github.com/evonove/django-oauth-toolkit/issues/90)
"""
# Django internals caches the related objects.
if django.VERSION < (1, 8):
del UserModel._meta._related_objects_cache
related_object_names = [ro.name for ro in UserModel._meta.get_all_related_objects()]
self.assertNotIn('oauth2_provider:application', related_object_names)
self.assertIn('tests%stestapplication' % (':' if django.VERSION < (1, 8) else '_'),
related_object_names)
class TestGrantModel(TestCase):
def test_str(self):
grant = Grant(code="test_code")
self.assertEqual("%s" % grant, grant.code)
class TestAccessTokenModel(TestCase):
def setUp(self):
self.user = UserModel.objects.create_user("test_user", "test@user.com", "123456")
def test_str(self):
access_token = AccessToken(token="test_token")
self.assertEqual("%s" % access_token, access_token.token)
def test_user_can_be_none(self):
app = Application.objects.create(
name="test_app",
redirect_uris="http://localhost http://example.com http://example.it",
user=self.user,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
)
access_token = AccessToken.objects.create(token="test_token", application=app, expires=timezone.now())
self.assertIsNone(access_token.user)
class TestRefreshTokenModel(TestCase):
def test_str(self):
refresh_token = RefreshToken(token="test_token")
self.assertEqual("%s" % refresh_token, refresh_token.token)
|
jensadne/django-oauth-toolkit
|
oauth2_provider/tests/test_models.py
|
Python
|
bsd-2-clause
| 4,920
|
"""
A dialog widget for selecting an item.
"""
from PyQt4.QtGui import QDialog, QWidget, QVBoxLayout, QDialogButtonBox, \
QStringListModel, QLabel, QSizePolicy
from PyQt4.QtCore import Qt
from PyQt4.QtCore import pyqtSignal as Signal
from . import previewbrowser
class PreviewDialog(QDialog):
"""A Dialog for selecting an item from a PreviewItem.
"""
currentIndexChanged = Signal(int)
def __init__(self, parent=None, flags=Qt.WindowFlags(0),
model=None, **kwargs):
QDialog.__init__(self, parent, flags, **kwargs)
self.__setupUi()
if model is not None:
self.setModel(model)
def __setupUi(self):
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.setContentsMargins(0, 0, 0, 0)
heading = self.tr("Preview")
heading = "<h3>{0}</h3>".format(heading)
self.__heading = QLabel(heading, self,
objectName="heading")
self.__heading.setContentsMargins(12, 12, 12, 0)
self.__browser = previewbrowser.PreviewBrowser(self)
self.__buttons = QDialogButtonBox(QDialogButtonBox.Open | \
QDialogButtonBox.Cancel,
Qt.Horizontal,)
self.__buttons.button(QDialogButtonBox.Open).setAutoDefault(True)
# Disable the Open button until the current index changes
self.__buttons.button(QDialogButtonBox.Open).setEnabled(False)
# The QDialogButtonsWidget messes with the layout if it is
# contained directly in the QDialog. So we create an extra
# layer of indirection.
buttons = QWidget(objectName="button-container")
buttons_l = QVBoxLayout()
buttons_l.setContentsMargins(12, 0, 12, 12)
buttons.setLayout(buttons_l)
buttons_l.addWidget(self.__buttons)
layout.addWidget(self.__heading)
layout.addWidget(self.__browser)
layout.addWidget(buttons)
self.__buttons.accepted.connect(self.accept)
self.__buttons.rejected.connect(self.reject)
self.__browser.currentIndexChanged.connect(
self.__on_currentIndexChanged
)
self.__browser.activated.connect(self.__on_activated)
layout.setSizeConstraint(QVBoxLayout.SetFixedSize)
self.setLayout(layout)
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
def setItems(self, items):
"""Set the items (a list of strings) for preview/selection.
"""
model = QStringListModel(items)
self.setModel(model)
def setModel(self, model):
"""Set the model for preview/selection.
"""
self.__browser.setModel(model)
def model(self):
"""Return the model.
"""
return self.__browser.model()
def currentIndex(self):
return self.__browser.currentIndex()
def setCurrentIndex(self, index):
"""Set the current selected (shown) index.
"""
self.__browser.setCurrentIndex(index)
def setHeading(self, heading):
"""Set `heading` as the heading string ('<h3>Preview</h3>'
by default).
"""
self.__heading.setText(heading)
def heading(self):
    """Return the heading string.
    """
    return self.__heading.text()
def __on_currentIndexChanged(self, index):
button = self.__buttons.button(QDialogButtonBox.Open)
button.setEnabled(index >= 0)
self.currentIndexChanged.emit(index)
def __on_activated(self, index):
if self.currentIndex() != index:
self.setCurrentIndex(index)
self.accept()
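# Minimal runnable sketch (added for illustration, not part of the original
# module; the item names are invented and a display is required):
if __name__ == "__main__":
    import sys
    from PyQt4.QtGui import QApplication

    app = QApplication(sys.argv)
    dialog = PreviewDialog()
    dialog.setItems(["Workflow A", "Workflow B"])
    if dialog.exec_() == QDialog.Accepted:
        print(dialog.currentIndex())  # index of the item chosen with Open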
|
marinkaz/orange3
|
Orange/canvas/preview/previewdialog.py
|
Python
|
bsd-2-clause
| 3,701
|
CONSTANT1 = 10
CONSTANT2 = 20
|
siddhika1889/Pydev-Editor
|
tests/pysrc/extendable/constants/__init__.py
|
Python
|
epl-1.0
| 29
|
import os
import re
import subprocess
from .. import vcs
from ..vcs import bind_to_repo, git, hg
def get_unique_name(existing, initial):
"""Get a name either equal to initial or of the form initial_N, for some
integer N, that is not in the set existing.
:param existing: Set of names that must not be chosen.
:param initial: Name, or name prefix, to use"""
if initial not in existing:
return initial
for i in xrange(len(existing) + 1):
test = "%s_%s" % (initial, i + 1)
if test not in existing:
return test
assert False
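# Illustrative examples (added for clarity, not part of the original module;
# the names below are invented):
#
#   >>> get_unique_name({"branch"}, "feature")
#   'feature'
#   >>> get_unique_name({"feature"}, "feature")
#   'feature_1'
#   >>> get_unique_name({"feature", "feature_1"}, "feature")
#   'feature_2'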
class NoVCSTree(object):
name = "non-vcs"
def __init__(self, root=None):
if root is None:
root = os.path.abspath(os.curdir)
self.root = root
@classmethod
def is_type(cls, path=None):
return True
@property
def is_clean(self):
return True
def add_new(self, prefix=None):
pass
def create_patch(self, patch_name, message):
pass
def update_patch(self, include=None):
pass
def commit_patch(self):
pass
class HgTree(object):
name = "mercurial"
def __init__(self, root=None):
if root is None:
root = hg("root").strip()
self.root = root
self.hg = vcs.bind_to_repo(hg, self.root)
def __getstate__(self):
rv = self.__dict__.copy()
del rv['hg']
return rv
def __setstate__(self, dict):
self.__dict__.update(dict)
self.hg = vcs.bind_to_repo(vcs.hg, self.root)
@classmethod
def is_type(cls, path=None):
kwargs = {"log_error": False}
if path is not None:
kwargs["repo"] = path
try:
hg("root", **kwargs)
except Exception:
return False
return True
@property
def is_clean(self):
return self.hg("status").strip() == ""
def add_new(self, prefix=None):
if prefix is not None:
args = ("-I", prefix)
else:
args = ()
self.hg("add", *args)
def create_patch(self, patch_name, message):
try:
self.hg("qinit", log_error=False)
except subprocess.CalledProcessError:
pass
patch_names = [item.strip() for item in self.hg("qseries").split("\n") if item.strip()]
suffix = 0
test_name = patch_name
while test_name in patch_names:
suffix += 1
test_name = "%s-%i" % (patch_name, suffix)
self.hg("qnew", test_name, "-X", self.root, "-m", message)
def update_patch(self, include=None):
if include is not None:
args = []
for item in include:
args.extend(["-I", item])
else:
args = ()
self.hg("qrefresh", *args)
return True
def commit_patch(self):
self.hg("qfinish")
def contains_commit(self, commit):
try:
self.hg("identify", "-r", commit.sha1)
return True
except subprocess.CalledProcessError:
return False
class GitTree(object):
name = "git"
def __init__(self, root=None):
if root is None:
root = git("rev-parse", "--show-toplevel").strip()
self.root = root
self.git = vcs.bind_to_repo(git, self.root)
self.message = None
self.commit_cls = Commit
def __getstate__(self):
rv = self.__dict__.copy()
del rv['git']
return rv
def __setstate__(self, dict):
self.__dict__.update(dict)
self.git = vcs.bind_to_repo(vcs.git, self.root)
@classmethod
def is_type(cls, path=None):
kwargs = {"log_error": False}
if path is not None:
kwargs["repo"] = path
try:
git("rev-parse", "--show-toplevel", **kwargs)
except Exception:
return False
return True
@property
def rev(self):
"""Current HEAD revision"""
if vcs.is_git_root(self.root):
return self.git("rev-parse", "HEAD").strip()
else:
return None
@property
def is_clean(self):
return self.git("status").strip() == ""
def add_new(self, prefix=None):
"""Add files to the staging area.
:param prefix: None to include all files or a path prefix to
add all files under that path.
"""
if prefix is None:
args = ("-a",)
else:
args = ("--no-ignore-removal", prefix)
self.git("add", *args)
def list_refs(self, ref_filter=None):
"""Get a list of sha1, name tuples for references in a repository.
:param ref_filter: Pattern that reference name must match (from the end,
matching whole /-delimited segments only
"""
args = []
if ref_filter is not None:
args.append(ref_filter)
data = self.git("show-ref", *args)
rv = []
for line in data.split("\n"):
if not line.strip():
continue
sha1, ref = line.split()
rv.append((sha1, ref))
return rv
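# Example of the returned structure (illustrative only; the sha1 and ref name
# are invented):
#
#   >>> tree = GitTree()                      # doctest: +SKIP
#   >>> tree.list_refs("master")              # doctest: +SKIP
#   [('1111111111111111111111111111111111111111', 'refs/heads/master')]
#
# Each tuple mirrors one line of `git show-ref`: (sha1, fully qualified ref).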
def list_remote(self, remote, ref_filter=None):
"""Return a list of (sha1, name) tupes for references in a remote.
:param remote: URL of the remote to list.
:param ref_filter: Pattern that the reference name must match.
"""
args = []
if ref_filter is not None:
args.append(ref_filter)
data = self.git("ls-remote", remote, *args)
rv = []
for line in data.split("\n"):
if not line.strip():
continue
sha1, ref = line.split()
rv.append((sha1, ref))
return rv
def get_remote_sha1(self, remote, branch):
"""Return the SHA1 of a particular branch in a remote.
:param remote: the remote URL
:param branch: the branch name"""
for sha1, ref in self.list_remote(remote, branch):
if ref == "refs/heads/%s" % branch:
return self.commit_cls(self, sha1)
assert False
def create_patch(self, patch_name, message):
# In git a patch is actually a commit
self.message = message
def update_patch(self, include=None):
"""Commit the staged changes, or changes to listed files.
:param include: Either None, to commit staged changes, or a list
of filenames (which must already be in the repo)
to commit
"""
if include is not None:
args = tuple(include)
else:
args = ()
if self.git("status", "-uno", "-z", *args).strip():
self.git("add", *args)
return True
return False
def commit_patch(self):
assert self.message is not None
if self.git("diff", "--name-only", "--staged", "-z").strip():
self.git("commit", "-m", self.message)
return True
return False
def init(self):
self.git("init")
assert vcs.is_git_root(self.root)
def checkout(self, rev, branch=None, force=False):
"""Checkout a particular revision, optionally into a named branch.
:param rev: Revision identifier (e.g. SHA1) to checkout
:param branch: Branch name to use
:param force: Force-checkout
"""
assert rev is not None
args = []
if branch:
branches = [ref[len("refs/heads/"):] for sha1, ref in self.list_refs()
if ref.startswith("refs/heads/")]
branch = get_unique_name(branches, branch)
args += ["-b", branch]
if force:
args.append("-f")
args.append(rev)
self.git("checkout", *args)
def update(self, remote, remote_branch, local_branch):
"""Fetch from the remote and checkout into a local branch.
:param remote: URL to the remote repository
:param remote_branch: Branch on the remote repository to check out
:param local_branch: Local branch name to check out into
"""
if not vcs.is_git_root(self.root):
self.init()
self.git("clean", "-xdf")
self.git("fetch", remote, "%s:%s" % (remote_branch, local_branch))
self.checkout(local_branch)
self.git("submodule", "update", "--init", "--recursive")
def clean(self):
self.git("checkout", self.rev)
self.git("branch", "-D", self.local_branch)
def paths(self):
"""List paths in the tree"""
repo_paths = [self.root] + [os.path.join(self.root, path)
for path in self.submodules()]
rv = []
for repo_path in repo_paths:
paths = vcs.git("ls-tree", "-r", "--name-only", "HEAD", repo=repo_path).split("\n")
rv.extend(os.path.relpath(os.path.join(repo_path, item), self.root) for item in paths
if item.strip())
return rv
def submodules(self):
"""List submodule directories"""
output = self.git("submodule", "status", "--recursive")
rv = []
for line in output.split("\n"):
line = line.strip()
if not line:
continue
parts = line.split(" ")
rv.append(parts[1])
return rv
def contains_commit(self, commit):
try:
self.git("rev-parse", "--verify", commit.sha1)
return True
except subprocess.CalledProcessError:
return False
class CommitMessage(object):
def __init__(self, text):
self.text = text
self._parse_message()
def __str__(self):
return self.text
def _parse_message(self):
lines = self.text.splitlines()
self.full_summary = lines[0]
self.body = "\n".join(lines[1:])
class Commit(object):
msg_cls = CommitMessage
_sha1_re = re.compile("^[0-9a-f]{40}$")
def __init__(self, tree, sha1):
"""Object representing a commit in a specific GitTree.
:param tree: GitTree to which this commit belongs.
:param sha1: Full sha1 string for the commit
"""
assert self._sha1_re.match(sha1)
self.tree = tree
self.git = tree.git
self.sha1 = sha1
self.author, self.email, self.message = self._get_meta()
def __getstate__(self):
rv = self.__dict__.copy()
del rv['git']
return rv
def __setstate__(self, dict):
self.__dict__.update(dict)
self.git = self.tree.git
def _get_meta(self):
author, email, message = self.git("show", "-s", "--format=format:%an\n%ae\n%B", self.sha1).split("\n", 2)
return author, email, self.msg_cls(message)
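# Minimal runnable sketch (added for illustration, not part of the original
# module). It must be executed from inside a git checkout; the output depends
# entirely on that repository.
if __name__ == "__main__":
    demo_tree = GitTree()
    print(demo_tree.rev)          # sha1 of HEAD
    print(demo_tree.paths()[:5])  # first few tracked paths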
|
anthgur/servo
|
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/update/tree.py
|
Python
|
mpl-2.0
| 10,874
|
# coding=utf-8
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
from pybb import defaults, util
from pybb.compat import get_image_field_class, get_username_field
TZ_CHOICES = [(float(x[0]), x[1]) for x in (
(-12, '-12'), (-11, '-11'), (-10, '-10'), (-9.5, '-09.5'), (-9, '-09'),
(-8.5, '-08.5'), (-8, '-08 PST'), (-7, '-07 MST'), (-6, '-06 CST'),
(-5, '-05 EST'), (-4, '-04 AST'), (-3.5, '-03.5'), (-3, '-03 ADT'),
(-2, '-02'), (-1, '-01'), (0, '00 GMT'), (1, '+01 CET'), (2, '+02'),
(3, '+03'), (3.5, '+03.5'), (4, '+04'), (4.5, '+04.5'), (5, '+05'),
(5.5, '+05.5'), (6, '+06'), (6.5, '+06.5'), (7, '+07'), (8, '+08'),
(9, '+09'), (9.5, '+09.5'), (10, '+10'), (10.5, '+10.5'), (11, '+11'),
(11.5, '+11.5'), (12, '+12'), (13, '+13'), (14, '+14'),
)]
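# Illustrative check (added for clarity): the comprehension converts every
# offset to a float so it can back the FloatField below, e.g.
#
#   >>> TZ_CHOICES[0]
#   (-12.0, '-12')
#   >>> TZ_CHOICES[16]
#   (1.0, '+01 CET')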
class PybbProfile(models.Model):
"""
Abstract class for user profile; site profiles should be inherited from this class
"""
class Meta(object):
abstract = True
permissions = (
("block_users", "Can block any user"),
)
signature = models.TextField(_('Signature'), blank=True, max_length=defaults.PYBB_SIGNATURE_MAX_LENGTH)
signature_html = models.TextField(_('Signature HTML Version'), blank=True,
max_length=defaults.PYBB_SIGNATURE_MAX_LENGTH + 30)
time_zone = models.FloatField(_('Time zone'), choices=TZ_CHOICES, default=float(defaults.PYBB_DEFAULT_TIME_ZONE))
language = models.CharField(_('Language'), max_length=10, blank=True, choices=settings.LANGUAGES,
default=settings.LANGUAGE_CODE)
show_signatures = models.BooleanField(_('Show signatures'), blank=True, default=True)
post_count = models.IntegerField(_('Post count'), blank=True, default=0)
avatar = get_image_field_class()(_('Avatar'), blank=True, null=True,
upload_to=util.FilePathGenerator(to='pybb/avatar'))
autosubscribe = models.BooleanField(_('Automatically subscribe'),
help_text=_('Automatically subscribe to topics that you answer'),
default=defaults.PYBB_DEFAULT_AUTOSUBSCRIBE)
def save(self, *args, **kwargs):
self.signature_html = util._get_markup_formatter()(self.signature)
super(PybbProfile, self).save(*args, **kwargs)
@property
def avatar_url(self):
try:
return self.avatar.url
except:
return defaults.PYBB_DEFAULT_AVATAR_URL
def get_display_name(self):
try:
if hasattr(self, 'user'): # we have OneToOne foreign key to user model
return self.user.get_username()
if not defaults.PYBB_PROFILE_RELATED_NAME: # we are now in the custom user model itself
return self.get_username()
except Exception:
return unicode(self)
|
just-work/pybbm
|
pybb/profiles.py
|
Python
|
bsd-2-clause
| 3,029
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
def fft1(x):
L = len(x)
phase = -2j*np.pi*(np.arange(L)/float(L))
phase = np.arange(L).reshape(-1, 1) * phase
return np.sum(x*np.exp(phase), axis=1)
class TestFFTShift(TestCase):
def test_fft_n(self):
self.assertRaises(ValueError, np.fft.fft, [1, 2, 3], 0)
class TestFFT1D(TestCase):
def test_basic(self):
rand = np.random.random
x = rand(30) + 1j*rand(30)
assert_array_almost_equal(fft1(x), np.fft.fft(x))
if __name__ == "__main__":
run_module_suite()
|
AdaptiveApplications/carnegie
|
tarc_bus_locator_client/numpy-1.8.1/numpy/fft/tests/test_fftpack.py
|
Python
|
mit
| 686
|
# -*- coding: utf-8 -*-
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
from odoo.exceptions import ValidationError
from odoo.tests import tagged
import time
@tagged('post_install', '-at_install')
class TestAccountInvoiceRounding(AccountingTestCase):
def setUp(self):
super(TestAccountInvoiceRounding, self).setUp()
self.account_receivable = self.env['account.account'].search(
[('user_type_id', '=', self.env.ref('account.data_account_type_receivable').id)], limit=1)
self.account_revenue = self.env['account.account'].search(
[('user_type_id', '=', self.env.ref('account.data_account_type_revenue').id)], limit=1)
self.fixed_tax = self.env['account.tax'].create({
'name': 'Test Tax',
'amount': 0.0,
'amount_type': 'fixed',
})
def create_cash_rounding(self, rounding, method, strategy):
return self.env['account.cash.rounding'].create({
'name': 'rounding ' + method,
'rounding': rounding,
'account_id': self.account_receivable.id,
'strategy': strategy,
'rounding_method': method,
})
def create_invoice(self, amount, cash_rounding_id, tax_amount=None):
""" Returns an open invoice """
invoice_id = self.env['account.invoice'].create({
'partner_id': self.env.ref("base.res_partner_2").id,
'currency_id': self.env.ref('base.USD').id,
'name': 'invoice test rounding',
'account_id': self.account_receivable.id,
'type': 'out_invoice',
'date_invoice': time.strftime('%Y') + '-06-26',
})
if tax_amount:
self.fixed_tax.amount = tax_amount
self.env['account.invoice.line'].create({
'product_id': self.env.ref("product.product_product_4").id,
'quantity': 1,
'price_unit': amount,
'invoice_id': invoice_id.id,
'name': 'something',
'account_id': self.account_revenue.id,
'invoice_line_tax_ids': [(6, 0, [self.fixed_tax.id])] if tax_amount else None
})
# Create the tax_line_ids
invoice_id._onchange_invoice_line_ids()
# We need to set the cash_rounding_id after the _onchange_invoice_line_ids
# to avoid a ValidationError from _check_cash_rounding because the onchange
# methods are not fully triggered in the tests.
try:
invoice_id.cash_rounding_id = cash_rounding_id
except ValidationError:
pass
invoice_id._onchange_cash_rounding()
invoice_id.action_invoice_open()
return invoice_id
def _check_invoice_rounding(self, inv, exp_lines_values, exp_tax_values=None):
inv_lines = inv.invoice_line_ids
self.assertEquals(len(inv_lines), len(exp_lines_values))
for i in range(0, len(exp_lines_values)):
self.assertEquals(inv_lines[i].price_unit, exp_lines_values[i])
if exp_tax_values:
tax_lines = inv.tax_line_ids
self.assertEquals(len(tax_lines), len(exp_tax_values))
for i in range(0, len(exp_tax_values)):
self.assertEquals(tax_lines[i].amount_total, exp_tax_values[i])
def test_rounding_add_invoice_line(self):
self._check_invoice_rounding(
self.create_invoice(100.2, self.create_cash_rounding(0.5, 'UP', 'add_invoice_line')),
[100.2, 0.3]
)
self._check_invoice_rounding(
self.create_invoice(100.9, self.create_cash_rounding(1.0, 'DOWN', 'add_invoice_line')),
[100.9, -0.9]
)
self._check_invoice_rounding(
self.create_invoice(100.5, self.create_cash_rounding(1.0, 'HALF-UP', 'add_invoice_line')),
[100.5, 0.5]
)
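# Worked arithmetic behind the expected values above (explanatory only):
#   100.2 rounded UP      to a 0.5 step -> 100.5, hence the extra +0.3 line
#   100.9 rounded DOWN    to a 1.0 step -> 100.0, hence the extra -0.9 line
#   100.5 rounded HALF-UP to a 1.0 step -> 101.0, hence the extra +0.5 line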
def test_rounding_biggest_tax(self):
self._check_invoice_rounding(
self.create_invoice(100.2, self.create_cash_rounding(0.5, 'UP', 'biggest_tax'), 1.0),
[100.2], [1.3]
)
self._check_invoice_rounding(
self.create_invoice(100.9, self.create_cash_rounding(1.0, 'DOWN', 'biggest_tax'), 2.0),
[100.9], [1.1]
)
self._check_invoice_rounding(
self.create_invoice(100.5, self.create_cash_rounding(1.0, 'HALF-UP', 'biggest_tax'), 1.0),
[100.5], [1.5]
)
|
t3dev/odoo
|
addons/account/tests/test_account_invoice_rounding.py
|
Python
|
gpl-3.0
| 4,432
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A keyring based Storage.
A Storage for Credentials that uses the keyring module.
"""
import threading
import keyring
from oauth2client.client import Credentials
from oauth2client.client import Storage as BaseStorage
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from the keyring.
To use this module you must have the keyring module installed. See
<http://pypi.python.org/pypi/keyring/>. This is an optional module and is not
installed with oauth2client by default because it does not work on all the
platforms that oauth2client supports, such as Google App Engine.
The keyring module <http://pypi.python.org/pypi/keyring/> is a cross-platform
library for accessing the keyring capabilities of the local system. The user will
be prompted for their keyring password when this module is used, and the
manner in which the user is prompted will vary per platform.
Usage:
from oauth2client.keyring_storage import Storage
s = Storage('name_of_application', 'user1')
credentials = s.get()
"""
def __init__(self, service_name, user_name):
"""Constructor.
Args:
service_name: string, The name of the service under which the credentials
are stored.
user_name: string, The name of the user to store credentials for.
"""
self._service_name = service_name
self._user_name = user_name
self._lock = threading.Lock()
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant."""
self._lock.acquire()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._lock.release()
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
content = keyring.get_password(self._service_name, self._user_name)
if content is not None:
try:
credentials = Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
def locked_put(self, credentials):
"""Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name,
credentials.to_json())
def locked_delete(self):
"""Delete Credentials file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name, '')
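# Minimal runnable sketch (added for illustration, not part of oauth2client;
# the service and user names are invented, and the system keyring may prompt
# for a password):
if __name__ == "__main__":
    demo_store = Storage("name_of_application", "user1")
    creds = demo_store.get()  # BaseStorage.get() wraps locked_get() with the lock
    print(creds)              # None if nothing has been stored yet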
|
harshilasu/LinkurApp
|
y/google-cloud-sdk/lib/oauth2client/keyring_storage.py
|
Python
|
gpl-3.0
| 3,220
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doctype("Contact")
frappe.db.sql("""update tabContact, tabUser set tabContact.user = tabUser.name
where tabContact.email_id = tabUser.email""")
|
ovresko/erpnext
|
erpnext/patches/v6_4/set_user_in_contact.py
|
Python
|
gpl-3.0
| 232
|
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
store_version = 2 # Needed for dynamic plugin loading
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
import urllib
from contextlib import closing
from lxml import html
from PyQt5.Qt import QUrl
from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class EbookscomStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
m_url = 'http://www.dpbolvw.net/'
h_click = 'click-4913808-10364500'
d_click = 'click-4913808-10281551'
url = m_url + h_click
detail_url = None
if detail_item:
detail_url = m_url + d_click + detail_item
if external or self.config.get('open_external', False):
open_url(QUrl(url_slash_cleaner(detail_url if detail_url else url)))
else:
d = WebStoreDialog(self.gui, url, parent, detail_url)
d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', ''))
d.exec_()
def search(self, query, max_results=10, timeout=60):
url = 'http://www.ebooks.com/SearchApp/SearchResults.net?term=' + urllib.quote_plus(query)
br = browser()
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read())
for data in doc.xpath('//div[@id="results"]//li'):
if counter <= 0:
break
id = ''.join(data.xpath('.//a[1]/@href'))
mo = re.search('\d+', id)
if not mo:
continue
id = mo.group()
cover_url = ''.join(data.xpath('.//div[contains(@class, "img")]//img/@src'))
title = ''.join(data.xpath(
'descendant::span[@class="book-title"]/a/text()')).strip()
author = ', '.join(data.xpath(
'descendant::span[@class="author"]/a/text()')).strip()
if not title or not author:
continue
counter -= 1
s = SearchResult()
s.cover_url = cover_url
s.title = title.strip()
s.author = author.strip()
s.detail_item = '?url=http://www.ebooks.com/cj.asp?IID=' + id.strip() + '&cjsku=' + id.strip()
yield s
def get_details(self, search_result, timeout):
url = 'http://www.ebooks.com/ebooks/book_display.asp?IID='
mo = re.search(r'\?IID=(?P<id>\d+)', search_result.detail_item)
if mo:
id = mo.group('id')
if not id:
return
price = _('Not Available')
br = browser()
with closing(br.open(url + id, timeout=timeout)) as nf:
pdoc = html.fromstring(nf.read())
price_l = pdoc.xpath('//div[@class="book-info"]/div[@class="price"]/text()')
if price_l:
price = price_l[0]
search_result.price = price.strip()
search_result.drm = SearchResult.DRM_UNLOCKED
permissions = ' '.join(pdoc.xpath('//div[@class="permissions-items"]//text()'))
if 'off' in permissions:
search_result.drm = SearchResult.DRM_LOCKED
fdata = pdoc.xpath('//div[contains(@class, "more-links") and contains(@class, "more-links-info")]/div//span/text()')
if len(fdata) > 1:
search_result.formats = ', '.join(fdata[1:])
return True
|
jeanlinux/calibre
|
src/calibre/gui2/store/stores/ebooks_com_plugin.py
|
Python
|
gpl-3.0
| 3,922
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt
from erpnext.accounts.doctype.journal_entry.test_journal_entry import make_journal_entry
class TestPeriodClosingVoucher(unittest.TestCase):
def test_closing_entry(self):
make_journal_entry("_Test Bank - _TC", "Sales - _TC", 400,
"_Test Cost Center - _TC", submit=True)
make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 600, "_Test Cost Center - _TC", submit=True)
profit_or_loss = frappe.db.sql("""select sum(ifnull(t1.debit,0))-sum(ifnull(t1.credit,0)) as balance
from `tabGL Entry` t1, `tabAccount` t2
where t1.account = t2.name and ifnull(t2.report_type, '') = 'Profit and Loss'
and t2.docstatus < 2 and t2.company = '_Test Company'
and t1.posting_date between '2013-01-01' and '2013-12-31'""")
profit_or_loss = flt(profit_or_loss[0][0]) if profit_or_loss else 0
pcv = self.make_period_closing_voucher()
gle_value = frappe.db.sql("""select ifnull(debit, 0) - ifnull(credit, 0)
from `tabGL Entry` where voucher_type='Period Closing Voucher' and voucher_no=%s
and account = '_Test Account Reserves and Surplus - _TC'""", pcv.name)
gle_value = flt(gle_value[0][0]) if gle_value else 0
self.assertEqual(gle_value, profit_or_loss)
def make_period_closing_voucher(self):
pcv = frappe.get_doc({
"doctype": "Period Closing Voucher",
"closing_account_head": "_Test Account Reserves and Surplus - _TC",
"company": "_Test Company",
"fiscal_year": "_Test Fiscal Year 2013",
"posting_date": "2013-12-31",
"remarks": "test"
})
pcv.insert()
pcv.submit()
return pcv
test_dependencies = ["Customer", "Cost Center"]
test_records = frappe.get_test_records("Period Closing Voucher")
|
meisterkleister/erpnext
|
erpnext/accounts/doctype/period_closing_voucher/test_period_closing_voucher.py
|
Python
|
agpl-3.0
| 1,929
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Creates <source> elements for html5 videos
"""
from invenio.legacy.bibdocfile.api import BibRecDocs
def format_element(bfo, subformat="480p"):
""" Creates HTML5 source elements for the given subformat.
MP4, WebM and OGV are currently supported as video sources.
The function will scan the bibdocfiles attached to the record for
videos with these formats and the given subformat.
@param subformat: BibDocFile subformat to create the sources from (e.g. 480p)
"""
video_sources = []
recdoc = BibRecDocs(bfo.recID)
bibdocs = recdoc.list_bibdocs()
for bibdoc in bibdocs:
bibdocfiles = bibdoc.list_all_files()
for bibdocfile in bibdocfiles:
if bibdocfile.get_superformat() in ('.mp4', '.webm', '.ogv') and bibdocfile.get_subformat() == subformat:
src = bibdocfile.get_url()
ftype = bibdocfile.get_superformat()[1:]
if ftype == 'mp4':
codecs = 'avc1.42E01E, mp4a.40.2'
elif ftype == 'webm':
codecs = 'vp8, vorbis'
elif ftype == 'ogv':
codecs = 'theora, vorbis'
source = '<source src=\"%s\" type=\'video/%s; codecs=\"%s\"\' />' % (src, ftype, codecs)
video_sources.append(source)
return "\n".join(video_sources)
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
|
zenodo/invenio
|
invenio/modules/formatter/format_elements/bfe_video_sources.py
|
Python
|
gpl-2.0
| 2,331
|
class SamplePlugin(object):
def __init__(self):
self.__count = 10
|
punalpatel/st2
|
st2common/tests/resources/loadableplugin/plugin/sampleplugin3.py
|
Python
|
apache-2.0
| 78
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions as kc_exceptions
import six
from heat.common import exception
from heat.common import short_id
from heat.common import template_format
from heat.engine.resources import stack_user
from heat.engine import scheduler
from heat.objects import resource_data as resource_data_object
from heat.tests import common
from heat.tests import fakes
from heat.tests import utils
user_template = '''
heat_template_version: 2013-05-23
resources:
user:
type: StackUserResourceType
'''
class StackUserTest(common.HeatTestCase):
def setUp(self):
super(StackUserTest, self).setUp()
self.fc = fakes.FakeKeystoneClient()
def _user_create(self, stack_name, project_id, user_id,
resource_name='user', create_project=True,
password=None):
t = template_format.parse(user_template)
self.stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = self.stack[resource_name]
self.m.StubOutWithMock(stack_user.StackUser, 'keystone')
stack_user.StackUser.keystone().MultipleTimes().AndReturn(self.fc)
if create_project:
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'create_stack_domain_project')
fakes.FakeKeystoneClient.create_stack_domain_project(
self.stack.id).AndReturn(project_id)
else:
self.stack.set_stack_user_project_id(project_id)
rsrc._store()
self.m.StubOutWithMock(short_id, 'get_id')
short_id.get_id(rsrc.uuid).AndReturn('aabbcc')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'create_stack_domain_user')
expected_username = '%s-%s-%s' % (stack_name, resource_name, 'aabbcc')
fakes.FakeKeystoneClient.create_stack_domain_user(
username=expected_username, password=password,
project_id=project_id).AndReturn(user_id)
return rsrc
def test_handle_create_no_stack_project(self):
rsrc = self._user_create(stack_name='stackuser_crnoprj',
project_id='aproject123',
user_id='auser123')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rs_data = resource_data_object.ResourceData.get_all(rsrc)
self.assertEqual({'user_id': 'auser123'}, rs_data)
self.m.VerifyAll()
def test_handle_create_existing_project(self):
rsrc = self._user_create(stack_name='stackuser_crexistprj',
project_id='aproject456',
user_id='auser456',
create_project=False)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rs_data = resource_data_object.ResourceData.get_all(rsrc)
self.assertEqual({'user_id': 'auser456'}, rs_data)
self.m.VerifyAll()
def test_handle_delete(self):
rsrc = self._user_create(stack_name='stackuser_testdel',
project_id='aprojectdel',
user_id='auserdel')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'delete_stack_domain_user')
fakes.FakeKeystoneClient.delete_stack_domain_user(
user_id='auserdel', project_id='aprojectdel').AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_handle_delete_not_found(self):
rsrc = self._user_create(stack_name='stackuser_testdel_notfound',
project_id='aprojectdel2',
user_id='auserdel2')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'delete_stack_domain_user')
fakes.FakeKeystoneClient.delete_stack_domain_user(
user_id='auserdel2', project_id='aprojectdel2').AndRaise(
kc_exceptions.NotFound)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_handle_delete_noid(self):
rsrc = self._user_create(stack_name='stackuser_testdel_noid',
project_id='aprojectdel2',
user_id='auserdel2')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
resource_data_object.ResourceData.delete(rsrc, 'user_id')
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_handle_suspend(self):
rsrc = self._user_create(stack_name='stackuser_testsusp',
project_id='aprojectdel',
user_id='auserdel')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'disable_stack_domain_user')
fakes.FakeKeystoneClient.disable_stack_domain_user(
user_id='auserdel', project_id='aprojectdel').AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
scheduler.TaskRunner(rsrc.suspend)()
self.assertEqual((rsrc.SUSPEND, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_handle_suspend_legacy(self):
rsrc = self._user_create(stack_name='stackuser_testsusp_lgcy',
project_id='aprojectdel',
user_id='auserdel')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'disable_stack_domain_user')
fakes.FakeKeystoneClient.disable_stack_domain_user(
user_id='auserdel', project_id='aprojectdel').AndRaise(ValueError)
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'disable_stack_user')
fakes.FakeKeystoneClient.disable_stack_user(
user_id='auserdel').AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
scheduler.TaskRunner(rsrc.suspend)()
self.assertEqual((rsrc.SUSPEND, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_handle_resume(self):
rsrc = self._user_create(stack_name='stackuser_testresume',
project_id='aprojectdel',
user_id='auserdel')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'enable_stack_domain_user')
fakes.FakeKeystoneClient.enable_stack_domain_user(
user_id='auserdel', project_id='aprojectdel').AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
scheduler.TaskRunner(rsrc.resume)()
self.assertEqual((rsrc.RESUME, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_handle_resume_legacy(self):
rsrc = self._user_create(stack_name='stackuser_testresume_lgcy',
project_id='aprojectdel',
user_id='auserdel')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'enable_stack_domain_user')
fakes.FakeKeystoneClient.enable_stack_domain_user(
user_id='auserdel', project_id='aprojectdel').AndRaise(ValueError)
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'enable_stack_user')
fakes.FakeKeystoneClient.enable_stack_user(
user_id='auserdel').AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
scheduler.TaskRunner(rsrc.resume)()
self.assertEqual((rsrc.RESUME, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_keypair(self):
rsrc = self._user_create(stack_name='stackuser_test_cr_keypair',
project_id='aprojectdel',
user_id='auserdel')
# create_stack_domain_user_keypair(self, user_id, project_id):
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'create_stack_domain_user_keypair')
fakes.FakeKeystoneClient.create_stack_domain_user_keypair(
user_id='auserdel', project_id='aprojectdel').AndReturn(
self.fc.creds)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
kp = rsrc._create_keypair()
self.assertEqual(self.fc.credential_id, kp.id)
self.assertEqual(self.fc.access, kp.access)
self.assertEqual(self.fc.secret, kp.secret)
rs_data = resource_data_object.ResourceData.get_all(rsrc)
self.assertEqual(self.fc.credential_id, rs_data['credential_id'])
self.assertEqual(self.fc.access, rs_data['access_key'])
self.assertEqual(self.fc.secret, rs_data['secret_key'])
self.m.VerifyAll()
def test_create_keypair_error(self):
rsrc = self._user_create(stack_name='stackuser_test_cr_keypair_err',
project_id='aprojectdel',
user_id='auserdel')
# create_stack_domain_user_keypair(self, user_id, project_id):
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'create_stack_domain_user_keypair')
fakes.FakeKeystoneClient.create_stack_domain_user_keypair(
user_id='auserdel', project_id='aprojectdel').AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertRaises(exception.Error, rsrc._create_keypair)
self.m.VerifyAll()
def test_delete_keypair(self):
rsrc = self._user_create(stack_name='stackuser_testdel_keypair',
project_id='aprojectdel',
user_id='auserdel')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'delete_stack_domain_user_keypair')
fakes.FakeKeystoneClient.delete_stack_domain_user_keypair(
user_id='auserdel', project_id='aprojectdel',
credential_id='acredential').AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.data_set('credential_id', 'acredential')
rsrc.data_set('access_key', 'access123')
rsrc.data_set('secret_key', 'verysecret')
rsrc._delete_keypair()
rs_data = resource_data_object.ResourceData.get_all(rsrc)
self.assertEqual({'user_id': 'auserdel'}, rs_data)
self.m.VerifyAll()
def test_delete_keypair_no_credential_id(self):
rsrc = self._user_create(stack_name='stackuser_del_keypair_nocrdid',
project_id='aprojectdel',
user_id='auserdel')
rsrc._delete_keypair()
def test_delete_keypair_legacy(self):
rsrc = self._user_create(stack_name='stackuser_testdel_keypair_lgcy',
project_id='aprojectdel',
user_id='auserdel')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'delete_stack_domain_user_keypair')
fakes.FakeKeystoneClient.delete_stack_domain_user_keypair(
user_id='auserdel', project_id='aprojectdel',
credential_id='acredential').AndRaise(ValueError())
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'delete_ec2_keypair')
fakes.FakeKeystoneClient.delete_ec2_keypair(
user_id='auserdel', credential_id='acredential').AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.data_set('credential_id', 'acredential')
rsrc.data_set('access_key', 'access123')
rsrc.data_set('secret_key', 'verysecret')
rsrc._delete_keypair()
rs_data = resource_data_object.ResourceData.get_all(rsrc)
self.assertEqual({'user_id': 'auserdel'}, rs_data)
self.m.VerifyAll()
def test_delete_keypair_notfound(self):
rsrc = self._user_create(stack_name='stackuser_testdel_kpr_notfound',
project_id='aprojectdel',
user_id='auserdel')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'delete_stack_domain_user_keypair')
fakes.FakeKeystoneClient.delete_stack_domain_user_keypair(
user_id='auserdel', project_id='aprojectdel',
credential_id='acredential').AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
rsrc.data_set('credential_id', 'acredential')
rsrc._delete_keypair()
rs_data = resource_data_object.ResourceData.get_all(rsrc)
self.assertEqual({'user_id': 'auserdel'}, rs_data)
self.m.VerifyAll()
def test_user_token(self):
rsrc = self._user_create(stack_name='stackuser_testtoken',
project_id='aproject123',
user_id='aabbcc',
password='apassword')
self.m.StubOutWithMock(fakes.FakeKeystoneClient,
'stack_domain_user_token')
fakes.FakeKeystoneClient.stack_domain_user_token(
user_id='aabbcc', project_id='aproject123',
password='apassword').AndReturn('atoken123')
self.m.ReplayAll()
rsrc.password = 'apassword'
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('atoken123', rsrc._user_token())
self.m.VerifyAll()
def test_user_token_err_nopassword(self):
rsrc = self._user_create(stack_name='stackuser_testtoken_err_nopwd',
project_id='aproject123',
user_id='auser123')
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
ex = self.assertRaises(ValueError, rsrc._user_token)
expected = "Can't get user token without password"
self.assertEqual(expected, six.text_type(ex))
self.m.VerifyAll()
def test_user_token_err_noproject(self):
        stack_name = 'user_token_err_noproject_stack'
resource_name = 'user'
t = template_format.parse(user_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = stack[resource_name]
ex = self.assertRaises(ValueError, rsrc._user_token)
expected = "Can't get user token, user not yet created"
self.assertEqual(expected, six.text_type(ex))
|
dragorosson/heat
|
heat/tests/test_stack_user.py
|
Python
|
apache-2.0
| 16,620
|
class Base:
def m(self, x):
pass
class Sub(Base):
def m<warning descr="Signature of method 'Sub.m()' does not match signature of base method in class 'Base'">(<caret>self)</warning>:
pass
|
goodwinnk/intellij-community
|
python/testData/inspections/AddKwargsToIncompatibleOverridingMethod.py
|
Python
|
apache-2.0
| 221
|
from .vartypes import *
from .model import *
from .theanof import *
from .blocking import *
import numpy as np
|
superbobry/pymc3
|
pymc3/core.py
|
Python
|
apache-2.0
| 111
|
from ..broker import Broker
class IfTrunkBroker(Broker):
controller = "if_trunks"
def show(self, **kwargs):
"""Shows the details for the specified if trunk.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface to which this trunk configuration applies.
:type InterfaceID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of if trunk methods. The listed methods will be called on each if trunk returned and included in the output. Available methods are: device, interface, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return if_trunk: The if trunk identified by the specified InterfaceID.
:rtype if_trunk: IfTrunk
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
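    # Usage sketch: assuming `broker` is an IfTrunkBroker instance bound to an
    # authenticated NetMRI API client (client construction is not shown in this
    # file), `show` takes the keyword arguments documented above and returns the
    # single matching IfTrunk, e.g.:
    #
    #     trunk = broker.show(InterfaceID=1234, include=['device', 'interface'])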
def index(self, **kwargs):
"""Lists the available if trunks. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this record was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this record was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface to which this trunk configuration applies.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface to which this trunk configuration applies.
:type InterfaceID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the if trunks as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of if trunk methods. The listed methods will be called on each if trunk returned and included in the output. Available methods are: device, interface, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
        :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` InterfaceID
:param sort: The data field(s) to use for sorting the output. Default is InterfaceID. Valid values are DataSourceID, DeviceID, InterfaceID, ifIndex, TrunkTimestamp, TrunkStartTime, TrunkEndTime, TrunkChangedCols, TrunkEncapsulationType, TrunkNativeVlanIndex, TrunkNativeVlanID, TrunkState, TrunkStatus.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IfTrunk. Valid values are DataSourceID, DeviceID, InterfaceID, ifIndex, TrunkTimestamp, TrunkStartTime, TrunkEndTime, TrunkChangedCols, TrunkEncapsulationType, TrunkNativeVlanIndex, TrunkNativeVlanID, TrunkState, TrunkStatus. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return if_trunks: An array of the IfTrunk objects that match the specified input criteria.
:rtype if_trunks: Array of IfTrunk
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
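    # Usage sketch: `index` is the most efficient list query and goes through
    # api_list_request, returning the array-shaped result described above. With
    # an assumed `broker` instance, the start/limit paging documented above works
    # like this:
    #
    #     page = broker.index(DeviceID=[101, 102], start=10, limit=10,
    #                         sort=['InterfaceID'], dir=['asc'])
    #
    # which, given 100 matching records, returns records 10-19.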
def search(self, **kwargs):
"""Lists the available if trunks matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this record was collected.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the device from which this record was collected.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface to which this trunk configuration applies.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the interface to which this trunk configuration applies.
:type InterfaceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param TrunkChangedCols: The fields that changed between this revision of the record and the previous revision.
:type TrunkChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param TrunkChangedCols: The fields that changed between this revision of the record and the previous revision.
:type TrunkChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param TrunkEncapsulationType: The trunking protocol for this trunk port.
:type TrunkEncapsulationType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param TrunkEncapsulationType: The trunking protocol for this trunk port.
:type TrunkEncapsulationType: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param TrunkEndTime: The ending effective time of this record, or empty if still in effect.
:type TrunkEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param TrunkEndTime: The ending effective time of this record, or empty if still in effect.
:type TrunkEndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param TrunkNativeVlanID: The internal NetMRI identifier for the Native VLAN for this interface.
:type TrunkNativeVlanID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param TrunkNativeVlanID: The internal NetMRI identifier for the Native VLAN for this interface.
:type TrunkNativeVlanID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param TrunkNativeVlanIndex: The VLAN number for the Native VLAN for this interface.
:type TrunkNativeVlanIndex: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param TrunkNativeVlanIndex: The VLAN number for the Native VLAN for this interface.
:type TrunkNativeVlanIndex: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param TrunkStartTime: The starting effective time of this record.
:type TrunkStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param TrunkStartTime: The starting effective time of this record.
:type TrunkStartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param TrunkState: The configured trunk state.
:type TrunkState: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param TrunkState: The configured trunk state.
:type TrunkState: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param TrunkStatus: The operational trunk status.
:type TrunkStatus: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param TrunkStatus: The operational trunk status.
:type TrunkStatus: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param TrunkTimestamp: The date and time this record was collected or calculated.
:type TrunkTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param TrunkTimestamp: The date and time this record was collected or calculated.
:type TrunkTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifIndex: The SNMP interface index for the interface to which this trunk configuration applies.
:type ifIndex: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifIndex: The SNMP interface index for the interface to which this trunk configuration applies.
:type ifIndex: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the if trunks as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of if trunk methods. The listed methods will be called on each if trunk returned and included in the output. Available methods are: device, interface, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
        :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` InterfaceID
:param sort: The data field(s) to use for sorting the output. Default is InterfaceID. Valid values are DataSourceID, DeviceID, InterfaceID, ifIndex, TrunkTimestamp, TrunkStartTime, TrunkEndTime, TrunkChangedCols, TrunkEncapsulationType, TrunkNativeVlanIndex, TrunkNativeVlanID, TrunkState, TrunkStatus.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IfTrunk. Valid values are DataSourceID, DeviceID, InterfaceID, ifIndex, TrunkTimestamp, TrunkStartTime, TrunkEndTime, TrunkChangedCols, TrunkEncapsulationType, TrunkNativeVlanIndex, TrunkNativeVlanID, TrunkState, TrunkStatus. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against if trunks, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceID, InterfaceID, TrunkChangedCols, TrunkEncapsulationType, TrunkEndTime, TrunkNativeVlanID, TrunkNativeVlanIndex, TrunkStartTime, TrunkState, TrunkStatus, TrunkTimestamp, ifIndex.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return if_trunks: An array of the IfTrunk objects that match the specified input criteria.
:rtype if_trunks: Array of IfTrunk
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
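    # Usage sketch: `search` accepts the same field filters as `index` plus the
    # free-form `query` input; surrounding the value with '/' switches it to a
    # regular-expression match across the searched attributes. With an assumed
    # `broker` instance (values below are illustrative only):
    #
    #     trunks = broker.search(DeviceID=[101], query='/dot1q/', limit=50)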
def find(self, **kwargs):
"""Lists the available if trunks matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceID, InterfaceID, TrunkChangedCols, TrunkEncapsulationType, TrunkEndTime, TrunkNativeVlanID, TrunkNativeVlanIndex, TrunkStartTime, TrunkState, TrunkStatus, TrunkTimestamp, ifIndex.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the device from which this record was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier for the interface to which this trunk configuration applies. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified.
:type val_f_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified.
:type val_c_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_TrunkChangedCols: The operator to apply to the field TrunkChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. TrunkChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_TrunkChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_TrunkChangedCols: If op_TrunkChangedCols is specified, the field named in this input will be compared to the value in TrunkChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_TrunkChangedCols must be specified if op_TrunkChangedCols is specified.
:type val_f_TrunkChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_TrunkChangedCols: If op_TrunkChangedCols is specified, this value will be compared to the value in TrunkChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_TrunkChangedCols must be specified if op_TrunkChangedCols is specified.
:type val_c_TrunkChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_TrunkEncapsulationType: The operator to apply to the field TrunkEncapsulationType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. TrunkEncapsulationType: The trunking protocol for this trunk port. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_TrunkEncapsulationType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_TrunkEncapsulationType: If op_TrunkEncapsulationType is specified, the field named in this input will be compared to the value in TrunkEncapsulationType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_TrunkEncapsulationType must be specified if op_TrunkEncapsulationType is specified.
:type val_f_TrunkEncapsulationType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_TrunkEncapsulationType: If op_TrunkEncapsulationType is specified, this value will be compared to the value in TrunkEncapsulationType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_TrunkEncapsulationType must be specified if op_TrunkEncapsulationType is specified.
:type val_c_TrunkEncapsulationType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_TrunkEndTime: The operator to apply to the field TrunkEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. TrunkEndTime: The ending effective time of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_TrunkEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_TrunkEndTime: If op_TrunkEndTime is specified, the field named in this input will be compared to the value in TrunkEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_TrunkEndTime must be specified if op_TrunkEndTime is specified.
:type val_f_TrunkEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_TrunkEndTime: If op_TrunkEndTime is specified, this value will be compared to the value in TrunkEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_TrunkEndTime must be specified if op_TrunkEndTime is specified.
:type val_c_TrunkEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_TrunkNativeVlanID: The operator to apply to the field TrunkNativeVlanID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. TrunkNativeVlanID: The internal NetMRI identifier for the Native VLAN for this interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_TrunkNativeVlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_TrunkNativeVlanID: If op_TrunkNativeVlanID is specified, the field named in this input will be compared to the value in TrunkNativeVlanID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_TrunkNativeVlanID must be specified if op_TrunkNativeVlanID is specified.
:type val_f_TrunkNativeVlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_TrunkNativeVlanID: If op_TrunkNativeVlanID is specified, this value will be compared to the value in TrunkNativeVlanID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_TrunkNativeVlanID must be specified if op_TrunkNativeVlanID is specified.
:type val_c_TrunkNativeVlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_TrunkNativeVlanIndex: The operator to apply to the field TrunkNativeVlanIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. TrunkNativeVlanIndex: The VLAN number for the Native VLAN for this interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_TrunkNativeVlanIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_TrunkNativeVlanIndex: If op_TrunkNativeVlanIndex is specified, the field named in this input will be compared to the value in TrunkNativeVlanIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_TrunkNativeVlanIndex must be specified if op_TrunkNativeVlanIndex is specified.
:type val_f_TrunkNativeVlanIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_TrunkNativeVlanIndex: If op_TrunkNativeVlanIndex is specified, this value will be compared to the value in TrunkNativeVlanIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_TrunkNativeVlanIndex must be specified if op_TrunkNativeVlanIndex is specified.
:type val_c_TrunkNativeVlanIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_TrunkStartTime: The operator to apply to the field TrunkStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. TrunkStartTime: The starting effective time of this record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_TrunkStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_TrunkStartTime: If op_TrunkStartTime is specified, the field named in this input will be compared to the value in TrunkStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_TrunkStartTime must be specified if op_TrunkStartTime is specified.
:type val_f_TrunkStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_TrunkStartTime: If op_TrunkStartTime is specified, this value will be compared to the value in TrunkStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_TrunkStartTime must be specified if op_TrunkStartTime is specified.
:type val_c_TrunkStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_TrunkState: The operator to apply to the field TrunkState. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. TrunkState: The configured trunk state. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_TrunkState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_TrunkState: If op_TrunkState is specified, the field named in this input will be compared to the value in TrunkState using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_TrunkState must be specified if op_TrunkState is specified.
:type val_f_TrunkState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_TrunkState: If op_TrunkState is specified, this value will be compared to the value in TrunkState using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_TrunkState must be specified if op_TrunkState is specified.
:type val_c_TrunkState: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_TrunkStatus: The operator to apply to the field TrunkStatus. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. TrunkStatus: The operational trunk status. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_TrunkStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_TrunkStatus: If op_TrunkStatus is specified, the field named in this input will be compared to the value in TrunkStatus using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_TrunkStatus must be specified if op_TrunkStatus is specified.
:type val_f_TrunkStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_TrunkStatus: If op_TrunkStatus is specified, this value will be compared to the value in TrunkStatus using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_TrunkStatus must be specified if op_TrunkStatus is specified.
:type val_c_TrunkStatus: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_TrunkTimestamp: The operator to apply to the field TrunkTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. TrunkTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_TrunkTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_TrunkTimestamp: If op_TrunkTimestamp is specified, the field named in this input will be compared to the value in TrunkTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_TrunkTimestamp must be specified if op_TrunkTimestamp is specified.
:type val_f_TrunkTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_TrunkTimestamp: If op_TrunkTimestamp is specified, this value will be compared to the value in TrunkTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_TrunkTimestamp must be specified if op_TrunkTimestamp is specified.
:type val_c_TrunkTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifIndex: The operator to apply to the field ifIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifIndex: The SNMP interface index for the interface to which this trunk configuration applies. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifIndex: If op_ifIndex is specified, the field named in this input will be compared to the value in ifIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifIndex must be specified if op_ifIndex is specified.
:type val_f_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifIndex: If op_ifIndex is specified, this value will be compared to the value in ifIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifIndex must be specified if op_ifIndex is specified.
:type val_c_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the if trunks as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of if trunk methods. The listed methods will be called on each if trunk returned and included in the output. Available methods are: device, interface, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
        :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` InterfaceID
:param sort: The data field(s) to use for sorting the output. Default is InterfaceID. Valid values are DataSourceID, DeviceID, InterfaceID, ifIndex, TrunkTimestamp, TrunkStartTime, TrunkEndTime, TrunkChangedCols, TrunkEncapsulationType, TrunkNativeVlanIndex, TrunkNativeVlanID, TrunkState, TrunkStatus.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IfTrunk. Valid values are DataSourceID, DeviceID, InterfaceID, ifIndex, TrunkTimestamp, TrunkStartTime, TrunkEndTime, TrunkChangedCols, TrunkEncapsulationType, TrunkNativeVlanIndex, TrunkNativeVlanID, TrunkState, TrunkStatus. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return if_trunks: An array of the IfTrunk objects that match the specified input criteria.
:rtype if_trunks: Array of IfTrunk
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
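    # Usage sketch: `find` builds comparisons from the op_/val_c_/val_f_ triplets
    # documented above; for example, with an assumed `broker` instance, trunks
    # whose native VLAN index is greater than 100 could be requested as:
    #
    #     trunks = broker.find(op_TrunkNativeVlanIndex='>',
    #                          val_c_TrunkNativeVlanIndex='100',
    #                          sort=['InterfaceID'])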
|
infobloxopen/infoblox-netmri
|
infoblox_netmri/api/broker/v3_8_0/if_trunk_broker.py
|
Python
|
apache-2.0
| 50,853
|