| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
from __future__ import division, absolute_import, print_function
import numpy as np
from . import format
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
from ._datasource import DataSource
from ._compiled_base import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter,
ConverterError, ConverterLockError, ConversionWarning,
_is_string_like, has_nested_fields, flatten_dtype,
easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError("Illegal argument")
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if it's already a gzip.GzipFile
try:
name = f.name
except AttributeError:
# Backward compatibility for <= 2.5
name = f.filename
mode = f.mode
f = GzipFile(fileobj=f.fileobj, filename=name)
f.mode = mode
return f
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension;
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute lookup can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.npyio.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load arrays or pickled objects from ``.npy``, ``.npz``, or pickled files.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap` for a detailed description of the modes).
A memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful for
accessing small fragments of large files without reading the entire
file into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For '.npz' files, the returned instance of
NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the context
manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with' block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
else:
fid = file
try:
# Code to distinguish between NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else:
# Try a pickle
try:
return pickle.load(fid)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed .npz file format
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of .npz file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
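Examples
--------
A minimal sketch of round-tripping data (the file path is illustrative):
>>> test_array = np.random.rand(3, 2)
>>> test_vector = np.arange(3)
>>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
>>> loaded = np.load('/tmp/123.npz')
>>> np.array_equal(test_array, loaded['a'])
True
>>> np.array_equal(test_vector, loaded['b'])
True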
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError("Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
# Adapted from matplotlib
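# Given a dtype, return a callable that converts one text field to that type;
# e.g. _getconv(np.dtype(float)) is float and _getconv(np.dtype('i8')) is np.int64.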
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
else:
fh = iter(open(fname, 'U'))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
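# Illustration with a hypothetical dtype np.dtype([('x', float), ('pos', float, (2,))]):
# flatten_dtype returns three float64 base types with packing [(1, None), (2, list)],
# and pack_items(['1', '2', '3'], [(1, None), (2, list)]) regroups the converted
# fields as ('1', ['2', '3']), matching the dtype's nesting.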
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
Character separating columns.
newline : str, optional
Character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
# list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt),] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
The numbers of lines to skip at the beginning of the file.
skip_footer : int, optional
The numbers of lines to skip at the end of the file
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
* When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
--------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError("fname mustbe a string, filehandle, or generator. "\
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn(\
"The use of `skiprows` is deprecated, it will be removed in numpy 2.0.\n" \
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
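# For instance, with three columns and missing_values="N/A" (already converted to
# bytes above), each per-column list ends up as [b'', b'N/A'] after the processing below.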
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn(\
"The use of `missing` is deprecated, it will be removed in Numpy 2.0.\n" \
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
#
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = list(zip(*[[converter._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, converter) in enumerate(converters)]))
else:
rows = list(zip(*[[converter._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, converter) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
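Examples
--------
A minimal sketch (illustrative data; byte strings shown for Python 3):
>>> from io import BytesIO
>>> s = BytesIO(b"Name,Height\\nAlice,170\\nBob,180")
>>> r = np.recfromcsv(s)
>>> r.dtype.names
('name', 'height')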
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| alephu5/Soundbyte | environment/lib/python3.3/site-packages/numpy/lib/npyio.py | Python | gpl-3.0 | 65,706 |
# igc2kmz color functions
# Copyright (C) 2008 Tom Payne
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def hsl_to_rgba(hsl, a=1.0):
"""Convert a HSL tuple into a RGBA tuple."""
def h_to_value(p, q, t):
if t < 0.0:
t += 1.0
elif 1.0 < t:
t -= 1.0
if t < 1.0 / 6.0:
return p + 6.0 * (q - p) * t
elif t < 0.5:
return q
elif t < 2.0 / 3.0:
return p + 6.0 * (q - p) * (2.0 / 3.0 - t)
else:
return p
h, s, l = hsl
if s == 0:
return (l, l, l, a)
if l < 0.5:
q = l * (s + 1.0)
else:
q = l + s - l * s
p = 2.0 * l - q
r = h_to_value(p, q, h + 1.0 / 3.0)
g = h_to_value(p, q, h)
b = h_to_value(p, q, h - 1.0 / 3.0)
return (r, g, b, a)
def hsv_to_rgb(hsv):
"""Convert a HSV tuple into a RGBA tuple."""
h, s, v = hsv
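# h is expected in [0.0, 6.0): int(h) selects the sextant, f the position within it.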
hi = int(h)
f = h - hi
p = v * (1.0 - f)
q = v * (1.0 - f * s)
t = v * (1.0 - (1.0 - f) * s)
if hi == 0:
return (v, t, p)
elif hi == 1:
return (q, v, p)
elif hi == 2:
return (p, v, t)
elif hi == 3:
return (p, q, v)
elif hi == 4:
return (t, p, v)
else:
return (v, p, q)
def grayscale_gradient(value):
"""Return a gradient from black to white."""
if value < 0.0:
return (0.0, 0.0, 0.0, 1.0)
elif 1.0 <= value:
return (1.0, 1.0, 1.0, 1.0)
else:
return (value, value, value, 1.0)
def default_gradient(value):
"""Return a gradient from blue to green to red."""
if value < 0.0:
return hsl_to_rgba((2.0 / 3.0, 1.0, 0.5))
elif 1.0 <= value:
return hsl_to_rgba((0.0, 1.0, 0.5))
else:
h = 2.0 * (1.0 - value) / 3.0
return hsl_to_rgba((h, 1.0, 0.5))
def bilinear_gradient(value):
"""Return a bilinear gradient from blue to green to red."""
if value < 0.0:
h = 2.0 / 3.0
elif value < 0.5:
h = (6.0 - 4.0 * value) / 9.0
elif value == 0.5:
h = 1.0 / 3.0
elif value < 1.0:
h = (4.0 - 4.0 * value) / 9.0
else:
h = 0.0
return hsl_to_rgba((h, 1.0, 0.5))
| twpayne/igc2kmz | igc2kmz/color.py | Python | gpl-3.0 | 2,810 |
#
# Copyright (c) 2014 Sylvain Peyrefitte
#
# This file is part of rdpy.
#
# rdpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
unit test for rdpy.protocol.rdp.tpkt module
"""
import os, sys
# Change path so we find rdpy
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import unittest
import rdpy.protocol.rdp.tpkt as tpkt
import rdpy.core.type as type
import rdpy.core.error as error
class TPKTTest(unittest.TestCase):
"""
@summary: test case for tpkt layer (RDP)
"""
class TPKT_PASS(Exception):
pass
class TPKT_FAIL(Exception):
pass
def test_tpkt_layer_connect(self):
"""
@summary: test forward connect event to presentation layer
"""
class Presentation(object):
def connect(self):
raise TPKTTest.TPKT_PASS()
layer = tpkt.TPKT(Presentation())
self.assertRaises(TPKTTest.TPKT_PASS, layer.connect)
def test_tpkt_layer_recv(self):
"""
@summary: test receive in classic case
"""
class Presentation(object):
def connect(self):
pass
def recv(self, data):
data.readType(type.String("test_tpkt_layer_recv", constant = True))
raise TPKTTest.TPKT_PASS()
message = type.String("test_tpkt_layer_recv")
s = type.Stream()
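# Hand-build a classic TPKT packet: an action/version byte (FASTPATH_ACTION_X224),
# a reserved byte, then the total length (payload plus the 4-byte header) as a
# big-endian 16-bit integer, followed by the payload itself.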
s.writeType((type.UInt8(tpkt.Action.FASTPATH_ACTION_X224), type.UInt8(), type.UInt16Be(type.sizeof(message) + 4), message))
layer = tpkt.TPKT(Presentation())
layer.connect()
self.assertRaises(TPKTTest.TPKT_PASS, layer.dataReceived, s.getvalue())
def test_tpkt_layer_recv_fastpath(self):
"""
@summary: test receive in fastpath case
"""
class FastPathLayer(tpkt.IFastPathListener):
def setFastPathSender(self, fastPathSender):
pass
def recvFastPath(self, secFlag, fastPathS):
fastPathS.readType(type.String("test_tpkt_layer_recv_fastpath", constant = True))
raise TPKTTest.TPKT_PASS()
message = type.String("test_tpkt_layer_recv_fastpath")
s = type.Stream()
s.writeType((type.UInt8(tpkt.Action.FASTPATH_ACTION_FASTPATH), type.UInt8(type.sizeof(message) + 2), message))
layer = tpkt.TPKT(None)
layer.initFastPath(FastPathLayer())
layer.connect()
self.assertRaises(TPKTTest.TPKT_PASS, layer.dataReceived, s.getvalue())
def test_tpkt_layer_recv_fastpath_ext_length(self):
"""
@summary: test receive in fastpath case with extended length
"""
class FastPathLayer(tpkt.IFastPathListener):
def setFastPathSender(self, fastPathSender):
pass
def recvFastPath(self, secflag, fastPathS):
fastPathS.readType(type.String("test_tpkt_layer_recv_fastpath_ext_length", constant = True))
raise TPKTTest.TPKT_PASS()
message = type.String("test_tpkt_layer_recv_fastpath_ext_length")
s = type.Stream()
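        # The 0x8000 bit marks the two-byte (extended) length form; the length covers the 3-byte header plus the payload.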
s.writeType((type.UInt8(tpkt.Action.FASTPATH_ACTION_FASTPATH), type.UInt16Be((type.sizeof(message) + 3) | 0x8000), message))
layer = tpkt.TPKT(None)
layer.initFastPath(FastPathLayer())
layer.connect()
self.assertRaises(TPKTTest.TPKT_PASS, layer.dataReceived, s.getvalue())
|
ChrisTruncer/rdpy
|
test/test_protocol_rdp_tpkt.py
|
Python
|
gpl-3.0
| 4,095
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import unittest
import numpy as np
import iris
import iris.analysis.calculus
import iris.cube
import iris.coord_systems
import iris.coords
import iris.tests.stock
from iris.coords import DimCoord
from iris.tests.test_interpolation import normalise_order
class TestCubeDelta(tests.IrisTest):
def test_invalid(self):
cube = iris.tests.stock.realistic_4d()
with self.assertRaises(iris.exceptions.CoordinateMultiDimError):
t = iris.analysis.calculus.cube_delta(cube, 'surface_altitude')
with self.assertRaises(iris.exceptions.CoordinateMultiDimError):
t = iris.analysis.calculus.cube_delta(cube, 'altitude')
with self.assertRaises(ValueError):
t = iris.analysis.calculus.cube_delta(cube, 'forecast_period')
def test_delta_coord_lookup(self):
cube = iris.cube.Cube(np.arange(10), standard_name='air_temperature')
# Add a coordinate with a lot of metadata.
coord = iris.coords.DimCoord(np.arange(10),
long_name='projection_x_coordinate',
var_name='foo',
attributes={'source': 'testing'},
units='m',
coord_system=iris.coord_systems.OSGB())
cube.add_dim_coord(coord, 0)
delta = iris.analysis.calculus.cube_delta(cube,
'projection_x_coordinate')
delta_coord = delta.coord('projection_x_coordinate')
self.assertEqual(delta_coord, delta.coord(coord))
self.assertEqual(coord, cube.coord(delta_coord))
class TestDeltaAndMidpoint(tests.IrisTest):
def _simple_filename(self, suffix):
return tests.get_result_path(('analysis', 'delta_and_midpoint', 'simple%s.cml' % suffix))
def test_simple1_delta_midpoint(self):
a = iris.coords.DimCoord((np.arange(4, dtype=np.float32) * 90) - 180, long_name='foo',
units='degrees', circular=True)
self.assertXMLElement(a, self._simple_filename('1'))
delta = iris.analysis.calculus._construct_delta_coord(a)
self.assertXMLElement(delta, self._simple_filename('1_delta'))
midpoint = iris.analysis.calculus._construct_midpoint_coord(a)
self.assertXMLElement(midpoint, self._simple_filename('1_midpoint'))
def test_simple2_delta_midpoint(self):
a = iris.coords.DimCoord((np.arange(4, dtype=np.float32) * -90) + 180, long_name='foo',
units='degrees', circular=True)
self.assertXMLElement(a, self._simple_filename('2'))
delta = iris.analysis.calculus._construct_delta_coord(a)
self.assertXMLElement(delta, self._simple_filename('2_delta'))
midpoint = iris.analysis.calculus._construct_midpoint_coord(a)
self.assertXMLElement(midpoint, self._simple_filename('2_midpoint'))
def test_simple3_delta_midpoint(self):
a = iris.coords.DimCoord((np.arange(4, dtype=np.float32) * 90) - 180, long_name='foo',
units='degrees', circular=True)
a.guess_bounds(0.5)
self.assertXMLElement(a, self._simple_filename('3'))
delta = iris.analysis.calculus._construct_delta_coord(a)
self.assertXMLElement(delta, self._simple_filename('3_delta'))
midpoint = iris.analysis.calculus._construct_midpoint_coord(a)
self.assertXMLElement(midpoint, self._simple_filename('3_midpoint'))
def test_simple4_delta_midpoint(self):
a = iris.coords.AuxCoord(np.arange(4, dtype=np.float32) * 90 - 180, long_name='foo', units='degrees')
a.guess_bounds()
b = a.copy()
self.assertXMLElement(b, self._simple_filename('4'))
delta = iris.analysis.calculus._construct_delta_coord(b)
self.assertXMLElement(delta, self._simple_filename('4_delta'))
midpoint = iris.analysis.calculus._construct_midpoint_coord(b)
self.assertXMLElement(midpoint, self._simple_filename('4_midpoint'))
def test_simple5_not_degrees_delta_midpoint(self):
# Not sure it makes sense to have a circular coordinate which does not have a modulus but test it anyway.
a = iris.coords.DimCoord(np.arange(4, dtype=np.float32) * 90 - 180,
long_name='foo', units='meter', circular=True)
self.assertXMLElement(a, self._simple_filename('5'))
delta = iris.analysis.calculus._construct_delta_coord(a)
self.assertXMLElement(delta, self._simple_filename('5_delta'))
midpoints = iris.analysis.calculus._construct_midpoint_coord(a)
self.assertXMLElement(midpoints, self._simple_filename('5_midpoint'))
def test_simple6_delta_midpoint(self):
a = iris.coords.DimCoord(np.arange(5, dtype=np.float32), long_name='foo',
units='count', circular=True)
midpoints = iris.analysis.calculus._construct_midpoint_coord(a)
self.assertXMLElement(midpoints, self._simple_filename('6'))
def test_singular_delta(self):
# Test single valued coordinate mid-points when circular
lon = iris.coords.DimCoord(np.float32(-180.), 'latitude', units='degrees', circular=True)
r_expl = iris.analysis.calculus._construct_delta_coord(lon)
self.assertXMLElement(r_expl, ('analysis', 'delta_and_midpoint', 'delta_one_element_explicit.xml'))
# Test single valued coordinate mid-points when not circular
lon.circular = False
with self.assertRaises(ValueError):
iris.analysis.calculus._construct_delta_coord(lon)
def test_singular_midpoint(self):
# Test single valued coordinate mid-points when circular
lon = iris.coords.DimCoord(np.float32(-180.), 'latitude', units='degrees', circular=True)
r_expl = iris.analysis.calculus._construct_midpoint_coord(lon)
self.assertXMLElement(r_expl, ('analysis', 'delta_and_midpoint', 'midpoint_one_element_explicit.xml'))
# Test single valued coordinate mid-points when not circular
lon.circular = False
with self.assertRaises(ValueError):
iris.analysis.calculus._construct_midpoint_coord(lon)
class TestCoordTrig(tests.IrisTest):
def setUp(self):
points = np.arange(20, dtype=np.float32) * 2.3
bounds = np.concatenate([[points - 0.5 * 2.3],
[points + 0.5 * 2.3]]).T
self.lat = iris.coords.AuxCoord(points, 'latitude', units='degrees', bounds=bounds)
self.rlat = iris.coords.AuxCoord(np.deg2rad(points), 'latitude', units='radians', bounds=np.deg2rad(bounds))
def test_sin(self):
sin_of_coord = iris.analysis.calculus._coord_sin(self.lat)
sin_of_coord_radians = iris.analysis.calculus._coord_sin(self.rlat)
# Check the values are correct (within a tolerance)
np.testing.assert_array_almost_equal(np.sin(self.rlat.points), sin_of_coord.points)
np.testing.assert_array_almost_equal(np.sin(self.rlat.bounds), sin_of_coord.bounds)
# Check that the results of the sin function are almost equal when operating on a coord with degrees and radians
np.testing.assert_array_almost_equal(sin_of_coord.points, sin_of_coord_radians.points)
np.testing.assert_array_almost_equal(sin_of_coord.bounds, sin_of_coord_radians.bounds)
self.assertEqual(sin_of_coord.name(), 'sin(latitude)')
self.assertEqual(sin_of_coord.units, '1')
def test_cos(self):
cos_of_coord = iris.analysis.calculus._coord_cos(self.lat)
cos_of_coord_radians = iris.analysis.calculus._coord_cos(self.rlat)
# Check the values are correct (within a tolerance)
np.testing.assert_array_almost_equal(np.cos(self.rlat.points), cos_of_coord.points)
np.testing.assert_array_almost_equal(np.cos(self.rlat.bounds), cos_of_coord.bounds)
# Check that the results of the cos function are almost equal when operating on a coord with degrees and radians
np.testing.assert_array_almost_equal(cos_of_coord.points, cos_of_coord_radians.points)
np.testing.assert_array_almost_equal(cos_of_coord.bounds, cos_of_coord_radians.bounds)
# Now that we have tested the points & bounds, remove them and just test the xml
cos_of_coord = cos_of_coord.copy(points=np.array([1], dtype=np.float32))
cos_of_coord_radians = cos_of_coord_radians.copy(points=np.array([1], dtype=np.float32))
self.assertXMLElement(cos_of_coord, ('analysis', 'calculus', 'cos_simple.xml'))
self.assertXMLElement(cos_of_coord_radians, ('analysis', 'calculus', 'cos_simple_radians.xml'))
class TestCalculusSimple3(tests.IrisTest):
def setUp(self):
data = np.arange(2500, dtype=np.float32).reshape(50, 50)
cube = iris.cube.Cube(data, standard_name="x_wind", units="km/h")
self.lonlat_cs = iris.coord_systems.GeogCS(6371229)
cube.add_dim_coord(DimCoord(np.arange(50, dtype=np.float32) * 4.5 -180, 'longitude', units='degrees', coord_system=self.lonlat_cs), 0)
cube.add_dim_coord(DimCoord(np.arange(50, dtype=np.float32) * 4.5 -90, 'latitude', units='degrees', coord_system=self.lonlat_cs), 1)
self.cube = cube
def test_diff_wrt_lon(self):
t = iris.analysis.calculus.differentiate(self.cube, 'longitude')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'handmade2_wrt_lon.cml'))
def test_diff_wrt_lat(self):
t = iris.analysis.calculus.differentiate(self.cube, 'latitude')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'handmade2_wrt_lat.cml'))
class TestCalculusSimple2(tests.IrisTest):
def setUp(self):
data = np.array( [[1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 9]], dtype=np.float32)
cube = iris.cube.Cube(data, standard_name="x_wind", units="km/h")
self.lonlat_cs = iris.coord_systems.GeogCS(6371229)
cube.add_dim_coord(DimCoord(np.arange(4, dtype=np.float32) * 90 -180, 'longitude', units='degrees', circular=True, coord_system=self.lonlat_cs), 0)
cube.add_dim_coord(DimCoord(np.arange(5, dtype=np.float32) * 45 -90, 'latitude', units='degrees', coord_system=self.lonlat_cs), 1)
cube.add_aux_coord(DimCoord(np.arange(4, dtype=np.float32), long_name='x', units='count', circular=True), 0)
cube.add_aux_coord(DimCoord(np.arange(5, dtype=np.float32), long_name='y', units='count'), 1)
self.cube = cube
def test_diff_wrt_x(self):
t = iris.analysis.calculus.differentiate(self.cube, 'x')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'handmade_wrt_x.cml'))
def test_diff_wrt_y(self):
t = iris.analysis.calculus.differentiate(self.cube, 'y')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'handmade_wrt_y.cml'))
def test_diff_wrt_lon(self):
t = iris.analysis.calculus.differentiate(self.cube, 'longitude')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'handmade_wrt_lon.cml'))
def test_diff_wrt_lat(self):
t = iris.analysis.calculus.differentiate(self.cube, 'latitude')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'handmade_wrt_lat.cml'))
def test_delta_wrt_x(self):
t = iris.analysis.calculus.cube_delta(self.cube, 'x')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'delta_handmade_wrt_x.cml'))
def test_delta_wrt_y(self):
t = iris.analysis.calculus.cube_delta(self.cube, 'y')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'delta_handmade_wrt_y.cml'))
def test_delta_wrt_lon(self):
t = iris.analysis.calculus.cube_delta(self.cube, 'longitude')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'delta_handmade_wrt_lon.cml'))
def test_delta_wrt_lat(self):
t = iris.analysis.calculus.cube_delta(self.cube, 'latitude')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'delta_handmade_wrt_lat.cml'))
class TestCalculusSimple1(tests.IrisTest):
def setUp(self):
data = np.array( [ [1, 2, 3, 4, 5],
[2, 3, 4, 5, 6],
[3, 4, 5, 6, 7],
[4, 5, 6, 7, 8],
[5, 6, 7, 8, 10] ], dtype=np.float32)
cube = iris.cube.Cube(data, standard_name="x_wind", units="km/h")
cube.add_dim_coord(DimCoord(np.arange(5, dtype=np.float32), long_name='x', units='count'), 0)
cube.add_dim_coord(DimCoord(np.arange(5, dtype=np.float32), long_name='y', units='count'), 1)
self.cube = cube
def test_diff_wrt_x(self):
t = iris.analysis.calculus.differentiate(self.cube, 'x')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'handmade_simple_wrt_x.cml'))
def test_delta_wrt_x(self):
t = iris.analysis.calculus.cube_delta(self.cube, 'x')
self.assertCMLApproxData(t, ('analysis', 'calculus', 'delta_handmade_simple_wrt_x.cml'))
def build_cube(data, spherical=False):
"""
Create a cube suitable for testing.
"""
cube = iris.cube.Cube(data, standard_name="x_wind", units="km/h")
nx = data.shape[-1]
ny = data.shape[-2]
nz = data.shape[-3] if data.ndim > 2 else None
dimx = data.ndim - 1
dimy = data.ndim - 2
dimz = data.ndim - 3 if data.ndim > 2 else None
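    # Coordinates go on the trailing dimensions: x on the last axis, y on the next, z (when present) before that.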
if spherical:
if spherical == 'rotated':
hcs = iris.coord_systems.RotatedGeogCS(10, 20)
lon_name, lat_name = 'grid_longitude', 'grid_latitude'
else:
hcs = iris.coord_systems.GeogCS(6321)
lon_name, lat_name = 'longitude', 'latitude'
cube.add_dim_coord(DimCoord(np.arange(-180, 180, 360./nx, dtype=np.float32), lon_name, units='degrees', coord_system=hcs, circular=True), dimx)
cube.add_dim_coord(DimCoord(np.arange(-90, 90, 180./ny, dtype=np.float32), lat_name, units='degrees', coord_system=hcs), dimy)
else:
cube.add_dim_coord(DimCoord(np.arange(nx, dtype=np.float32) * 2.21 + 2, 'projection_x_coordinate', units='meters'), dimx)
cube.add_dim_coord(DimCoord(np.arange(ny, dtype=np.float32) * 25 -50, 'projection_y_coordinate', units='meters'), dimy)
if nz is None:
cube.add_aux_coord(DimCoord(np.array([10], dtype=np.float32), long_name='z', units='meters', attributes={"positive":"up"}))
else:
cube.add_dim_coord(DimCoord(np.arange(nz, dtype=np.float32) * 2, long_name='z', units='meters', attributes={"positive":"up"}), dimz)
return cube
class TestCalculusWKnownSolutions(tests.IrisTest):
def get_coord_pts(self, cube):
"""return (x_pts, x_ones, y_pts, y_ones, z_pts, z_ones) for the given cube."""
x = cube.coord(axis='X')
y = cube.coord(axis='Y')
z = cube.coord(axis='Z')
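        # Reshape the point arrays so they broadcast against each other over (z, y, x).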
if z and z.shape[0] > 1:
x_shp = (1, 1, x.shape[0])
y_shp = (1, y.shape[0], 1)
z_shp = (z.shape[0], 1, 1)
else:
x_shp = (1, x.shape[0])
y_shp = (y.shape[0], 1)
z_shp = None
x_pts = x.points.reshape(x_shp)
y_pts = y.points.reshape(y_shp)
x_ones = np.ones(x_shp)
y_ones = np.ones(y_shp)
if z_shp:
z_pts = z.points.reshape(z_shp)
z_ones = np.ones(z_shp)
else:
z_pts = None
z_ones = None
return (x_pts, x_ones, y_pts, y_ones, z_pts, z_ones)
def test_contrived_differential1(self):
# testing :
# F = ( cos(lat) cos(lon) )
        # dF/dLon = - sin(lon) cos(lat) (divided by cos(lat) below to simplify)
cube = build_cube(np.empty((30, 60)), spherical=True)
x = cube.coord('longitude')
y = cube.coord('latitude')
y_dim = cube.coord_dims(y)[0]
cos_x_pts = np.cos(np.radians(x.points)).reshape(1, x.shape[0])
cos_y_pts = np.cos(np.radians(y.points)).reshape(y.shape[0], 1)
cube.data = cos_y_pts * cos_x_pts
lon_coord = x.copy()
lon_coord.convert_units('radians')
lat_coord = y.copy()
lat_coord.convert_units('radians')
cos_lat_coord = iris.coords.AuxCoord.from_coord(lat_coord)
cos_lat_coord.points = np.cos(lat_coord.points)
cos_lat_coord.units = '1'
cos_lat_coord.rename('cos({})'.format(lat_coord.name()))
temp = iris.analysis.calculus.differentiate(cube, lon_coord)
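        # Divide out the cos(latitude) factor so the expected field is simply -sin(lon).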
df_dlon = iris.analysis.maths.divide(temp, cos_lat_coord, y_dim)
x = df_dlon.coord('longitude')
y = df_dlon.coord('latitude')
sin_x_pts = np.sin(np.radians(x.points)).reshape(1, x.shape[0])
y_ones = np.ones((y.shape[0], 1))
data = - sin_x_pts * y_ones
result = df_dlon.copy(data=data)
np.testing.assert_array_almost_equal(result.data, df_dlon.data, decimal=3)
def test_contrived_differential2(self):
# testing :
# w = y^2
# dw_dy = 2*y
cube = build_cube(np.empty((10, 30, 60)), spherical=False)
x_pts, x_ones, y_pts, y_ones, z_pts, z_ones = self.get_coord_pts(cube)
w = cube.copy(data=z_ones * x_ones * pow(y_pts, 2.))
r = iris.analysis.calculus.differentiate(w, 'projection_y_coordinate')
x_pts, x_ones, y_pts, y_ones, z_pts, z_ones = self.get_coord_pts(r)
result = r.copy(data = y_pts * 2. * x_ones * z_ones)
np.testing.assert_array_almost_equal(result.data, r.data, decimal=6)
def test_contrived_non_spherical_curl1(self):
# testing :
# F(x, y, z) = (y, 0, 0)
# curl( F(x, y, z) ) = (0, 0, -1)
cube = build_cube(np.empty((25, 50)), spherical=False)
x_pts, x_ones, y_pts, y_ones, z_pts, z_ones = self.get_coord_pts(cube)
u = cube.copy(data=x_ones * y_pts)
u.rename("u_wind")
v = cube.copy(data=u.data * 0)
v.rename("v_wind")
r = iris.analysis.calculus.curl(u, v)
        # Curl returns None when there are no components of curl
self.assertEqual(r[0], None)
self.assertEqual(r[1], None)
cube = r[2]
self.assertCML(
cube,
('analysis', 'calculus', 'grad_contrived_non_spherical1.cml'),
checksum=False)
self.assertTrue(np.all(np.abs(cube.data - (-1.0)) < 1.0e-7))
def test_contrived_non_spherical_curl2(self):
# testing :
# F(x, y, z) = (z^3, x+2, y^2)
# curl( F(x, y, z) ) = (2y, 3z^2, 1)
cube = build_cube(np.empty((10, 25, 50)), spherical=False)
x_pts, x_ones, y_pts, y_ones, z_pts, z_ones = self.get_coord_pts(cube)
u = cube.copy(data=pow(z_pts, 3) * x_ones * y_ones)
v = cube.copy(data=z_ones * (x_pts + 2.) * y_ones)
w = cube.copy(data=z_ones * x_ones * pow(y_pts, 2.))
u.rename('u_wind')
v.rename('v_wind')
w.rename('w_wind')
r = iris.analysis.calculus.curl(u, v, w)
# TODO #235 When regridding is not nearest neighbour: the commented out code could be made to work
# r[0].data should now be tending towards result.data as the resolution of the grid gets higher.
# result = r[0].copy(data=True)
# x_pts, x_ones, y_pts, y_ones, z_pts, z_ones = self.get_coord_pts(result)
# result.data = y_pts * 2. * x_ones * z_ones
# print(repr(r[0].data[0:1, 0:5, 0:25:5]))
# print(repr(result.data[0:1, 0:5, 0:25:5]))
# np.testing.assert_array_almost_equal(result.data, r[0].data, decimal=2)
#
# result = r[1].copy(data=True)
# x_pts, x_ones, y_pts, y_ones, z_pts, z_ones = self.get_coord_pts(result)
# result.data = pow(z_pts, 2) * x_ones * y_ones
# np.testing.assert_array_almost_equal(result.data, r[1].data, decimal=6)
result = r[2].copy()
result.data = result.data * 0 + 1
np.testing.assert_array_almost_equal(result.data, r[2].data, decimal=4)
normalise_order(r[1])
self.assertCML(r, ('analysis', 'calculus', 'curl_contrived_cartesian2.cml'), checksum=False)
def test_contrived_spherical_curl1(self):
# testing:
# F(lon, lat, r) = (- r sin(lon), -r cos(lon) sin(lat), 0)
# curl( F(x, y, z) ) = (0, 0, 0)
cube = build_cube(np.empty((30, 60)), spherical=True)
radius = iris.analysis.cartography.DEFAULT_SPHERICAL_EARTH_RADIUS
x = cube.coord('longitude')
y = cube.coord('latitude')
cos_x_pts = np.cos(np.radians(x.points)).reshape(1, x.shape[0])
sin_x_pts = np.sin(np.radians(x.points)).reshape(1, x.shape[0])
cos_y_pts = np.cos(np.radians(y.points)).reshape(y.shape[0], 1)
sin_y_pts = np.sin(np.radians(y.points)).reshape(y.shape[0], 1)
y_ones = np.ones((cube.shape[0], 1))
u = cube.copy(data=-sin_x_pts * y_ones * radius)
v = cube.copy(data=-cos_x_pts * sin_y_pts * radius)
u.rename('u_wind')
v.rename('v_wind')
r = iris.analysis.calculus.curl(u, v)[2]
result = r.copy(data=r.data * 0)
# Note: This numerical comparison was created when the radius was 1000 times smaller
np.testing.assert_array_almost_equal(result.data[5:-5], r.data[5:-5]/1000.0, decimal=1)
self.assertCML(r, ('analysis', 'calculus', 'grad_contrived1.cml'), checksum=False)
def test_contrived_spherical_curl2(self):
# testing:
# F(lon, lat, r) = (r sin(lat) cos(lon), -r sin(lon), 0)
# curl( F(x, y, z) ) = (0, 0, -2 cos(lon) cos(lat) )
cube = build_cube(np.empty((70, 150)), spherical=True)
radius = iris.analysis.cartography.DEFAULT_SPHERICAL_EARTH_RADIUS
x = cube.coord('longitude')
y = cube.coord('latitude')
cos_x_pts = np.cos(np.radians(x.points)).reshape(1, x.shape[0])
sin_x_pts = np.sin(np.radians(x.points)).reshape(1, x.shape[0])
cos_y_pts = np.cos(np.radians(y.points)).reshape(y.shape[0], 1)
sin_y_pts = np.sin(np.radians(y.points)).reshape(y.shape[0], 1)
y_ones = np.ones((cube.shape[0], 1))
u = cube.copy(data=sin_y_pts * cos_x_pts * radius)
v = cube.copy(data=-sin_x_pts * y_ones * radius)
u.rename('u_wind')
v.rename('v_wind')
lon_coord = x.copy()
lon_coord.convert_units('radians')
lat_coord = y.copy()
lat_coord.convert_units('radians')
cos_lat_coord = iris.coords.AuxCoord.from_coord(lat_coord)
cos_lat_coord.points = np.cos(lat_coord.points)
cos_lat_coord.units = '1'
cos_lat_coord.rename('cos({})'.format(lat_coord.name()))
r = iris.analysis.calculus.curl(u, v)[2]
x = r.coord('longitude')
y = r.coord('latitude')
cos_x_pts = np.cos(np.radians(x.points)).reshape(1, x.shape[0])
cos_y_pts = np.cos(np.radians(y.points)).reshape(y.shape[0], 1)
result = r.copy(data=2*cos_x_pts*cos_y_pts)
# Note: This numerical comparison was created when the radius was 1000 times smaller
np.testing.assert_array_almost_equal(result.data[30:-30, :], r.data[30:-30, :]/1000.0, decimal=1)
self.assertCML(r, ('analysis', 'calculus', 'grad_contrived2.cml'), checksum=False)
class TestCurlInterface(tests.IrisTest):
def test_non_conformed(self):
u = build_cube(np.empty((50, 20)), spherical=True)
v = u.copy()
y = v.coord('latitude')
y.points += 5
self.assertRaises(ValueError, iris.analysis.calculus.curl, u, v)
def test_standard_name(self):
nx = 20; ny = 50; nz = None;
u = build_cube(np.empty((50, 20)), spherical=True)
v = u.copy()
w = u.copy()
u.rename('u_wind')
v.rename('v_wind')
w.rename('w_wind')
r = iris.analysis.calculus.spatial_vectors_with_phenom_name(u, v)
self.assertEqual(r, (('u', 'v', 'w'), 'wind'))
r = iris.analysis.calculus.spatial_vectors_with_phenom_name(u, v, w)
self.assertEqual(r, (('u', 'v', 'w'), 'wind'))
self.assertRaises(ValueError, iris.analysis.calculus.spatial_vectors_with_phenom_name, u, None, w)
self.assertRaises(ValueError, iris.analysis.calculus.spatial_vectors_with_phenom_name, None, None, w)
self.assertRaises(ValueError, iris.analysis.calculus.spatial_vectors_with_phenom_name, None, None, None)
u.rename("x foobar wibble")
v.rename("y foobar wibble")
w.rename("z foobar wibble")
r = iris.analysis.calculus.spatial_vectors_with_phenom_name(u, v)
self.assertEqual(r, (('x', 'y', 'z'), 'foobar wibble'))
r = iris.analysis.calculus.spatial_vectors_with_phenom_name(u, v, w)
self.assertEqual(r, (('x', 'y', 'z'), 'foobar wibble'))
u.rename("wibble foobar")
v.rename("wobble foobar")
w.rename("tipple foobar")
        # r = iris.analysis.calculus.spatial_vectors_with_phenom_name(u, v, w) # should raise a ValueError...
self.assertRaises(ValueError, iris.analysis.calculus.spatial_vectors_with_phenom_name, u, v)
self.assertRaises(ValueError, iris.analysis.calculus.spatial_vectors_with_phenom_name, u, v, w)
u.rename("eastward_foobar")
v.rename("northward_foobar")
w.rename("upward_foobar")
r = iris.analysis.calculus.spatial_vectors_with_phenom_name(u, v)
self.assertEqual(r, (('eastward', 'northward', 'upward'), 'foobar'))
r = iris.analysis.calculus.spatial_vectors_with_phenom_name(u, v, w)
self.assertEqual(r, (('eastward', 'northward', 'upward'), 'foobar'))
# Change it to have an inconsistent phenomenon
v.rename('northward_foobar2')
self.assertRaises(ValueError, iris.analysis.calculus.spatial_vectors_with_phenom_name, u, v)
def test_rotated_pole(self):
u = build_cube(np.empty((30, 20)), spherical='rotated')
v = u.copy()
u.rename('u_wind')
v.rename('v_wind')
x, y, z = iris.analysis.calculus.curl(u, v)
self.assertEqual(z.coord_system(), u.coord_system())
if __name__ == "__main__":
unittest.main()
|
mo-g/iris
|
lib/iris/tests/test_analysis_calculus.py
|
Python
|
gpl-3.0
| 27,231
|
import humblewx
import wx
class VariablesExampleDialog(humblewx.Dialog):
"""
<BoxSizerVertical>
<StaticText
label="$(translated_label)"
/>
</BoxSizerVertical>
"""
def __init__(self, parent):
humblewx.Dialog.__init__(self, humblewx.Controller, parent, {
"translated_label": "Gutent tag",
})
if __name__ == "__main__":
app = wx.App()
dialog = VariablesExampleDialog(None)
dialog.ShowModal()
dialog.Destroy()
|
rickardlindberg/humblewx
|
examples/variables.py
|
Python
|
gpl-3.0
| 504
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('datasources', '0007_auto_20140723_1342'),
]
operations = [
migrations.RenameField(
model_name='demographicdatasourceproblem',
old_name='source_file',
new_name='datasource',
),
migrations.AlterField(
model_name='demographicdatafieldname',
name='name',
field=models.CharField(max_length=10, db_index=True),
),
]
|
WorldBank-Transport/open-transit-indicators
|
python/django/datasources/migrations/0008_auto_20140723_1804.py
|
Python
|
gpl-3.0
| 606
|
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class VeohCom(SimpleHoster):
__name__ = "VeohCom"
__type__ = "hoster"
__version__ = "0.23"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?veoh\.com/(tv/)?(watch|videos)/(?P<ID>v\w+)'
__config__ = [("use_premium", "bool" , "Use premium account if available", True ),
("quality" , "Low;High;Auto", "Quality" , "Auto")]
__description__ = """Veoh.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "vuolter@gmail.com")]
NAME_PATTERN = r'<meta name="title" content="(?P<N>.*?)"'
OFFLINE_PATTERN = r'>Sorry, we couldn\'t find the video you were looking for'
URL_REPLACEMENTS = [(__pattern__ + ".*", r'http://www.veoh.com/watch/\g<ID>')]
COOKIES = [("veoh.com", "lassieLocale", "en")]
def setup(self):
self.resume_download = True
self.multiDL = True
self.chunk_limit = -1
def handle_free(self, pyfile):
quality = self.get_config('quality')
if quality == "Auto":
quality = ("High", "Low")
for q in quality:
pattern = r'"fullPreviewHash%sPath":"(.+?)"' % q
m = re.search(pattern, self.html)
if m is not None:
pyfile.name += ".mp4"
self.link = m.group(1).replace("\\", "")
return
else:
self.log_info(_("No %s quality video found") % q.upper())
else:
self.fail(_("No video found!"))
getInfo = create_getInfo(VeohCom)
|
jansohn/pyload
|
module/plugins/hoster/VeohCom.py
|
Python
|
gpl-3.0
| 1,707
|
"""ServerFiles"""
from __future__ import absolute_import
import serverfiles
try:
from Orange.utils import environ
except ImportError:
from . import environ
from orangecontrib.bio.utils import serverfile_path
server_url = "http://orange.biolab.si/serverfiles-bio/"
class ServerFiles(serverfiles.ServerFiles):
def __init__(self, server=server_url):
serverfiles.ServerFiles.__init__(self, server)
PATH = serverfile_path()
LOCALFILES = serverfiles.LocalFiles(PATH, )
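# LOCALFILES only reads the local cache; helpers that may download create a LocalFiles bound to the remote ServerFiles instead.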
def localpath(*args, **kwargs):
return LOCALFILES.localpath(*args, **kwargs)
def listfiles(*args, **kwargs):
return [fname for domain, fname in LOCALFILES.listfiles(*args, **kwargs)]
def localpath_download(*path, **kwargs):
files = serverfiles.LocalFiles(PATH, serverfiles=ServerFiles())
return files.localpath_download(*path, **kwargs)
def download(*args, **kwargs):
files = serverfiles.LocalFiles(PATH, serverfiles=ServerFiles())
return files.download(*args, **kwargs)
def allinfo(*args, **kwargs):
files = serverfiles.LocalFiles(PATH, serverfiles=ServerFiles())
return files.allinfo(*args, **kwargs)
def info(*args, **kwargs):
files = serverfiles.LocalFiles(PATH, serverfiles=ServerFiles())
return files.info(*args, **kwargs)
def update(*path, **kwargs):
files = serverfiles.LocalFiles(PATH, serverfiles=ServerFiles())
return files.update(*path, **kwargs)
def sizeformat(size):
return serverfiles.sizeformat(size)
|
ales-erjavec/orange-bio
|
orangecontrib/bio/utils/serverfiles.py
|
Python
|
gpl-3.0
| 1,481
|
# coding: utf-8
import unittest
import os
from datetime import datetime
import logging
from sqlalchemy.engine.reflection import Inspector
from niamoto.testing import set_test_path
set_test_path()
from niamoto import log
log.STREAM_LOGGING_LEVEL = logging.CRITICAL
log.FILE_LOGGING_LEVEL = logging.DEBUG
from niamoto.conf import settings, NIAMOTO_HOME
from niamoto.testing.test_database_manager import TestDatabaseManager
from niamoto.testing.base_tests import BaseTestNiamotoSchemaCreated
from niamoto.raster.raster_manager import RasterManager
from niamoto.db import metadata as niamoto_db_meta
from niamoto.db.connector import Connector
class TestRasterManager(BaseTestNiamotoSchemaCreated):
"""
Test case for RasterManager class.
"""
def tearDown(self):
delete_stmt = niamoto_db_meta.raster_registry.delete()
with Connector.get_connection() as connection:
inspector = Inspector.from_engine(connection)
tables = inspector.get_table_names(
schema=settings.NIAMOTO_RASTER_SCHEMA
)
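            # Drop every table in the raster schema except the registry itself, then clear the registry rows.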
for tb in tables:
if tb != niamoto_db_meta.raster_registry.name:
connection.execute("DROP TABLE IF EXISTS {};".format(
"{}.{}".format(settings.NIAMOTO_RASTER_SCHEMA, tb)
))
connection.execute(delete_stmt)
def test_get_raster_list(self):
df1 = RasterManager.get_raster_list()
self.assertEqual(len(df1), 0)
data = [
{
'name': 'raster_1',
'date_create': datetime.now(),
'date_update': datetime.now(),
'properties': {},
},
{
'name': 'raster_2',
'date_create': datetime.now(),
'date_update': datetime.now(),
'properties': {},
},
{
'name': 'raster_3',
'date_create': datetime.now(),
'date_update': datetime.now(),
'properties': {},
},
]
ins = niamoto_db_meta.raster_registry.insert().values(data)
with Connector.get_connection() as connection:
connection.execute(ins)
df2 = RasterManager.get_raster_list()
self.assertEqual(len(df2), 3)
def test_add_raster(self):
# Test non existing raster
null_path = os.path.join(NIAMOTO_HOME, "NULL.tif")
self.assertRaises(
FileNotFoundError,
RasterManager.add_raster,
"null_raster",
null_path,
tile_dimension=(200, 200)
)
# Test existing raster
test_raster = os.path.join(
NIAMOTO_HOME,
"data",
"raster",
"rainfall_wgs84.tif"
)
RasterManager.add_raster(
"rainfall",
test_raster,
)
df = RasterManager.get_raster_list()
self.assertEqual(len(df), 1)
self.assertEqual(df['name'].iloc[0], 'rainfall')
engine = Connector.get_engine()
inspector = Inspector.from_engine(engine)
self.assertIn(
'rainfall',
inspector.get_table_names(schema=settings.NIAMOTO_RASTER_SCHEMA),
)
def test_update_raster(self):
# Add raster
test_raster = os.path.join(
NIAMOTO_HOME,
"data",
"raster",
"rainfall_wgs84.tif"
)
RasterManager.add_raster(
"rainfall",
test_raster,
tile_dimension=(200, 200),
)
# Update raster
RasterManager.update_raster(
"rainfall",
test_raster,
new_name="rainfall_new",
tile_dimension=(100, 100),
)
df = RasterManager.get_raster_list()
engine = Connector.get_engine()
inspector = Inspector.from_engine(engine)
self.assertIn(
'rainfall_new',
inspector.get_table_names(schema=settings.NIAMOTO_RASTER_SCHEMA),
)
self.assertNotIn(
'rainfall',
inspector.get_table_names(schema=settings.NIAMOTO_RASTER_SCHEMA),
)
# Update raster, only properties
RasterManager.update_raster(
"rainfall_new",
properties={'test': 10}
)
def test_delete_raster(self):
test_raster = os.path.join(
NIAMOTO_HOME,
"data",
"raster",
"rainfall_wgs84.tif"
)
RasterManager.add_raster(
"rainfall",
test_raster,
tile_dimension=(200, 200),
)
RasterManager.delete_raster("rainfall")
def test_raster_srid(self):
test_raster = os.path.join(
NIAMOTO_HOME,
"data",
"raster",
"rainfall_wgs84.tif"
)
srid = RasterManager.get_raster_srid(test_raster)
self.assertEqual(srid, 4326)
df = RasterManager.get_raster_list()
self.assertEqual(len(df), 0)
if __name__ == '__main__':
TestDatabaseManager.setup_test_database()
TestDatabaseManager.create_schema(settings.NIAMOTO_SCHEMA)
TestDatabaseManager.create_schema(settings.NIAMOTO_RASTER_SCHEMA)
TestDatabaseManager.create_schema(settings.NIAMOTO_VECTOR_SCHEMA)
unittest.main(exit=False)
TestDatabaseManager.teardown_test_database()
|
niamoto/niamoto-core
|
tests/raster/test_raster_manager.py
|
Python
|
gpl-3.0
| 5,480
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for date encoder"""
import numpy
import itertools
from nupic.encoders.base import defaultDtype
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
import unittest2 as unittest
from nupic.encoders.scalar import ScalarEncoder
#########################################################################
class ScalarEncoderTest(unittest.TestCase):
'''Unit tests for ScalarEncoder class'''
def setUp(self):
# use of forced is not recommended, but used here for readability, see scalar.py
self._l = ScalarEncoder(name='scalar', n=14, w=3, minval=1, maxval=8, periodic=True, forced=True)
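    # Periodic encoder over [1, 8) with n=14 bits and w=3 active bits, giving resolution 0.5 and radius 1.5.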
############################################################################
def testScalarEncoder(self):
"""Testing ScalarEncoder..."""
# -------------------------------------------------------------------------
# test missing values
mv = ScalarEncoder(name='mv', n=14, w=3, minval=1, maxval=8, periodic=False, forced=True)
empty = mv.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
print "\nEncoded missing data \'None\' as %s" % empty
self.assertEqual(empty.sum(), 0)
# --------------------------------------------------------------------
def testNaNs(self):
"""test NaNs"""
mv = ScalarEncoder(name='mv', n=14, w=3, minval=1, maxval=8, periodic=False, forced=True)
empty = mv.encode(float("nan"))
print "\nEncoded missing data \'None\' as %s" % empty
self.assertEqual(empty.sum(), 0)
# ------------------------------------------------------------------------
def testBottomUpEncodingPeriodicEncoder(self):
"""Test bottom-up encoding for a Periodic encoder"""
l = ScalarEncoder(n=14, w=3, minval=1, maxval=8, periodic=True, forced=True)
self.assertEqual(l.getDescription(), [("[1:8]", 0)])
l = ScalarEncoder(name='scalar', n=14, w=3, minval=1, maxval=8, periodic=True, forced=True)
self.assertEqual(l.getDescription(), [("scalar", 0)])
self.assertTrue((l.encode(3) == numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(3.1) == l.encode(3)).all())
self.assertTrue((l.encode(3.5) == numpy.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(3.6) == l.encode(3.5)).all())
self.assertTrue((l.encode(3.7) == l.encode(3.5)).all())
self.assertTrue((l.encode(4) == numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(1) == numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(1.5) == numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(7) == numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(7.5) == numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
dtype=defaultDtype)).all())
self.assertEqual(l.resolution, 0.5)
self.assertEqual(l.radius, 1.5)
# Test that we get the same encoder when we construct it using resolution
# instead of n
def testCreateResolution(self):
"""Test that we get the same encoder when we construct it using resolution instead of n"""
l = self._l
d = l.__dict__
l = ScalarEncoder(name='scalar', resolution=0.5, w=3, minval=1, maxval=8,
periodic=True, forced=True)
self.assertEqual(l.__dict__, d)
# Test that we get the same encoder when we construct it using radius
# instead of n
l = ScalarEncoder(name='scalar', radius=1.5, w=3, minval=1, maxval=8,
periodic=True, forced=True)
self.assertEqual(l.__dict__, d)
# -------------------------------------------------------------------------
# Test the input description generation, top-down compute, and bucket
# support on a periodic encoder
def testDecodeAndResolution(self):
"""Testing periodic encoder decoding, resolution of """
l = self._l
print l.resolution
v = l.minval
while v < l.maxval:
output = l.encode(v)
decoded = l.decode(output)
print "decoding", output, "(%f)=>" % v, l.decodedToStr(decoded)
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
(rangeMin, rangeMax) = ranges[0]
self.assertEqual(rangeMin, rangeMax)
self.assertTrue(abs(rangeMin - v) < l.resolution)
topDown = l.topDownCompute(output)[0]
print "topdown =>", topDown
self.assertTrue((topDown.encoding == output).all())
self.assertTrue(abs(topDown.value - v) <= l.resolution / 2)
# Test bucket support
bucketIndices = l.getBucketIndices(v)
print "bucket index =>", bucketIndices[0]
topDown = l.getBucketInfo(bucketIndices)[0]
self.assertTrue(abs(topDown.value - v) <= l.resolution / 2)
self.assertEqual(topDown.value, l.getBucketValues()[bucketIndices[0]])
self.assertEqual(topDown.scalar, topDown.value)
self.assertTrue((topDown.encoding == output).all())
# Next value
v += l.resolution / 4
# -----------------------------------------------------------------------
# Test the input description generation on a large number, periodic encoder
l = ScalarEncoder(name='scalar', radius=1.5, w=3, minval=1, maxval=8,
periodic=True, forced=True)
print "\nTesting periodic encoder decoding, resolution of %f..." % \
l.resolution
# Test with a "hole"
decoded = l.decode(numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 1 and numpy.array_equal(ranges[0], [7.5, 7.5]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
# Test with something wider than w, and with a hole, and wrapped
decoded = l.decode(numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 2 and numpy.array_equal(ranges[0], [7.5, 8]) \
and numpy.array_equal(ranges[1], [1, 1]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
# Test with something wider than w, no hole
decoded = l.decode(numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 1 and numpy.array_equal(ranges[0], [1.5, 2.5]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
# Test with 2 ranges
decoded = l.decode(numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 2 and numpy.array_equal(ranges[0], [1.5, 1.5]) \
and numpy.array_equal(ranges[1], [5.5, 6.0]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
# Test with 2 ranges, 1 of which is narrower than w
decoded = l.decode(numpy.array([0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 2 and numpy.array_equal(ranges[0], [1.5, 1.5]) \
and numpy.array_equal(ranges[1], [5.5, 6.0]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
# ============================================================================
def testCloseness(self):
"""Test closenessScores for a periodic encoder"""
encoder = ScalarEncoder(w=7, minval=0, maxval=7, radius=1, periodic=True,
name="day of week", forced=True)
scores = encoder.closenessScores((2, 4, 7), (4, 2, 1), fractional=False)
for actual, score in itertools.izip((2, 2, 1), scores):
self.assertEqual(actual, score)
# ============================================================================
def testNonPeriodicBottomUp(self):
"""Test Non-periodic encoder bottom-up"""
l = ScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=10, periodic=False, forced=True)
print "\nTesting non-periodic encoder encoding, resolution of %f..." % \
l.resolution
self.assertTrue((l.encode(1) == numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(2) == numpy.array([0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(10) == numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
dtype=defaultDtype)).all())
# Test that we get the same encoder when we construct it using resolution
# instead of n
d = l.__dict__
l = ScalarEncoder(name='scalar', resolution=1, w=5, minval=1, maxval=10,
periodic=False, forced=True)
self.assertEqual(l.__dict__, d)
# Test that we get the same encoder when we construct it using radius
# instead of n
l = ScalarEncoder(name='scalar', radius=5, w=5, minval=1, maxval=10, periodic=False, forced=True)
self.assertEqual(l.__dict__, d)
# -------------------------------------------------------------------------
# Test the input description generation and topDown decoding of a non-periodic
# encoder
v = l.minval
print "\nTesting non-periodic encoder decoding, resolution of %f..." % \
l.resolution
while v < l.maxval:
output = l.encode(v)
decoded = l.decode(output)
print "decoding", output, "(%f)=>" % v, l.decodedToStr(decoded)
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
(rangeMin, rangeMax) = ranges[0]
self.assertEqual(rangeMin, rangeMax)
self.assertTrue(abs(rangeMin - v) < l.resolution)
topDown = l.topDownCompute(output)[0]
print "topdown =>", topDown
self.assertTrue((topDown.encoding == output).all())
self.assertTrue(abs(topDown.value - v) <= l.resolution)
# Test bucket support
bucketIndices = l.getBucketIndices(v)
print "bucket index =>", bucketIndices[0]
topDown = l.getBucketInfo(bucketIndices)[0]
self.assertTrue(abs(topDown.value - v) <= l.resolution / 2)
self.assertEqual(topDown.scalar, topDown.value)
self.assertTrue((topDown.encoding == output).all())
# Next value
v += l.resolution / 4
# Make sure we can fill in holes
decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 1 and numpy.array_equal(ranges[0], [10, 10]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1]))
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertTrue(len(ranges) == 1 and numpy.array_equal(ranges[0], [10, 10]))
print "decodedToStr of", ranges, "=>", l.decodedToStr(decoded)
#Test min and max
l = ScalarEncoder(name='scalar', n=14, w=3, minval=1, maxval=10, periodic=False, forced=True)
decoded = l.topDownCompute(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]))[0]
self.assertEqual(decoded.value, 10)
decoded = l.topDownCompute(numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))[0]
self.assertEqual(decoded.value, 1)
#Make sure only the last and first encoding encodes to max and min, and there is no value greater than max or min
l = ScalarEncoder(name='scalar', n=140, w=3, minval=1, maxval=141, periodic=False, forced=True)
for i in range(137):
iterlist = [0 for _ in range(140)]
for j in range(i, i+3):
        iterlist[j] = 1
npar = numpy.array(iterlist)
decoded = l.topDownCompute(npar)[0]
self.assertTrue(decoded.value <= 141)
self.assertTrue(decoded.value >= 1)
self.assertTrue(decoded.value < 141 or i==137)
self.assertTrue(decoded.value > 1 or i == 0)
# -------------------------------------------------------------------------
# Test the input description generation and top-down compute on a small number
# non-periodic encoder
l = ScalarEncoder(name='scalar', n=15, w=3, minval=.001, maxval=.002,
periodic=False, forced=True)
print "\nTesting non-periodic encoder decoding, resolution of %f..." % \
l.resolution
v = l.minval
while v < l.maxval:
output = l.encode(v)
decoded = l.decode(output)
print "decoding", output, "(%f)=>" % v, l.decodedToStr(decoded)
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
(rangeMin, rangeMax) = ranges[0]
self.assertEqual(rangeMin, rangeMax)
self.assertTrue(abs(rangeMin - v) < l.resolution)
topDown = l.topDownCompute(output)[0].value
print "topdown =>", topDown
self.assertTrue(abs(topDown - v) <= l.resolution / 2)
v += l.resolution / 4
# -------------------------------------------------------------------------
# Test the input description generation on a large number, non-periodic encoder
l = ScalarEncoder(name='scalar', n=15, w=3, minval=1, maxval=1000000000,
periodic=False, forced=True)
print "\nTesting non-periodic encoder decoding, resolution of %f..." % \
l.resolution
v = l.minval
while v < l.maxval:
output = l.encode(v)
decoded = l.decode(output)
print "decoding", output, "(%f)=>" % v, l.decodedToStr(decoded)
(fieldsDict, fieldNames) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, desc) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
(rangeMin, rangeMax) = ranges[0]
self.assertEqual(rangeMin, rangeMax)
self.assertTrue(abs(rangeMin - v) < l.resolution)
topDown = l.topDownCompute(output)[0].value
print "topdown =>", topDown
self.assertTrue(abs(topDown - v) <= l.resolution / 2)
v += l.resolution / 4
# -------------------------------------------------------------------------
# Test setting fieldStats after initialization
if False:
#TODO: remove all this? (and fieldstats from ScalarEncoder (if applicable) )?
# Modified on 11/20/12 12:53 PM - setFieldStats not applicable for ScalarEncoder
l = ScalarEncoder(n=14, w=3, minval=100, maxval=800, periodic=True, forced=True)
l.setFieldStats("this", {"this":{"min":1, "max":8}})
l = ScalarEncoder(name='scalar', n=14, w=3, minval=100, maxval=800, periodic=True, forced=True)
l.setFieldStats("this", {"this":{"min":1, "max":8}})
self.assertTrue((l.encode(3) == numpy.array([0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(3.1) == l.encode(3)).all())
self.assertTrue((l.encode(3.5) == numpy.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(3.6) == l.encode(3.5)).all())
self.assertTrue((l.encode(3.7) == l.encode(3.5)).all())
self.assertTrue((l.encode(4) == numpy.array([0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(1) == numpy.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(1.5) == numpy.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(7) == numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(7.5) == numpy.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
dtype=defaultDtype)).all())
l = ScalarEncoder(name='scalar', n=14, w=5, minval=100, maxval=1000, periodic=False, forced=True)
l.setFieldStats("this", {"this":{"min":1, "max":10}})
print "\nTesting non-periodic encoding using setFieldStats, resolution of %f..." % \
l.resolution
self.assertTrue((l.encode(1) == numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(2) == numpy.array([0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)).all())
self.assertTrue((l.encode(10) == numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
dtype=defaultDtype)).all())
# ============================================================================
def testEncodeInvalidInputType(self):
encoder = ScalarEncoder(name='enc', n=14, w=3, minval=1, maxval=8,
periodic=False, forced=True)
with self.assertRaises(TypeError):
encoder.encode("String")
###########################################
if __name__ == '__main__':
unittest.main()
|
spbguru/repo1
|
tests/unit/py2/nupic/encoders/scalar_test.py
|
Python
|
gpl-3.0
| 19,900
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""A keyboard-driven, vim-like browser based on PyQt5."""
import os.path
__author__ = "Florian Bruhin"
__copyright__ = "Copyright 2014-2017 Florian Bruhin (The Compiler)"
__license__ = "GPL"
__maintainer__ = __author__
__email__ = "mail@qutebrowser.org"
__version_info__ = (1, 0, 3)
__version__ = '.'.join(str(e) for e in __version_info__)
__description__ = "A keyboard-driven, vim-like browser based on PyQt5."
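# Absolute path of the installed qutebrowser package (the directory containing this file).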
basedir = os.path.dirname(os.path.realpath(__file__))
|
NoctuaNivalis/qutebrowser
|
qutebrowser/__init__.py
|
Python
|
gpl-3.0
| 1,268
|
# -*- coding: utf-8 -*-
import os
import os.path
from tempfile import mkdtemp
from unittest import TestCase
from outwiker.core.attachment import Attachment
from outwiker.core.defines import PAGE_RESULT_HTML
from outwiker.core.style import Style
from outwiker.core.tree import WikiDocument
from outwiker.gui.guiconfig import HtmlRenderConfig
from outwiker.pages.wiki.parser.command import Command
from outwiker.pages.wiki.wikipage import WikiPageFactory
from outwiker.pages.wiki.htmlgenerator import HtmlGenerator
from outwiker.pages.wiki.emptycontent import EmptyContent
from outwiker.pages.wiki.wikiconfig import WikiConfig
from test.utils import removeDir
from test.basetestcases import BaseOutWikerMixin
class ExampleFooterWikiCommand(Command):
def execute(self, params, content):
self.parser.appendToFooter(content)
return ''
@property
def name(self):
return "footer"
class ExampleHeadWikiCommand(Command):
def execute(self, params, content):
self.parser.appendToHead(content)
return ''
@property
def name(self):
return "head"
class WikiHtmlGeneratorTest(BaseOutWikerMixin, TestCase):
def setUp(self):
self.initApplication()
self.filesPath = "../test/samplefiles/"
self.__createWiki()
files = ["image.jpg", "dir"]
self.wikicommands = [ExampleFooterWikiCommand,
ExampleHeadWikiCommand,
]
fullFilesPath = [os.path.join(self.filesPath, fname)
for fname
in files]
self.attach_page2 = Attachment(self.wikiroot["Страница 2"])
        # Attach the files to the two pages
Attachment(self.testPage).attach(fullFilesPath)
self.wikitext = """Бла-бла-бла
%thumb maxsize=250%Attach:image.jpg%%
Бла-бла-бла"""
self.testPage.content = self.wikitext
self.__htmlconfig = HtmlRenderConfig(self.application.config)
self.__setDefaultConfig()
self.resultPath = os.path.join(self.testPage.path, PAGE_RESULT_HTML)
self.application.onWikiParserPrepare += self.__onWikiParserPrepare
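        # Register the example (:head:) and (:footer:) commands with every parser the application prepares.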
def __setDefaultConfig(self):
        # Set a thumbnail size that does not match the default
self.application.config.set(WikiConfig.WIKI_SECTION,
WikiConfig.THUMB_SIZE_PARAM,
WikiConfig.THUMB_SIZE_DEFAULT)
self.application.config.set(HtmlRenderConfig.HTML_SECTION,
HtmlRenderConfig.FONT_FACE_NAME_PARAM,
HtmlRenderConfig.FONT_NAME_DEFAULT)
def __createWiki(self):
        # The wiki will be created here
self.path = mkdtemp(prefix='Абырвалг абыр')
self.wikiroot = WikiDocument.create(self.path)
WikiPageFactory().create(self.wikiroot, "Страница 2", [])
self.testPage = self.wikiroot["Страница 2"]
def __onWikiParserPrepare(self, parser):
list([parser.addCommand(command(parser))
for command in self.wikicommands])
def tearDown(self):
self.destroyApplication()
removeDir(self.path)
def testEmpty1(self):
text = "бла-бла-бла"
content = EmptyContent(self.application.config)
content.content = text
        # Clear the content so that EmptyContent is used
self.testPage.content = ""
generator = HtmlGenerator(self.testPage)
result = generator.makeHtml(Style().getPageStyle(self.testPage))
self.assertTrue(text in result)
def testEmpty2(self):
text = "(:attachlist:)"
content = EmptyContent(self.application.config)
content.content = text
        # Clear the content so that EmptyContent is used
self.testPage.content = ""
generator = HtmlGenerator(self.testPage)
result = generator.makeHtml(Style().getPageStyle(self.testPage))
self.assertTrue("image.jpg" in result)
def testFooter_01(self):
text = 'Бла-бла-бла(:footer:)Подвал 1(:footerend:)'
self.testPage.content = text
generator = HtmlGenerator(self.testPage)
result = generator.makeHtml(Style().getPageStyle(self.testPage))
self.assertIn('Бла-бла-бла<br/>\nПодвал 1\n</body>',
result.replace('\r\n', '\n'))
def testFooter_02(self):
text = 'Бла-бла-бла(:footer:)Подвал 1(:footerend:)(:footer:)Подвал 2(:footerend:)11111'
self.testPage.content = text
generator = HtmlGenerator(self.testPage)
result = generator.makeHtml(Style().getPageStyle(self.testPage))
self.assertIn('Бла-бла-бла11111<br/>\nПодвал 1Подвал 2\n</body>',
result.replace('\r\n', '\n'))
def testHead_01(self):
text = 'Бла-бла-бла(:head:)Заголовок 1(:headend:)'
self.testPage.content = text
generator = HtmlGenerator(self.testPage)
result = generator.makeHtml(Style().getPageStyle(self.testPage))
self.assertIn('Заголовок 1\n</head>',
result.replace('\r\n', '\n'))
def testHead_02(self):
text = '''Бла-бла-бла
(:head:)Заголовок 1(:headend:)
(:head:)Заголовок 2(:headend:)
'''
self.testPage.content = text
generator = HtmlGenerator(self.testPage)
result = generator.makeHtml(Style().getPageStyle(self.testPage))
self.assertIn('Заголовок 1Заголовок 2\n</head>',
result.replace('\r\n', '\n'))
|
unreal666/outwiker
|
src/test/gui/wikipage/test_wikihtmlgenerator.py
|
Python
|
gpl-3.0
| 5,965
|
import os
import codecs
from django.conf import settings
from django.db import models
from django.db.models import Q
from opencontext_py.apps.entities.uri.models import URImanagement
from opencontext_py.apps.ldata.linkentities.models import LinkEntity
from opencontext_py.apps.ocitems.manifest.models import Manifest
from opencontext_py.apps.ldata.linkannotations.models import LinkAnnotation
# This class is used for the mass editing of category data
class ModifyCategories():
PREFIXING = {'db-prefix': 'oc-gen:',
'file-prefix': '&oc-general;'}
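    # Mapping from the old numeric category identifiers to their new mnemonic equivalents.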
REVISION_LIST = [
{'new': 'oc-gen:cat-object', 'old': 'oc-gen:cat-0008'},
{'new': 'oc-gen:cat-coin', 'old': 'oc-gen:cat-0009'},
{'new': 'oc-gen:cat-pottery', 'old': 'oc-gen:cat-0010'},
{'new': 'oc-gen:cat-glass', 'old': 'oc-gen:cat-0011'},
{'new': 'oc-gen:cat-groundstone', 'old': 'oc-gen:cat-0012'},
{'new': 'oc-gen:cat-arch-element', 'old': 'oc-gen:cat-0013'},
{'new': 'oc-gen:cat-bio-subj-ecofact', 'old': 'oc-gen:cat-0014'},
{'new': 'oc-gen:cat-animal-bone', 'old': 'oc-gen:cat-0015'},
{'new': 'oc-gen:cat-shell', 'old': 'oc-gen:cat-0016'},
{'new': 'oc-gen:cat-non-diag-bone', 'old': 'oc-gen:cat-0017'},
{'new': 'oc-gen:cat-human-bone', 'old': 'oc-gen:cat-0018'},
{'new': 'oc-gen:cat-plant-remains', 'old': 'oc-gen:cat-0019'},
{'new': 'oc-gen:cat-loc-or-context', 'old': 'oc-gen:cat-0020'},
{'new': 'oc-gen:cat-survey-unit', 'old': 'oc-gen:cat-0021'},
{'new': 'oc-gen:cat-site', 'old': 'oc-gen:cat-0022'},
{'new': 'oc-gen:cat-site-area', 'old': 'oc-gen:cat-0023'},
{'new': 'oc-gen:cat-context', 'old': 'oc-gen:cat-0024'},
{'new': 'oc-gen:cat-feature', 'old': 'oc-gen:cat-0025'},
{'new': 'oc-gen:cat-exc-unit', 'old': 'oc-gen:cat-0026'},
{'new': 'oc-gen:cat-locus', 'old': 'oc-gen:cat-0027'},
{'new': 'oc-gen:cat-lot', 'old': 'oc-gen:cat-0028'},
{'new': 'oc-gen:cat-basket', 'old': 'oc-gen:cat-0029'},
{'new': 'oc-gen:cat-area', 'old': 'oc-gen:cat-0030'},
{'new': 'oc-gen:cat-trench', 'old': 'oc-gen:cat-0031'},
{'new': 'oc-gen:cat-operation', 'old': 'oc-gen:cat-0032'},
{'new': 'oc-gen:cat-field-proj', 'old': 'oc-gen:cat-0033'},
{'new': 'oc-gen:cat-square', 'old': 'oc-gen:cat-0034'},
{'new': 'oc-gen:cat-unit', 'old': 'oc-gen:cat-0035'},
{'new': 'oc-gen:cat-sequence', 'old': 'oc-gen:cat-0036'},
{'new': 'oc-gen:cat-human-subj', 'old': 'oc-gen:cat-0037'},
{'new': 'oc-gen:cat-stratum', 'old': 'oc-gen:cat-0038'},
{'new': 'oc-gen:cat-phase', 'old': 'oc-gen:cat-0039'},
{'new': 'oc-gen:cat-hospital', 'old': 'oc-gen:cat-0040'},
{'new': 'oc-gen:cat-mound', 'old': 'oc-gen:cat-0041'},
{'new': 'oc-gen:cat-sculpture', 'old': 'oc-gen:cat-0042'},
{'new': 'oc-gen:cat-sample', 'old': 'oc-gen:cat-0043'},
{'new': 'oc-gen:cat-sample-col', 'old': 'oc-gen:cat-0044'},
{'new': 'oc-gen:cat-ref-col', 'old': 'oc-gen:cat-0045'},
{'new': 'oc-gen:cat-region', 'old': 'oc-gen:cat-0046'},
{'new': 'oc-gen:cat-figurine', 'old': 'oc-gen:cat-0047'}
]
def __init__(self):
self.root_export_dir = settings.STATIC_EXPORTS_ROOT + 'categories'
def mass_revise_category_uris(self):
""" Revises category uris in a mass edit
"""
for revision in self.REVISION_LIST:
search_old_db = revision['old']
replace_db = revision['new']
old_uri = URImanagement.convert_prefix_to_full_uri(search_old_db)
new_uri = URImanagement.convert_prefix_to_full_uri(replace_db)
Manifest.objects\
.filter(class_uri=search_old_db)\
.update(class_uri=replace_db)
LinkAnnotation.objects\
.filter(subject=search_old_db)\
.update(subject=replace_db)
LinkAnnotation.objects\
.filter(subject=old_uri)\
.update(subject=new_uri)
LinkAnnotation.objects\
.filter(object_uri=search_old_db)\
.update(object_uri=replace_db)
LinkAnnotation.objects\
.filter(object_uri=old_uri)\
.update(object_uri=new_uri)
LinkEntity.objects\
.filter(uri=old_uri)\
.update(uri=new_uri)
def update_ontology_doc(self, filename):
""" Changes categories in the ontology document
"""
filepath = self.root_export_dir + '/' + filename
newfilepath = self.root_export_dir + '/rev-' + filename
if os.path.isfile(filepath):
print('Found: ' + filepath)
with open(filepath, 'r') as myfile:
data = myfile.read()
for revision in self.REVISION_LIST:
search_old_db = revision['old']
search_old_file = search_old_db.replace(self.PREFIXING['db-prefix'],
self.PREFIXING['file-prefix'])
replace_db = revision['new']
replace_file = replace_db.replace(self.PREFIXING['db-prefix'],
self.PREFIXING['file-prefix'])
data = data.replace(search_old_file, replace_file)
old_uri = URImanagement.convert_prefix_to_full_uri(search_old_db)
new_uri = URImanagement.convert_prefix_to_full_uri(replace_db)
data = data.replace(old_uri, new_uri)
file = codecs.open(newfilepath, 'w', 'utf-8')
file.write(data)
file.close()
else:
print('Ouch! Cannot find: '+ filepath)
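# Minimal usage sketch (hypothetical; assumes a Django shell with the Open
# Context settings loaded, and the ontology file name is illustrative):
#
#   mc = ModifyCategories()
#   mc.mass_revise_category_uris()
#   mc.update_ontology_doc('oc-general.owl')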
|
portableant/open-context-py
|
opencontext_py/apps/edit/modcategories.py
|
Python
|
gpl-3.0
| 5,885
|
"""
Abstract base class of basic types provides a generic type tester method.
"""
import os, time
import re
import lldb
from lldbtest import *
import lldbutil
def Msg(var, val, using_frame_variable):
return "'%s %s' matches the output (from compiled code): %s" % (
'frame variable --show-types' if using_frame_variable else 'expression', var, val)
class GenericTester(TestBase):
# This is the pattern by design to match the " var = 'value'" output from
# printf() stmts (see basic_type.cpp).
pattern = re.compile(" (\*?a[^=]*) = '([^=]*)'$")
# Assert message.
DATA_TYPE_GROKKED = "Data type from expr parser output is parsed correctly"
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# We'll use the test method name as the exe_name.
# There are a bunch of test cases under test/types and we don't want the
# module caching subsystem to be confused with executable name "a.out"
# used for all the test cases.
self.exe_name = self.testMethodName
def tearDown(self):
"""Cleanup the test byproducts."""
TestBase.tearDown(self)
#print "Removing golden-output.txt..."
os.remove("golden-output.txt")
#==========================================================================#
# Functions build_and_run() and build_and_run_expr() are generic functions #
# which are called from the Test*Types*.py test cases. The API client is #
# responsible for supplying two mandatory arguments: the source file, e.g.,#
# 'int.cpp', and the atoms, e.g., set(['unsigned', 'long long']) to the #
# functions. There are also three optional keyword arguments of interest, #
# as follows: #
# #
# dsym -> build for dSYM (defaulted to True) #
# True: build dSYM file #
# False: build DWARF map #
# bc -> blockCaptured (defaulted to False) #
# True: testing vars of various basic types from inside a block #
# False: testing vars of various basic types from a function #
# qd -> quotedDisplay (defaulted to False) #
# True: the output from 'frame var' or 'expr var' contains a pair #
# of single quotes around the value #
# False: no single quotes are to be found around the value of #
# variable #
#==========================================================================#
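# A minimal usage sketch (hypothetical test method; 'int.cpp' and the atom
# set are illustrative, not taken from a specific Test*Types*.py file):
#
#   def test_int_type(self):
#       """Check an int variable with 'frame variable --show-types'."""
#       self.build_and_run('int.cpp', set(['int']), dsym=False, qd=True)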
def build_and_run(self, source, atoms, dsym=True, bc=False, qd=False):
self.build_and_run_with_source_atoms_expr(source, atoms, expr=False, dsym=dsym, bc=bc, qd=qd)
def build_and_run_expr(self, source, atoms, dsym=True, bc=False, qd=False):
self.build_and_run_with_source_atoms_expr(source, atoms, expr=True, dsym=dsym, bc=bc, qd=qd)
def build_and_run_with_source_atoms_expr(self, source, atoms, expr, dsym=True, bc=False, qd=False):
# See also Makefile and basic_type.cpp:177.
if bc:
d = {'CXX_SOURCES': source, 'EXE': self.exe_name, 'CFLAGS_EXTRAS': '-DTEST_BLOCK_CAPTURED_VARS'}
else:
d = {'CXX_SOURCES': source, 'EXE': self.exe_name}
if dsym:
self.buildDsym(dictionary=d)
else:
self.buildDwarf(dictionary=d)
self.setTearDownCleanup(dictionary=d)
if expr:
self.generic_type_expr_tester(self.exe_name, atoms, blockCaptured=bc, quotedDisplay=qd)
else:
self.generic_type_tester(self.exe_name, atoms, blockCaptured=bc, quotedDisplay=qd)
def generic_type_tester(self, exe_name, atoms, quotedDisplay=False, blockCaptured=False):
"""Test that variables with basic types are displayed correctly."""
self.runCmd("file %s" % exe_name, CURRENT_EXECUTABLE_SET)
# First, capture the golden output emitted by the oracle, i.e., the
# series of printf statements.
self.runCmd("process launch -o golden-output.txt")
with open("golden-output.txt") as f:
go = f.read()
# This golden list contains a list of (variable, value) pairs extracted
# from the golden output.
gl = []
# Scan the golden output line by line, looking for the pattern:
#
# variable = 'value'
#
for line in go.split(os.linesep):
# We'll ignore variables of array types from inside a block.
if blockCaptured and '[' in line:
continue
match = self.pattern.search(line)
if match:
var, val = match.group(1), match.group(2)
gl.append((var, val))
#print "golden list:", gl
# This test uses a #include of the "basic_type.cpp" so we need to enable
# always setting inlined breakpoints.
self.runCmd('settings set target.inline-breakpoint-strategy always')
# And add hooks to restore the settings during tearDown().
self.addTearDownHook(
lambda: self.runCmd("settings set target.inline-breakpoint-strategy headers"))
# Bring the program to the point where we can issue a series of
# 'frame variable --show-types' command.
if blockCaptured:
break_line = line_number ("basic_type.cpp", "// Break here to test block captured variables.")
else:
break_line = line_number ("basic_type.cpp", "// Here is the line we will break on to check variables.")
lldbutil.run_break_set_by_file_and_line (self, "basic_type.cpp", break_line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
self.expect("process status", STOPPED_DUE_TO_BREAKPOINT,
substrs = [" at basic_type.cpp:%d" % break_line,
"stop reason = breakpoint"])
#self.runCmd("frame variable --show-types")
# Now iterate through the golden list, comparing against the output from
# 'frame variable --show-types var'.
for var, val in gl:
self.runCmd("frame variable --show-types %s" % var)
output = self.res.GetOutput()
# The input type is in a canonical form as a set of named atoms.
# The display type string must contain each and every element.
#
# Example:
# runCmd: frame variable --show-types a_array_bounded[0]
# output: (char) a_array_bounded[0] = 'a'
#
try:
dt = re.match("^\((.*)\)", output).group(1)
except:
self.fail(self.DATA_TYPE_GROKKED)
# Expect the display type string to contain each and every atom.
self.expect(dt,
"Display type: '%s' must contain the type atoms: '%s'" %
(dt, atoms),
exe=False,
substrs = list(atoms))
# The (var, val) pair must match, too.
nv = ("%s = '%s'" if quotedDisplay else "%s = %s") % (var, val)
self.expect(output, Msg(var, val, True), exe=False,
substrs = [nv])
def generic_type_expr_tester(self, exe_name, atoms, quotedDisplay=False, blockCaptured=False):
"""Test that variable expressions with basic types are evaluated correctly."""
self.runCmd("file %s" % exe_name, CURRENT_EXECUTABLE_SET)
# First, capture the golden output emitted by the oracle, i.e., the
# series of printf statements.
self.runCmd("process launch -o golden-output.txt")
with open("golden-output.txt") as f:
go = f.read()
# This golden list contains a list of (variable, value) pairs extracted
# from the golden output.
gl = []
# Scan the golden output line by line, looking for the pattern:
#
# variable = 'value'
#
for line in go.split(os.linesep):
# We'll ignore variables of array types from inside a block.
if blockCaptured and '[' in line:
continue
match = self.pattern.search(line)
if match:
var, val = match.group(1), match.group(2)
gl.append((var, val))
#print "golden list:", gl
# This test uses a #include of the "basic_type.cpp" so we need to enable
# always setting inlined breakpoints.
self.runCmd('settings set target.inline-breakpoint-strategy always')
# And add hooks to restore the settings during tearDown().
self.addTearDownHook(
lambda: self.runCmd("settings set target.inline-breakpoint-strategy headers"))
# Bring the program to the point where we can issue a series of
# 'expr' command.
if blockCaptured:
break_line = line_number ("basic_type.cpp", "// Break here to test block captured variables.")
else:
break_line = line_number ("basic_type.cpp", "// Here is the line we will break on to check variables.")
lldbutil.run_break_set_by_file_and_line (self, "basic_type.cpp", break_line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
self.expect("process status", STOPPED_DUE_TO_BREAKPOINT,
substrs = [" at basic_type.cpp:%d" % break_line,
"stop reason = breakpoint"])
#self.runCmd("frame variable --show-types")
# Now iterate through the golden list, comparing against the output from
# 'expr var'.
for var, val in gl:
# Don't overwhelm the expression mechanism.
# This slows down the test suite quite a bit; to enable the delay, define
# the environment variable LLDB_TYPES_EXPR_TIME_WAIT. For example:
#
# export LLDB_TYPES_EXPR_TIME_WAIT=0.5
#
# causes a 0.5 second delay between 'expression' commands.
if "LLDB_TYPES_EXPR_TIME_WAIT" in os.environ:
time.sleep(float(os.environ["LLDB_TYPES_EXPR_TIME_WAIT"]))
self.runCmd("expression %s" % var)
output = self.res.GetOutput()
# The input type is in a canonical form as a set of named atoms.
# The display type string must contain each and every element.
#
# Example:
# runCmd: expr a
# output: (double) $0 = 1100.12
#
try:
dt = re.match("^\((.*)\) \$[0-9]+ = ", output).group(1)
except:
self.fail(self.DATA_TYPE_GROKKED)
# Expect the display type string to contain each and every atom.
self.expect(dt,
"Display type: '%s' must contain the type atoms: '%s'" %
(dt, atoms),
exe=False,
substrs = list(atoms))
# The val part must match, too.
valPart = ("'%s'" if quotedDisplay else "%s") % val
self.expect(output, Msg(var, val, False), exe=False,
substrs = [valPart])
|
s20121035/rk3288_android5.1_repo
|
external/lldb/test/types/AbstractBase.py
|
Python
|
gpl-3.0
| 11,517
|
#!/usr/bin/env python
############################################################################
#
# Copyright (C) 2012, 2013 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
#
# Serial firmware uploader for the PX4FMU bootloader
#
# The PX4 firmware file is a JSON-encoded Python object, containing
# metadata fields and a zlib-compressed base64-encoded firmware image.
#
# The uploader uses the following fields from the firmware file:
#
# image
# The firmware that will be uploaded.
# image_size
# The size of the firmware in bytes.
# board_id
# The board for which the firmware is intended.
# board_revision
# Currently only used for informational purposes.
#
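# A rough sketch of what such a firmware file might contain (field values
# are illustrative only):
#
#   {
#       "board_id": 9,
#       "board_revision": 0,
#       "image_size": 123456,
#       "image": "<base64 of the zlib-compressed binary image>"
#   }
#
# The binary image is recovered with zlib.decompress(base64.b64decode(...)),
# exactly as the firmware class below does.
#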
# for python2.7 compatibility
from __future__ import print_function
import sys
import argparse
import binascii
import serial
import struct
import json
import zlib
import base64
import time
import array
import os
from sys import platform as _platform
class firmware(object):
'''Loads a firmware file'''
desc = {}
image = bytes()
crctab = array.array('I', [
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d])
crcpad = bytearray(b'\xff\xff\xff\xff')
def __init__(self, path):
# read the file
f = open(path, "r")
self.desc = json.load(f)
f.close()
self.image = bytearray(zlib.decompress(base64.b64decode(self.desc['image'])))
# pad image to 4-byte length
while ((len(self.image) % 4) != 0):
self.image.append(0xff)  # bytearray.append() takes an int on both Python 2 and 3
def property(self, propname):
return self.desc[propname]
def __crc32(self, bytes, state):
for byte in bytes:
index = (state ^ byte) & 0xff
state = self.crctab[index] ^ (state >> 8)
return state
def crc(self, padlen):
state = self.__crc32(self.image, int(0))
for i in range(len(self.image), (padlen - 1), 4):
state = self.__crc32(self.crcpad, state)
return state
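# Minimal usage sketch for the firmware class (the file name is illustrative):
#
#   fw = firmware('px4fmu-v2_default.px4')
#   print(fw.property('board_id'), fw.property('image_size'))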
class uploader(object):
'''Uploads a firmware file to the PX4 FMU bootloader'''
# protocol bytes
INSYNC = b'\x12'
EOC = b'\x20'
# reply bytes
OK = b'\x10'
FAILED = b'\x11'
INVALID = b'\x13' # rev3+
# command bytes
NOP = b'\x00' # guaranteed to be discarded by the bootloader
GET_SYNC = b'\x21'
GET_DEVICE = b'\x22'
CHIP_ERASE = b'\x23'
CHIP_VERIFY = b'\x24' # rev2 only
PROG_MULTI = b'\x27'
READ_MULTI = b'\x28' # rev2 only
GET_CRC = b'\x29' # rev3+
GET_OTP = b'\x2a' # rev4+ , get a word from OTP area
GET_SN = b'\x2b' # rev4+ , get a word from SN area
REBOOT = b'\x30'
INFO_BL_REV = b'\x01' # bootloader protocol revision
BL_REV_MIN = 2 # minimum supported bootloader protocol
BL_REV_MAX = 4 # maximum supported bootloader protocol
INFO_BOARD_ID = b'\x02' # board type
INFO_BOARD_REV = b'\x03' # board revision
INFO_FLASH_SIZE = b'\x04' # max firmware size in bytes
PROG_MULTI_MAX = 60 # protocol max is 255, must be multiple of 4
READ_MULTI_MAX = 60 # protocol max is 255, something overflows with >= 64
NSH_INIT = bytearray(b'\x0d\x0d\x0d')
NSH_REBOOT_BL = b"reboot -b\n"
NSH_REBOOT = b"reboot\n"
MAVLINK_REBOOT_ID1 = bytearray(b'\xfe\x21\x72\xff\x00\x4c\x00\x00\x80\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x00\x01\x00\x00\x48\xf0')
MAVLINK_REBOOT_ID0 = bytearray(b'\xfe\x21\x45\xff\x00\x4c\x00\x00\x80\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x00\x00\x00\x00\xd7\xac')
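# Illustrative wire exchange for a sync probe (this is what __sync() and
# __getSync() below implement, not an additional code path): the host writes
# GET_SYNC + EOC, and the bootloader is expected to answer INSYNC (0x12)
# followed by OK (0x10).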
def __init__(self, portname, baudrate):
# open the port, keep the default timeout short so we can poll quickly
self.port = serial.Serial(portname, baudrate, timeout=0.5)
self.otp = b''
self.sn = b''
def close(self):
if self.port is not None:
self.port.close()
def __send(self, c):
# print("send " + binascii.hexlify(c))
self.port.write(c)
def __recv(self, count=1):
c = self.port.read(count)
if len(c) < 1:
raise RuntimeError("timeout waiting for data")
# print("recv " + binascii.hexlify(c))
return c
def __recv_int(self):
raw = self.__recv(4)
val = struct.unpack("<I", raw)
return val[0]
def __getSync(self):
self.port.flush()
c = bytes(self.__recv())
if c != self.INSYNC:
raise RuntimeError("unexpected %s instead of INSYNC" % c)
c = self.__recv()
if c == self.INVALID:
raise RuntimeError("bootloader reports INVALID OPERATION")
if c == self.FAILED:
raise RuntimeError("bootloader reports OPERATION FAILED")
if c != self.OK:
raise RuntimeError("unexpected response 0x%x instead of OK" % ord(c))
# attempt to get back into sync with the bootloader
def __sync(self):
# send a stream of ignored bytes longer than the longest possible conversation
# that we might still have in progress
# self.__send(uploader.NOP * (uploader.PROG_MULTI_MAX + 2))
self.port.flushInput()
self.__send(uploader.GET_SYNC
+ uploader.EOC)
self.__getSync()
# def __trySync(self):
# c = self.__recv()
# if (c != self.INSYNC):
# #print("unexpected 0x%x instead of INSYNC" % ord(c))
# return False;
# c = self.__recv()
# if (c != self.OK):
# #print("unexpected 0x%x instead of OK" % ord(c))
# return False
# return True
# send the GET_DEVICE command and wait for an info parameter
def __getInfo(self, param):
self.__send(uploader.GET_DEVICE + param + uploader.EOC)
value = self.__recv_int()
self.__getSync()
return value
# send the GET_OTP command and wait for an info parameter
def __getOTP(self, param):
t = struct.pack("I", param) # int param as 32bit ( 4 byte ) char array.
self.__send(uploader.GET_OTP + t + uploader.EOC)
value = self.__recv(4)
self.__getSync()
return value
# send the GET_SN command and wait for an info parameter
def __getSN(self, param):
t = struct.pack("I", param) # int param as 32bit ( 4 byte ) char array.
self.__send(uploader.GET_SN + t + uploader.EOC)
value = self.__recv(4)
self.__getSync()
return value
# send the CHIP_ERASE command and wait for the bootloader to become ready
def __erase(self):
self.__send(uploader.CHIP_ERASE
+ uploader.EOC)
# erase is very slow, give it 20s
deadline = time.time() + 20
while time.time() < deadline:
try:
self.__getSync()
return
except RuntimeError:
# we timed out, that's OK
continue
raise RuntimeError("timed out waiting for erase")
# send a PROG_MULTI command to write a collection of bytes
def __program_multi(self, data):
if runningPython3 == True:
length = len(data).to_bytes(1, byteorder='big')
else:
length = chr(len(data))
self.__send(uploader.PROG_MULTI)
self.__send(length)
self.__send(data)
self.__send(uploader.EOC)
self.__getSync()
# verify multiple bytes in flash
def __verify_multi(self, data):
if runningPython3 == True:
length = len(data).to_bytes(1, byteorder='big')
else:
length = chr(len(data))
self.__send(uploader.READ_MULTI)
self.__send(length)
self.__send(uploader.EOC)
self.port.flush()
programmed = self.__recv(len(data))
if programmed != data:
print("got " + binascii.hexlify(programmed))
print("expect " + binascii.hexlify(data))
return False
self.__getSync()
return True
# send the reboot command
def __reboot(self):
self.__send(uploader.REBOOT
+ uploader.EOC)
self.port.flush()
# v3+ can report failure if the first word flash fails
if self.bl_rev >= 3:
self.__getSync()
# split a sequence into a list of size-constrained pieces
def __split_len(self, seq, length):
return [seq[i:i+length] for i in range(0, len(seq), length)]
# upload code
def __program(self, fw):
code = fw.image
groups = self.__split_len(code, uploader.PROG_MULTI_MAX)
for bytes in groups:
self.__program_multi(bytes)
# verify code
def __verify_v2(self, fw):
self.__send(uploader.CHIP_VERIFY
+ uploader.EOC)
self.__getSync()
code = fw.image
groups = self.__split_len(code, uploader.READ_MULTI_MAX)
for bytes in groups:
if (not self.__verify_multi(bytes)):
raise RuntimeError("Verification failed")
def __verify_v3(self, fw):
expect_crc = fw.crc(self.fw_maxsize)
self.__send(uploader.GET_CRC
+ uploader.EOC)
report_crc = self.__recv_int()
self.__getSync()
if report_crc != expect_crc:
print("Expected 0x%x" % expect_crc)
print("Got 0x%x" % report_crc)
raise RuntimeError("Program CRC failed")
# get basic data about the board
def identify(self):
# make sure we are in sync before starting
self.__sync()
# get the bootloader protocol ID first
self.bl_rev = self.__getInfo(uploader.INFO_BL_REV)
if (self.bl_rev < uploader.BL_REV_MIN) or (self.bl_rev > uploader.BL_REV_MAX):
print("Unsupported bootloader protocol %d" % uploader.INFO_BL_REV)
raise RuntimeError("Bootloader protocol mismatch")
self.board_type = self.__getInfo(uploader.INFO_BOARD_ID)
self.board_rev = self.__getInfo(uploader.INFO_BOARD_REV)
self.fw_maxsize = self.__getInfo(uploader.INFO_FLASH_SIZE)
# upload the firmware
def upload(self, fw):
# Make sure we are doing the right thing
if self.board_type != fw.property('board_id'):
raise RuntimeError("Firmware not suitable for this board")
if self.fw_maxsize < fw.property('image_size'):
raise RuntimeError("Firmware image is too large for this board")
# OTP added in v4:
if self.bl_rev > 3:
for byte in range(0,32*6,4):
x = self.__getOTP(byte)
self.otp = self.otp + x
print(binascii.hexlify(x).decode('Latin-1') + ' ', end='')
# see src/modules/systemlib/otp.h in px4 code:
self.otp_id = self.otp[0:4]
self.otp_idtype = self.otp[4:5]
self.otp_vid = self.otp[8:4:-1]
self.otp_pid = self.otp[12:8:-1]
self.otp_coa = self.otp[32:160]
# show user:
try:
print("type: " + self.otp_id.decode('Latin-1'))
print("idtype: " + binascii.b2a_qp(self.otp_idtype).decode('Latin-1'))
print("vid: " + binascii.hexlify(self.otp_vid).decode('Latin-1'))
print("pid: "+ binascii.hexlify(self.otp_pid).decode('Latin-1'))
print("coa: "+ binascii.b2a_base64(self.otp_coa).decode('Latin-1'))
print("sn: ", end='')
for byte in range(0,12,4):
x = self.__getSN(byte)
x = x[::-1] # reverse the bytes
self.sn = self.sn + x
print(binascii.hexlify(x).decode('Latin-1'), end='') # show user
print('')
except Exception:
# ignore bad character encodings
pass
print("erase...")
self.__erase()
print("program...")
self.__program(fw)
print("verify...")
if self.bl_rev == 2:
self.__verify_v2(fw)
else:
self.__verify_v3(fw)
print("done, rebooting.")
self.__reboot()
self.port.close()
def send_reboot(self):
try:
# try reboot via NSH first
self.__send(uploader.NSH_INIT)
self.__send(uploader.NSH_REBOOT_BL)
self.__send(uploader.NSH_INIT)
self.__send(uploader.NSH_REBOOT)
# then try MAVLINK command
self.__send(uploader.MAVLINK_REBOOT_ID1)
self.__send(uploader.MAVLINK_REBOOT_ID0)
except:
return
# Detect python version
if sys.version_info[0] < 3:
runningPython3 = False
else:
runningPython3 = True
# Parse commandline arguments
parser = argparse.ArgumentParser(description="Firmware uploader for the PX4 autopilot system.")
parser.add_argument('--port', action="store", required=True, help="Serial port(s) to which the FMU may be attached")
parser.add_argument('--baud', action="store", type=int, default=115200, help="Baud rate of the serial port (default is 115200), only required for true serial ports.")
parser.add_argument('firmware', action="store", help="Firmware file to be uploaded")
args = parser.parse_args()
# warn people about ModemManager which interferes badly with Pixhawk
if os.path.exists("/usr/sbin/ModemManager"):
print("=======================================================================")
print("WARNING: You should uninstall ModemManager as it conflicts with Pixhawk")
print("=======================================================================")
# Load the firmware file
fw = firmware(args.firmware)
print("Loaded firmware for %x,%x, waiting for the bootloader..." % (fw.property('board_id'), fw.property('board_revision')))
# Spin waiting for a device to show up
while True:
portlist = []
patterns = args.port.split(",")
# on unix-like platforms use glob to support wildcard ports. This allows
# the use of /dev/serial/by-id/usb-3D_Robotics on Linux, which prevents the upload from
# causing modem hangups etc
if "linux" in _platform or "darwin" in _platform:
import glob
for pattern in patterns:
portlist += glob.glob(pattern)
else:
portlist = patterns
for port in portlist:
#print("Trying %s" % port)
# create an uploader attached to the port
try:
if "linux" in _platform:
# Linux, don't open Mac OS and Win ports
if not "COM" in port and not "tty.usb" in port:
up = uploader(port, args.baud)
elif "darwin" in _platform:
# OS X, don't open Windows and Linux ports
if not "COM" in port and not "ACM" in port:
up = uploader(port, args.baud)
elif "win" in _platform:
# Windows, don't open POSIX ports
if not "/" in port:
up = uploader(port, args.baud)
except Exception:
# open failed, rate-limit our attempts
time.sleep(0.05)
# and loop to the next port
continue
# port is open, try talking to it
try:
# identify the bootloader
up.identify()
print("Found board %x,%x bootloader rev %x on %s" % (up.board_type, up.board_rev, up.bl_rev, port))
except Exception:
# most probably a timeout talking to the port, no bootloader, try to reboot the board
print("attempting reboot on %s..." % port)
up.send_reboot()
# wait for the reboot; without this delay we might run into Serial I/O Error 5
time.sleep(0.5)
continue
try:
# ok, we have a bootloader, try flashing it
up.upload(fw)
except RuntimeError as ex:
# print the error
print("ERROR: %s" % ex.args)
finally:
# always close the port
up.close()
# we could loop here if we wanted to wait for more boards...
sys.exit(0)
|
jlnaudin/x-VTOLdrone
|
PixHawk_PX4/PX4Firmware/Tools/px_uploader.py
|
Python
|
gpl-3.0
| 24,258
|
from rule import Rule
from opcodes import *
"""
Rule:
mask = shlWorkaround(u256(-1) >> unsigned(A.d()), unsigned(B.d()))
SHL(B, SHR(A, X)) -> AND(SH[L/R]([B - A / A - B], X), Mask)
Requirements:
A < BitWidth
B < BitWidth
"""
rule = Rule()
n_bits = 64
# Input vars
X = BitVec('X', n_bits)
A = BitVec('A', n_bits)
B = BitVec('B', n_bits)
# Constants
BitWidth = BitVecVal(n_bits, n_bits)
# Requirements
rule.require(ULT(A, BitWidth))
rule.require(ULT(B, BitWidth))
# Non optimized result
nonopt = SHL(B, SHR(A, X))
# Optimized result
Mask = SHL(B, SHR(A, Int2BV(IntVal(-1), n_bits)))
opt = If(
UGT(A, B),
AND(SHR(A - B, X), Mask),
If(
UGT(B, A),
AND(SHL(B - A, X), Mask),
AND(X, Mask)
)
)
rule.check(nonopt, opt)
|
winsvega/solidity
|
test/formal/combine_shr_shl_by_constant_64.py
|
Python
|
gpl-3.0
| 735
|
# Copyright (C) 2015 Equinor ASA, Norway.
#
# The file 'test_field_config.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
import os
from res.enkf import FieldConfig
from res.enkf import ActiveList
from tests import ResTest
from ecl.grid import EclGridGenerator
class FieldConfigTest(ResTest):
def test_create(self):
grid = EclGridGenerator.create_rectangular( (10,10,5) , (1,1,1) )
field_config = FieldConfig("SWAT" , grid)
|
Statoil/libres
|
python/tests/res/enkf/data/test_field_config.py
|
Python
|
gpl-3.0
| 1,005
|
# -*- coding: utf-8 -*-
"""
Cached Information
~~~~~~~~~~~~~~~~~~
Classes and methods to maintain any information that is stored
outside the doctree.
.. autoclass:: Cache
:members:
.. autoclass:: BibfileCache
:members:
.. autoclass:: BibliographyCache
:members:
"""
import collections
from oset import oset
import pybtex.database
class Cache:
"""Global bibtex extension information cache. Stored in
``app.env.bibtex_cache``, so must be picklable.
.. attribute:: bibfiles
A :class:`dict` mapping .bib file names (relative to the top
source folder) to :class:`BibfileCache` instances.
.. attribute:: bibliographies
Each bibliography directive is assigned an id of the form
bibtex-bibliography-xxx. This :class:`dict` maps each such id
to information about the bibliography directive,
:class:`BibliographyCache`. We need to store this extra
information separately because it cannot be stored in the
:class:`~sphinxcontrib.bibtex.nodes.bibliography` nodes
themselves.
.. attribute:: _cited
A :class:`dict` mapping each docname to a :class:`set` of
citation keys.
.. attribute:: _enum_count
A :class:`dict` mapping each docname to an :class:`int`
representing the current bibliography enumeration counter.
"""
def __init__(self):
self.bibfiles = {}
self.bibliographies = {}
self._cited = collections.defaultdict(oset)
self._enum_count = {}
def purge(self, docname):
"""Remove all information related to *docname*.
:param docname: The document name.
:type docname: :class:`str`
"""
ids = [id_ for id_, info in self.bibliographies.iteritems()
if info.docname == docname]
for id_ in ids:
del self.bibliographies[id_]
self._cited.pop(docname, None)
self._enum_count.pop(docname, None)
def inc_enum_count(self, docname):
if docname in self._enum_count:
self._enum_count[docname] += 1
else:
self._enum_count[docname] = 2
def set_enum_count(self, docname, value):
self._enum_count[docname] = value
def get_enum_count(self, docname):
if docname in self._enum_count:
return self._enum_count[docname]
else:
return 1
def add_cited(self, key, docname):
"""Add the given *key* to the set of cited keys for
*docname*.
:param key: The citation key.
:type key: :class:`str`
:param docname: The document name.
:type docname: :class:`str`
"""
self._cited[docname].add(key)
def is_cited(self, key):
"""Return whether the given key is cited in any document.
:param key: The citation key.
:type key: :class:`str`
"""
for docname, keys in self._cited.iteritems():
if key in keys:
return True
return False
def get_label_from_key(self, key):
"""Return label for the given key."""
for info in self.bibliographies.itervalues():
if key in info.labels:
return info.labels[key]
else:
raise KeyError("%s not found" % key)
def get_all_cited_keys(self):
"""Yield all citation keys, sorted first by document
(alphabetical), then by citation order in the document.
"""
for docname in sorted(self._cited):
for key in self._cited[docname]:
yield key
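# Minimal usage sketch (hypothetical; in practice Sphinx populates the cache
# through the extension's event handlers, with docnames and citation keys
# taken from the processed documents):
#
#   cache = Cache()
#   cache.add_cited("knuth1984", "index")
#   assert cache.is_cited("knuth1984")
#   assert cache.get_enum_count("index") == 1   # default before any increment
#   cache.inc_enum_count("index")
#   assert cache.get_enum_count("index") == 2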
class BibfileCache:
"""Contains information about a parsed .bib file.
.. attribute:: mtime
A :class:`float` representing the modification time of the .bib
file when it was last parsed.
.. attribute:: data
A :class:`pybtex.database.BibliographyData` containing the
parsed .bib file.
"""
def __init__(self, mtime=None, data=None):
self.mtime = mtime if mtime is not None else -float("inf")
self.data = (data if data is not None
else pybtex.database.BibliographyData())
class BibliographyCache:
"""Contains information about a bibliography directive.
.. attribute:: docname
A :class:`str` containing the name of the document in which
the directive occurs. We need this information during the
Sphinx event *env-purge-doc*.
.. attribute:: bibfiles
A :class:`list` of :class:`str`\ s containing the .bib file
names (relative to the top source folder) that contain the
references.
.. attribute:: cite
A :class:`str`. Should be one of:
``"cited"``
Only generate cited references.
``"notcited"``
Only generate non-cited references.
``"all"``
Generate all references from the .bib files.
.. attribute:: style
The bibtex style.
.. attribute:: list_
The list type.
.. attribute:: enumtype
The sequence type (only used for enumerated lists).
.. attribute:: start
The first ordinal of the sequence (only used for enumerated lists).
.. attribute:: labels
Maps citation keys to their final labels.
.. attribute:: labelprefix
This bibliography's string prefix for pybtex generated labels.
"""
def __init__(self, docname=None, bibfiles=None,
cite="cited", style=None,
list_="citation", enumtype="arabic", start=1,
labels=None,
encoding=None,
curly_bracket_strip=True,
labelprefix="",
):
self.docname = docname
self.bibfiles = bibfiles if bibfiles is not None else []
self.cite = cite
self.style = style
self.list_ = list_
self.enumtype = enumtype
self.start = start
self.encoding = encoding
self.curly_bracket_strip = curly_bracket_strip
self.labels = labels if labels is not None else {}
self.labelprefix = labelprefix
|
talbrecht/pism_pik07
|
doc/site-packages/sphinxcontrib/bibtex/cache.py
|
Python
|
gpl-3.0
| 6,197
|
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2014 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""add anonymous and authenticated groups
create groups for "anonymous" and "authenticated" users
added: 2012-12-11 (v0.10dev)
previously migrate script v055
Revision ID: 16ed4c91d1aa
Revises: 280565a54124
Create Date: 2013-05-14 22:38:25.194543
"""
# revision identifiers, used by Alembic.
revision = '16ed4c91d1aa'
down_revision = '280565a54124'
from datetime import datetime
from alembic.op import execute, inline_literal
from sqlalchemy import Column, MetaData, Table
from sqlalchemy import DateTime, ForeignKey, Integer, Unicode, UnicodeText
# -- table definition ---------------------------------------------------------
metadata = MetaData()
groups = Table('groups', metadata,
Column('group_id', Integer, autoincrement=True, primary_key=True),
Column('group_name', Unicode(16), unique=True, nullable=False),
Column('display_name', Unicode(255)),
Column('created', DateTime, default=datetime.now),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
def add_group(group_name, display_name):
execute(
groups.insert().\
values({
'group_name': inline_literal(group_name),
'display_name': inline_literal(display_name),
})
)
def upgrade():
add_group(group_name=u'anonymous', display_name=u'Everyone (including guests)')
add_group(group_name=u'authenticated', display_name=u'Logged in users')
def downgrade():
execute(
groups.delete().\
where(groups.c.group_name.in_([u'anonymous', u'authenticated']))
)
# assignments of users to 'anonymous' and 'authenticated' are deleted
# automatically because of existing ForeignKey constraint in the DB
# (ON DELETE CASCADE ON UPDATE CASCADE)
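# Applied and reverted like any other Alembic revision (illustrative commands;
# MediaDrop may wrap these in its own migration tooling):
#
#   alembic upgrade 16ed4c91d1aa
#   alembic downgrade 280565a54124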
|
kgao/MediaDrop
|
mediadrop/migrations/versions/005-16ed4c91d1aa-add_anonymous_and_authenticated_groups.py
|
Python
|
gpl-3.0
| 2,087
|
import click
from .utils import init as _init
from .utils import setup_env as _setup_env
from .utils import new_site as _new_site
from .utils import setup_backups as _setup_backups
from .utils import setup_auto_update as _setup_auto_update
from .utils import setup_sudoers as _setup_sudoers
from .utils import start as _start
from .utils import setup_procfile as _setup_procfile
from .utils import set_nginx_port as _set_nginx_port
from .utils import set_url_root as _set_url_root
from .utils import set_default_site as _set_default_site
from .utils import (build_assets, patch_sites, exec_cmd, update_bench, get_frappe, setup_logging,
get_config, update_config, restart_supervisor_processes, put_config, default_config, update_requirements,
backup_all_sites, backup_site, get_sites, prime_wheel_cache, is_root, set_mariadb_host, drop_privileges,
fix_file_perms, fix_prod_setup_perms, set_ssl_certificate, set_ssl_certificate_key)
from .app import get_app as _get_app
from .app import new_app as _new_app
from .app import pull_all_apps
from .config import generate_nginx_config, generate_supervisor_config
from .production_setup import setup_production as _setup_production
from .migrate_to_v5 import migrate_to_v5
import os
import sys
import logging
import copy
import pwd
import grp
logger = logging.getLogger('bench')
def cli():
check_uid()
change_dir()
change_uid()
if len(sys.argv) > 2 and sys.argv[1] == "frappe":
return frappe()
return bench()
def cmd_requires_root():
if len(sys.argv) > 2 and sys.argv[2] in ('production', 'sudoers'):
return True
if len(sys.argv) > 2 and sys.argv[1] in ('patch',):
return True
def check_uid():
if cmd_requires_root() and not is_root():
print 'superuser privileges required for this command'
sys.exit(1)
def change_uid():
if is_root() and not cmd_requires_root():
frappe_user = get_config().get('frappe_user')
if frappe_user:
drop_privileges(uid_name=frappe_user, gid_name=frappe_user)
os.environ['HOME'] = pwd.getpwnam(frappe_user).pw_dir
else:
print 'You should not run this command as root'
sys.exit(1)
def change_dir():
if os.path.exists('config.json'):
return
dir_path_file = '/etc/frappe_bench_dir'
if os.path.exists(dir_path_file):
with open(dir_path_file) as f:
dir_path = f.read().strip()
if os.path.exists(dir_path):
os.chdir(dir_path)
def frappe(bench='.'):
f = get_frappe(bench=bench)
os.chdir(os.path.join(bench, 'sites'))
os.execv(f, [f] + sys.argv[2:])
@click.command()
def shell(bench='.'):
if not os.environ.get('SHELL'):
print "Cannot get shell"
sys.exit(1)
if not os.path.exists('sites'):
print "sites dir doesn't exist"
sys.exit(1)
env = copy.copy(os.environ)
env['PS1'] = '(' + os.path.basename(os.path.dirname(os.path.abspath(__file__))) + ')' + env.get('PS1', '')
env['PATH'] = os.path.abspath(os.path.join('env', 'bin')) + ':' + env['PATH']
os.chdir('sites')
os.execve(env['SHELL'], [env['SHELL']], env)
@click.group()
def bench(bench='.'):
"Bench manager for Frappe"
# TODO add bench path context
setup_logging(bench=bench)
@click.command()
@click.argument('path')
@click.option('--apps_path', default=None, help="path to json files with apps to install after init")
@click.option('--frappe-path', default=None, help="path to frappe repo")
@click.option('--frappe-branch', default=None, help="branch of frappe repo to checkout")
@click.option('--no-procfile', flag_value=True, type=bool, help="Do not create a Procfile")
@click.option('--no-backups',flag_value=True, type=bool, help="Do not set up automatic periodic backups")
@click.option('--no-auto-update',flag_value=True, type=bool, help="Do not set up automatic updates")
def init(path, apps_path, frappe_path, frappe_branch, no_procfile, no_backups,
no_auto_update):
"Create a new bench"
_init(path, apps_path=apps_path, no_procfile=no_procfile, no_backups=no_backups,
no_auto_update=no_auto_update, frappe_path=frappe_path, frappe_branch=frappe_branch)
click.echo('Bench {} initialized'.format(path))
@click.command('get-app')
@click.argument('name')
@click.argument('git-url')
@click.option('--branch', default=None, help="branch to checkout")
def get_app(name, git_url, branch):
"clone an app from the internet and set it up in your bench"
_get_app(name, git_url, branch=branch)
@click.command('new-app')
@click.argument('app-name')
def new_app(app_name):
"start a new app"
_new_app(app_name)
@click.command('new-site')
@click.option('--mariadb-root-password', help="MariaDB root password")
@click.option('--admin-password', help="admin password to set for site")
@click.argument('site')
def new_site(site, mariadb_root_password=None, admin_password=None):
"Create a new site in the bench"
_new_site(site, mariadb_root_password=mariadb_root_password, admin_password=admin_password)
#TODO: Not DRY
@click.command('update')
@click.option('--pull', flag_value=True, type=bool, help="Pull changes in all the apps in bench")
@click.option('--patch',flag_value=True, type=bool, help="Run migrations for all sites in the bench")
@click.option('--build',flag_value=True, type=bool, help="Build JS and CSS artifacts for the bench")
@click.option('--bench',flag_value=True, type=bool, help="Update bench")
@click.option('--requirements',flag_value=True, type=bool, help="Update requirements")
@click.option('--restart-supervisor',flag_value=True, type=bool, help="restart supervisor processes after update")
@click.option('--auto',flag_value=True, type=bool)
@click.option('--no-backup',flag_value=True, type=bool)
def update(pull=False, patch=False, build=False, bench=False, auto=False, restart_supervisor=False, requirements=False, no_backup=False):
"Update bench"
if not (pull or patch or build or bench or requirements):
pull, patch, build, bench, requirements = True, True, True, True, True
conf = get_config()
if conf.get('release_bench'):
print 'Release bench, cannot update'
sys.exit(1)
if auto:
sys.exit(1)
if bench and conf.get('update_bench_on_update'):
update_bench()
restart_update({
'pull': pull,
'patch': patch,
'build': build,
'requirements': requirements,
'no-backup': no_backup,
'restart-supervisor': restart_supervisor
})
if pull:
pull_all_apps()
if requirements:
update_requirements()
if patch:
if not no_backup:
backup_all_sites()
patch_sites()
if build:
build_assets()
if restart_supervisor or conf.get('restart_supervisor_on_update'):
restart_supervisor_processes()
print "_"*80
print "https://frappe.io/buy - Donate to help make better free and open source tools"
print
def restart_update(kwargs):
args = ['--'+k for k, v in kwargs.items() if v]
os.execv(sys.argv[0], sys.argv[:2] + args)
@click.command('restart')
def restart():
"Restart supervisor processes"
restart_supervisor_processes()
@click.command('start')
def start():
"Start Frappe development processes"
_start()
@click.command('migrate-3to4')
@click.argument('path')
def migrate_3to4(path):
"Migrate from ERPNext v3.x"
exec_cmd("{python} {migrate_3to4} {site}".format(
python=os.path.join('env', 'bin', 'python'),
migrate_3to4=os.path.join(os.path.dirname(__file__), 'migrate3to4.py'),
site=path))
@click.command('migrate-to-v5')
def _migrate_to_v5(bench='.'):
"Migrate to Version 5"
click.echo("This will migrate all sites in the bench to version 5. Version 5 is still work in progress and NOT STABLE.")
if click.confirm("This is irreversible. Do you want to continue?", abort=True):
migrate_to_v5(bench=bench)
@click.command('set-nginx-port')
@click.argument('site')
@click.argument('port', type=int)
def set_nginx_port(site, port):
"Set nginx port for site"
_set_nginx_port(site, port)
@click.command('set-ssl-certificate')
@click.argument('site')
@click.argument('ssl-certificate-path')
def _set_ssl_certificate(site, ssl_certificate_path):
"Set ssl certificate path for site"
set_ssl_certificate(site, ssl_certificate_path)
@click.command('set-ssl-key')
@click.argument('site')
@click.argument('ssl-certificate-key-path')
def _set_ssl_certificate_key(site, ssl_certificate_key_path):
"Set ssl certificate private key path for site"
set_ssl_certificate_key(site, ssl_certificate_key_path)
@click.command('set-url-root')
@click.argument('site')
@click.argument('url-root')
def set_url_root(site, url_root):
"Set url root for site"
_set_url_root(site, url_root)
@click.command('set-mariadb-host')
@click.argument('host')
def _set_mariadb_host(host):
"Set MariaDB host for bench"
set_mariadb_host(host)
@click.command('set-default-site')
@click.argument('site')
def set_default_site(site):
"Set default site for bench"
_set_default_site(site)
@click.command('backup')
@click.argument('site')
def _backup_site(site):
"backup site"
if not site in get_sites(bench='.'):
print 'site not found'
sys.exit(1)
backup_site(site, bench='.')
@click.command('backup-all-sites')
def _backup_all_sites():
"backup all sites"
backup_all_sites(bench='.')
@click.command('prime-wheel-cache')
def _prime_wheel_cache():
"Update wheel cache"
prime_wheel_cache(bench='.')
@click.command('release')
@click.argument('app', type=click.Choice(['frappe', 'erpnext', 'shopping_cart']))
@click.argument('bump-type', type=click.Choice(['major', 'minor', 'patch']))
def _release(app, bump_type):
"Release app (internal to the Frappe team)"
from .release import release
repo = os.path.join('apps', app)
release(repo, bump_type)
## Setup
@click.group()
def setup():
"Setup bench"
pass
@click.command('sudoers')
@click.argument('user')
def setup_sudoers(user):
"Add commands to sudoers list for execution without password"
_setup_sudoers(user)
@click.command('nginx')
def setup_nginx():
"generate config for nginx"
generate_nginx_config()
@click.command('supervisor')
def setup_supervisor():
"generate config for supervisor"
generate_supervisor_config()
@click.command('production')
@click.argument('user')
def setup_production(user):
"setup bench for production"
_setup_production(user=user)
@click.command('auto-update')
def setup_auto_update():
"Add cronjob for bench auto update"
_setup_auto_update()
@click.command('backups')
def setup_backups():
"Add cronjob for bench backups"
_setup_backups()
@click.command('dnsmasq')
def setup_dnsmasq():
pass
@click.command('env')
def setup_env():
"Setup virtualenv for bench"
_setup_env()
@click.command('procfile')
def setup_procfile():
"Setup Procfile for bench start"
_setup_procfile()
@click.command('config')
def setup_config():
"overwrite or make config.json"
put_config(default_config)
setup.add_command(setup_nginx)
setup.add_command(setup_sudoers)
setup.add_command(setup_supervisor)
setup.add_command(setup_auto_update)
setup.add_command(setup_dnsmasq)
setup.add_command(setup_backups)
setup.add_command(setup_env)
setup.add_command(setup_procfile)
setup.add_command(setup_config)
setup.add_command(setup_production)
## Config
## Not DRY
@click.group()
def config():
"change bench configuration"
pass
@click.command('auto_update')
@click.argument('state', type=click.Choice(['on', 'off']))
def config_auto_update(state):
"Enable/Disable auto update for bench"
state = True if state == 'on' else False
update_config({'auto_update': state})
@click.command('restart_supervisor_on_update')
@click.argument('state', type=click.Choice(['on', 'off']))
def config_restart_supervisor_on_update(state):
"Enable/Disable auto restart of supervisor processes"
state = True if state == 'on' else False
update_config({'restart_supervisor_on_update': state})
@click.command('update_bench_on_update')
@click.argument('state', type=click.Choice(['on', 'off']))
def config_update_bench_on_update(state):
"Enable/Disable bench updates on running bench update"
state = True if state == 'on' else False
update_config({'update_bench_on_update': state})
@click.command('dns_multitenant')
@click.argument('state', type=click.Choice(['on', 'off']))
def config_dns_multitenant(state):
"Enable/Disable bench updates on running bench update"
state = True if state == 'on' else False
update_config({'dns_multitenant': state})
@click.command('serve_default_site')
@click.argument('state', type=click.Choice(['on', 'off']))
def config_serve_default_site(state):
"Configure nginx to serve the default site on port 80"
state = True if state == 'on' else False
update_config({'serve_default_site': state})
@click.command('rebase_on_pull')
@click.argument('state', type=click.Choice(['on', 'off']))
def config_rebase_on_pull(state):
"Rebase repositories on pulling"
state = True if state == 'on' else False
update_config({'rebase_on_pull': state})
@click.command('http_timeout')
@click.argument('seconds', type=int)
def config_http_timeout(seconds):
"set http timeout"
update_config({'http_timeout': seconds})
config.add_command(config_auto_update)
config.add_command(config_update_bench_on_update)
config.add_command(config_restart_supervisor_on_update)
config.add_command(config_dns_multitenant)
config.add_command(config_serve_default_site)
config.add_command(config_http_timeout)
config.add_command(config_rebase_on_pull)
@click.group()
def patch():
pass
@click.command('fix-prod-perms')
def _fix_prod_perms():
"Fix permissions if supervisor processes were run as root"
if os.path.exists("config/supervisor.conf"):
exec_cmd("supervisorctl stop frappe:")
fix_prod_setup_perms()
if os.path.exists("config/supervisor.conf"):
exec_cmd("{bench} setup supervisor".format(bench=sys.argv[0]))
exec_cmd("supervisorctl reload")
@click.command('fix-file-perms')
def _fix_file_perms():
"Fix file permissions"
fix_file_perms()
patch.add_command(_fix_file_perms)
patch.add_command(_fix_prod_perms)
#Bench commands
bench.add_command(init)
bench.add_command(get_app)
bench.add_command(new_app)
bench.add_command(new_site)
bench.add_command(setup)
bench.add_command(update)
bench.add_command(restart)
bench.add_command(config)
bench.add_command(start)
bench.add_command(set_nginx_port)
bench.add_command(_set_ssl_certificate)
bench.add_command(_set_ssl_certificate_key)
bench.add_command(_set_mariadb_host)
bench.add_command(set_default_site)
bench.add_command(migrate_3to4)
bench.add_command(shell)
bench.add_command(_backup_all_sites)
bench.add_command(_backup_site)
bench.add_command(_prime_wheel_cache)
bench.add_command(_release)
bench.add_command(patch)
bench.add_command(set_url_root)
bench.add_command(_migrate_to_v5)
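# Example invocations (illustrative; assumes this module is exposed as the
# 'bench' console script):
#
#   bench init my-bench --frappe-branch develop
#   bench new-site site1.local --mariadb-root-password secret
#   bench update --patch --build
#   bench config http_timeout 120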
|
saurabh6790/phr-bench-repo
|
bench/cli.py
|
Python
|
gpl-3.0
| 14,438
|
# -*- coding: utf-8 -*-
"""
Public API for Panucci
This module provides enough functionality to create a frontend to control
Panucci as well as exporting a large portion of the Panucci API over D-Bus.
"""
from __future__ import absolute_import
import dbus
import logging
from panucci import player
from panucci import services
class PanucciAPI(services.ForwardingObservableService, dbus.service.Object):
"""
Panucci's public API for use in frontend development.
Signal descriptions
"playing" : ()
Emitted when the file starts playing.
"paused" : ()
Emitted when the file is paused.
"stopped" : ()
Emitted when the file is stopped.
"end-of-file" : ()
Emitted when the player reaches the end of the file.
"end-of-playlist" : ()
Emitted when the player reaches the end of the playlist.
"new-track-loaded" : ()
Emitted when the player changes track.
"new-metadata-available" : ()
Emitted when new metadata for the current track is available. Use
the get_metadata function to retrieve it.
"playlist-to-be-overwritten" : ()
Emitted when a playlist is about to be over-written. If the function
returns True the playlist will be over-written. Otherwise the
over-write will be aborted.
"item-added" : ( item-id )
Emitted when an item is added to the playlist.
"item-removed" : ( item-id )
"item-moved" : ( item-id, from-pos, to-pos )
"bookmark-added" : ( bookmark-id )
"bookmark-removed" : ( bookmark-id )
"""
player_signals = {
"new-metadata-available" : "new-metadata-available",
"playlist-to-be-overwritten": "playlist-to-be-overwritten",
"end-of-playlist" : "end-of-playlist",
"item-removed" : "item-removed",
"item-moved" : "item-moved",
"bookmark-added" : "bookmark-added",
"new-track-loaded" : "new-track-loaded",
"bookmark-removed" : "bookmark-removed",
"item-added" : "item-added", }
playlist_signals = { "playing" : "playing",
"paused" : "paused",
"end-of-file" : "eof",
"stopped" : "stopped", }
signals = player_signals.keys() + playlist_signals.keys()
def __init__(self):
self.__log = logging.getLogger("panucci.api.PanucciAPI")
services.ForwardingObservableService.__init__( self,
self.signals,
self.__log )
self.__player = player.player
self.__playlist = self.__player.playlist
self.forward( self.__player, self.player_signals )
self.forward( self.__playlist, self.playlist_signals )
def ready(self):
""" Can be called by the frontend when it's initialized. This loads
the last played track then sends the appropriate signals to
populate the frontend's UI (namely: new-track-loaded and
new-metadata-available) """
self.__player.init()
def quit(self):
""" Should be called when the user exits the application. This stops
the player and creates a resume bookmark. """
self.__player.quit()
def play(self):
""" Starts playing the current track in the playlist. Does nothing if
the playlist is empty or if the player is already playing. """
return self.__player.play()
def pause(self):
""" Pauses the current track. Does nothing if the playlist is empty or
the track is paused or stopped. """
return self.__player.pause()
def play_pause_toggle(self):
""" Calls play() if the player is paused, calls pause() if the player
is playing, does nothing if the player is stopped."""
return self.__player.play_pause_toggle()
def stop(self):
""" Stop the currently playing (or paused) track. """
return self.__player.stop()
def next_track(self, loop=False):
""" Changes to the next track in the playlist. If "loop" is set to
True, then when the end of the playlist is reached it will loop
to the beginning and continue playing. """
return self.__playlist.skip( skip_by=1, loop=loop )
def previous_track(self, loop=False):
""" Changes to the previous track in the playlist. If "loop" is set to
True and the current track is the first in the playlist then this
function will skip to the last track in the playlist. """
return self.__playlist.skip( skip_by=-1, loop=loop )
def seek(self, from_beginning=None, from_current=None, percent=None ):
""" A very flexible function to seek in the current file
Params: Requires ONE of the following keyword arguments
- from_beginning=n: seek n nanoseconds from the start of
the file
- from_current=n: seek n nanoseconds from the current
position
- percent=n: seek n percent from the beginning of the file
Returns: False if the seek was NOT possible
( position, duration ) if the seek was possible
"""
return self.__player.do_seek( from_beginning = from_beginning,
from_current = from_current,
percent = percent )
def get_position_duration(self):
""" Returns the position in the current file and the duration in
nanoseconds (10**-9). Returns ( -1, -1 ) if seeking is not possible
or if no file is loaded. """
return self.__player.get_position_duration()
def get_metadata(self):
""" Returns a dict containing metadata related to the current file
being played. It can contain the following keys:
"title" - The title of the track
"artist" - The artist tag for the track
"album" - The album tag for the track
"coverart" - A binary blob representing an image
"length" - The length in nanoseconds of the track
"""
return self.__playlist.get_file_metadata()
def play_uri(self, uri):
""" Erase the playlist and start playing URI. The only supported URI
at the moment is file:// followed by an absolute path.
If the playlist has been modified since it was loaded a
"playlist-to-be-overwritten" signal will be emitted. """
return self.__playlist.load(uri)
def play_directory(self, dirpath):
""" Same as play_uri except dirpath is just an absolute path to a local
directory. All the files in the directory will be loaded. """
return self.__playlist.load_directory(dirpath)
    def queue_uri(self, uri):
        """ Adds a URI to the end of the playlist; see play_uri for supported
            URIs. If the playlist is empty the file will start playing. """
return self.__playlist.append(uri)
def queue_directory(self, dirpath):
""" Same as queue_uri except dirpath is just an absolute path to a local
directory. All the files in the directory will be queued. """
return self.__playlist.load_directory( dirpath, append=True )
def add_bookmark_at_current_position( self, name=None ):
""" Adds a bookmark at the current position in the track. The
bookmark's name will be a formatted version of the position
(hh:mm:ss). If a name is provided it will be used instead. """
return self.__player.add_bookmark_at_current_position( label=name )
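    # --- Illustrative usage sketch (not part of the original panucci source) ---
    # A minimal example of how a frontend might drive this API, using only the
    # methods defined in this class; the file path below is hypothetical.
    #
    #     api = PanucciAPI()
    #     api.ready()                                    # load the last played track
    #     api.play_uri('file:///home/user/podcast.ogg')  # replace the current playlist
    #     api.play_pause_toggle()                        # start playback
    #     api.seek(percent=50)                           # jump to the middle of the file
    #     api.add_bookmark_at_current_position('Half way')
    #     metadata = api.get_metadata()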
    def get_playlist_item_data( self, item_id ):
        """ Returns data about the playlist item with the given id.
            (Not implemented yet.) """
    def remove_bookmark(self, bookmark_id):
        """ Removes the bookmark with the given id. (Not implemented yet.) """
    def get_recent_files(self):
        """ Returns a list of recently played files. (Not implemented yet.) """
    def show_main_window(self):
        """ Asks the frontend to show its main window. (Not implemented yet.) """
| gpodder/panucci | src/panucci/api.py | Python | gpl-3.0 | 8,323 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BasePage
from pages.regions.download_button import DownloadButton
from pages.regions.modal import ModalProtocol
class PlatformDownloadPage(BasePage):
URL_TEMPLATE = '/{locale}/firefox/{slug}/'
_download_button_locator = (By.ID, 'download-button-desktop-release')
_platforms_modal_link_locator = (By.CLASS_NAME, 'js-platform-modal-button')
_platforms_modal_content_locator = (By.CLASS_NAME, 'mzp-u-modal-content')
@property
def download_button(self):
el = self.find_element(*self._download_button_locator)
return DownloadButton(self, root=el)
def download_firefox(self):
self.download_button.click()
from pages.firefox.new.thank_you import ThankYouPage
return ThankYouPage(self.selenium, self.base_url).wait_for_page_to_load()
def open_other_platforms_modal(self):
modal = ModalProtocol(self)
self.find_element(*self._platforms_modal_link_locator).click()
self.wait.until(lambda s: modal.displays(self._platforms_modal_content_locator))
return modal
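# --- Illustrative usage sketch (not part of the original bedrock test suite) ---
# Shows how this page object might be driven from a pytest-style test. The
# `selenium` and `base_url` fixtures, the locale/slug values, and the use of
# pypom's Page.open() through BasePage are assumptions, not confirmed by this file.
def _example_download_flow(selenium, base_url):
    page = PlatformDownloadPage(selenium, base_url, locale='en-US', slug='windows').open()
    return page.download_firefox()  # clicks the button and waits for the thank-you page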
| ericawright/bedrock | tests/pages/firefox/new/platform.py | Python | mpl-2.0 | 1,328 |
from setuptools import find_packages, setup
def findRequirements():
    """
    Read the requirements.txt file and parse it into requirements for setup's
    install_requires option.
    """
    # Skip comments and blank lines so empty strings never reach install_requires.
    return [line.strip()
            for line in open('requirements.txt').readlines()
            if line.strip() and not line.startswith('#')]
setup(name='cloudbrain_examples',
version='0.0.1',
description='CloudBrain Examples',
author='Marion Le Borgne',
url='https://github.com/cloudbrain/cloudbrain-examples',
packages=find_packages(),
install_requires=findRequirements(),
license=open('LICENSE.txt').read(),
long_description=open('README.md').read()
)
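# Illustrative note (not part of the original file): with this setup.py next to the
# requirements.txt, README.md and LICENSE.txt it reads, a typical editable install
# would be `pip install -e .`, which fills install_requires via findRequirements().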
| marionleborgne/cloudbrain_examples | setup.py | Python | agpl-3.0 | 677 |
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Luis Felipe Miléo - KMEE
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
import urllib
import urllib2
import json
from collections import namedtuple
WS_SERVICOS = 0
WS_PRODUTOS = 1
WS_IBPT = {
WS_SERVICOS: 'http://iws.ibpt.org.br/api/deolhonoimposto/Servicos/?',
WS_PRODUTOS: 'http://iws.ibpt.org.br/api/deolhonoimposto/Produtos/?',
}
DeOlhoNoImposto = namedtuple('Config', 'token cnpj uf')
def _convert(dictionary):
return namedtuple('Result', dictionary.keys())(**dictionary)
def _response_to_dict(response):
json_acceptable_string = response.replace("'", "\"").lower()
return json.loads(json_acceptable_string)
def _request(req):
try:
response = urllib2.urlopen(req)
data = _response_to_dict(response.read())
return _convert(data)
except urllib2.HTTPError, e:
from odoo import _
from odoo.exceptions import Warning as UserError
raise UserError(_('Error in the request: {0}'.format(e)))
def get_ibpt_product(config, ncm, ex='0', reference=None, description=None,
uom=None, amount=None, gtin=None):
data = urllib.urlencode({
'token': config.token,
'cnpj': config.cnpj,
'uf': config.uf,
'codigo': ncm,
'ex': ex,
'codigoInterno': reference,
'descricao': description,
'unidadeMedida': uom,
'valor': amount,
'gtin': gtin,
})
req = urllib2.Request(WS_IBPT[WS_PRODUTOS] + data)
return _request(req)
def get_ibpt_service(config, nbs, description=None, uom=None, amount=None):
data = urllib.urlencode({
'token': config.token,
'cnpj': config.cnpj,
'uf': config.uf,
'codigo': nbs,
'descricao': description,
'unidadeMedida': uom,
'valor': amount,
})
req = urllib2.Request(WS_IBPT[WS_SERVICOS] + data)
return _request(req)
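# --- Illustrative usage sketch (not part of the original module) ---
# Queries the product endpoint with a DeOlhoNoImposto config tuple. The token,
# CNPJ, state (UF) and NCM code below are placeholders; the fields available on
# the returned namedtuple depend on the IBPT response (keys are lowercased by
# _response_to_dict).
def _example_ibpt_lookup():
    config = DeOlhoNoImposto(token='my-api-token', cnpj='00000000000000', uf='SP')
    return get_ibpt_product(config, ncm='22021000', description='Refrigerante',
                            uom='UN', amount=10.0)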
| odoo-brazil/l10n-brazil-wip | l10n_br_account/sped/ibpt/deolhonoimposto.py | Python | agpl-3.0 | 1,962 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# AvanzOSC, Avanzed Open Source Consulting
# Copyright (C) 2011-2012 Iker Coranti (www.avanzosc.com). All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import wiz_cpostal_region
| jaumemarti/l10n-spain-txerpa | __unported__/l10n_es_toponyms_region/wizard/__init__.py | Python | agpl-3.0 | 1,013 |
# -*- encoding: utf-8 -*-
import re
import string
from unicodedata import normalize
from lxml import etree
from lxml.etree import ElementTree, Element, SubElement
from datetime import datetime
import time
import netsvc
from osv import fields, osv
import decimal_precision as dp
from tools.translate import _
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def _amount_all(self, cr, uid, ids, name, args, context=None):
obj_precision = self.pool.get('decimal.precision')
prec = obj_precision.precision_get(cr, uid, 'Account')
res = {}
for invoice in self.browse(cr, uid, ids, context=context):
res[invoice.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_tax_discount': 0.0,
'amount_total': 0.0,
'icms_base': 0.0,
'icms_value': 0.0,
'icms_st_base': 0.0,
'icms_st_value': 0.0,
'ipi_base': 0.0,
'ipi_value': 0.0,
'pis_base': 0.0,
'pis_value': 0.0,
'cofins_base': 0.0,
'cofins_value': 0.0,
}
for line in invoice.invoice_line:
res[invoice.id]['amount_untaxed'] += line.price_total
res[invoice.id]['icms_base'] += line.icms_base
res[invoice.id]['icms_value'] += line.icms_value
res[invoice.id]['icms_st_base'] += line.icms_st_base
res[invoice.id]['icms_st_value'] += line.icms_st_value
res[invoice.id]['ipi_base'] += line.ipi_base
res[invoice.id]['ipi_value'] += line.ipi_value
res[invoice.id]['pis_base'] += line.pis_base
res[invoice.id]['pis_value'] += line.pis_value
res[invoice.id]['cofins_base'] += line.cofins_base
res[invoice.id]['cofins_value'] += line.cofins_value
for invoice_tax in invoice.tax_line:
if not invoice_tax.tax_code_id.tax_include:
res[invoice.id]['amount_tax'] += invoice_tax.amount
res[invoice.id]['amount_total'] = res[invoice.id]['amount_tax'] + res[invoice.id]['amount_untaxed']
return res
def _get_fiscal_type(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('fiscal_type', 'product')
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
result = super(account_invoice, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
if context is None:
context = {}
field_names = ['service_type_id']
result['fields'].update(self.fields_get(cr, uid, field_names, context))
if not view_type:
view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')])
view_type = 'tree'
if view_type == 'form':
eview = etree.fromstring(result['arch'])
if 'type' in context.keys():
operation_type = {'out_invoice': 'output', 'in_invoice': 'input', 'out_refund': 'input', 'in_refund': 'output'}
types = eview.xpath("//field[@name='invoice_line']")
for type in types:
type.set('context', "{'type': '%s', 'fiscal_type': '%s'}" % (context['type'], context.get('fiscal_type', 'product'), ))
cfops = eview.xpath("//field[@name='cfop_id']")
for cfop_id in cfops:
cfop_id.set('domain', "[('type', '=', '%s')]" % (operation_type[context['type']], ))
cfop_id.set('required', '1')
fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']")
for fiscal_operation_category_id in fiscal_operation_categories:
fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'product'), ('type', '=', '%s'), ('use_invoice', '=', True)]" % (operation_type[context['type']], ))
fiscal_operation_category_id.set('required', '1')
fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']")
for fiscal_operation_id in fiscal_operations:
fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'product'), ('type', '=', '%s'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]" % (operation_type[context['type']], ))
fiscal_operation_id.set('required', '1')
if context.get('fiscal_type', False) == 'service':
delivery_infos = eview.xpath("//group[@name='delivery_info']")
for delivery_info in delivery_infos:
delivery_info.set('invisible', '1')
cfops = eview.xpath("//field[@name='cfop_id']")
for cfop_id in cfops:
cfop_id.set('name', 'service_type_id')
cfop_id.set('domain', '[]')
document_series = eview.xpath("//field[@name='document_serie_id']")
for document_serie_id in document_series:
document_serie_id.set('domain', "[('fiscal_type', '=', 'service')]")
if context['type'] in ('in_invoice', 'out_refund'):
fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']")
for fiscal_operation_category_id in fiscal_operation_categories:
fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'input'), ('use_invoice', '=', True)]")
fiscal_operation_category_id.set('required', '1')
fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']")
for fiscal_operation_id in fiscal_operations:
fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'input'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]")
fiscal_operation_id.set('required', '1')
if context['type'] in ('out_invoice', 'in_refund'):
fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']")
for fiscal_operation_category_id in fiscal_operation_categories:
fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'output'), ('use_invoice', '=', True)]")
fiscal_operation_category_id.set('required', '1')
fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']")
for fiscal_operation_id in fiscal_operations:
fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'output'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]")
fiscal_operation_id.set('required', '1')
result['arch'] = etree.tostring(eview)
if view_type == 'tree':
doc = etree.XML(result['arch'])
nodes = doc.xpath("//field[@name='partner_id']")
partner_string = _('Customer')
if context.get('type', 'out_invoice') in ('in_invoice', 'in_refund'):
partner_string = _('Supplier')
for node in nodes:
node.set('string', partner_string)
result['arch'] = etree.tostring(doc)
return result
def _get_invoice_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.invoice.line').browse(cr, uid, ids, context=context):
result[line.invoice_id.id] = True
return result.keys()
def _get_invoice_tax(self, cr, uid, ids, context=None):
result = {}
for tax in self.pool.get('account.invoice.tax').browse(cr, uid, ids, context=context):
result[tax.invoice_id.id] = True
return result.keys()
def _get_receivable_lines(self, cr, uid, ids, name, arg, context=None):
res = {}
for invoice in self.browse(cr, uid, ids, context=context):
id = invoice.id
res[id] = []
if not invoice.move_id:
continue
data_lines = [x for x in invoice.move_id.line_id if x.account_id.id == invoice.account_id.id and x.account_id.type in ('receivable', 'payable') and invoice.journal_id.revenue_expense]
New_ids = []
for line in data_lines:
New_ids.append(line.id)
New_ids.sort()
res[id] = New_ids
return res
_columns = {
'state': fields.selection([
('draft', 'Draft'),
('proforma', 'Pro-forma'),
('proforma2', 'Pro-forma'),
('open', 'Open'),
('sefaz_export', 'Enviar para Receita'),
('sefaz_exception', 'Erro de autorização da Receita'),
('paid', 'Paid'),
('cancel', 'Cancelled')
], 'State', select=True, readonly=True,
        help=' * The \'Draft\' state is used when a user is encoding a new and unconfirmed invoice. \
            \n* The \'Pro-forma\' state is used when the invoice is pro-forma and does not yet have an invoice number. \
            \n* The \'Open\' state is used once the user creates the invoice; an invoice number is generated and it stays open until the invoice is paid. \
            \n* The \'Paid\' state is set automatically when the invoice is paid.\
            \n* The \'sefaz_out\' state means the export file for the Receita (tax authority) system has been generated.\
            \n* The \'sefaz_aut\' state means the authorization file from the Receita has been received.\
            \n* The \'Cancelled\' state is used when the user cancels the invoice.'),
'partner_shipping_id': fields.many2one('res.partner.address', 'Endereço de Entrega', readonly=True, states={'draft': [('readonly', False)]}, help="Shipping address for current sales order."),
'own_invoice': fields.boolean('Nota Fiscal Própria', readonly=True, states={'draft': [('readonly', False)]}),
'internal_number': fields.char('Invoice Number', size=32, readonly=True, states={'draft': [('readonly', False)]}, help="Unique number of the invoice, computed automatically when the invoice is created."),
'vendor_serie': fields.char('Série NF Entrada', size=12, readonly=True, states={'draft': [('readonly', False)]}, help="Série do número da Nota Fiscal do Fornecedor"),
'nfe_access_key': fields.char('Chave de Acesso NFE', size=44, readonly=True, states={'draft': [('readonly', False)]}),
'nfe_status': fields.char('Status na Sefaz', size=44, readonly=True),
'nfe_date': fields.datetime('Data do Status NFE', readonly=True, states={'draft': [('readonly', False)]}),
'nfe_export_date': fields.datetime('Exportação NFE', readonly=True),
'fiscal_document_id': fields.many2one('l10n_br_account.fiscal.document', 'Documento', readonly=True, states={'draft': [('readonly', False)]}),
'fiscal_document_nfe': fields.related('fiscal_document_id', 'nfe', type='boolean', readonly=True, size=64, relation='l10n_br_account.fiscal.document', store=True, string='NFE'),
        'fiscal_type': fields.selection([('product', 'Produto'), ('service', 'Serviço')], 'Tipo Fiscal', required=True),
'move_line_receivable_id': fields.function(_get_receivable_lines, method=True, type='many2many', relation='account.move.line', string='Entry Lines'),
'document_serie_id': fields.many2one('l10n_br_account.document.serie', 'Serie', domain="[('fiscal_document_id', '=', fiscal_document_id), ('company_id', '=', company_id)]", readonly=True, states={'draft': [('readonly', False)]}),
'fiscal_operation_category_id': fields.many2one('l10n_br_account.fiscal.operation.category', 'Categoria', readonly=True, states={'draft': [('readonly', False)]}),
'fiscal_operation_id': fields.many2one('l10n_br_account.fiscal.operation', 'Operação Fiscal', domain="[('fiscal_operation_category_id', '=', fiscal_operation_category_id)]", readonly=True, states={'draft': [('readonly', False)]}),
'cfop_id': fields.many2one('l10n_br_account.cfop', 'CFOP', readonly=True, states={'draft': [('readonly', False)]}),
'service_type_id': fields.many2one('l10n_br_account.service.type', 'Tipo de Serviço', readonly=True, states={'draft': [('readonly', False)]}),
'amount_untaxed': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Untaxed',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'amount_tax': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Tax',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'amount_total': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Total',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'icms_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base ICMS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'icms_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor ICMS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'icms_st_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base ICMS ST',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'icms_st_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor ICMS ST',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'ipi_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base IPI',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'ipi_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor IPI',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'pis_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base PIS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'pis_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor PIS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'cofins_base': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Base COFINS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
'cofins_value': fields.function(_amount_all, method=True, digits_compute=dp.get_precision('Account'), string='Valor COFINS',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit', 'invoice_line_tax_id', 'quantity', 'discount'], 20),
},
multi='all'),
}
_defaults = {
'own_invoice': True,
'fiscal_type': _get_fiscal_type,
}
# go from canceled state to draft state
def action_cancel_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft', 'internal_number': False, 'nfe_access_key': False,
'nfe_status': False, 'nfe_date': False, 'nfe_export_date': False})
wf_service = netsvc.LocalService("workflow")
for inv_id in ids:
wf_service.trg_delete(uid, 'account.invoice', inv_id, cr)
wf_service.trg_create(uid, 'account.invoice', inv_id, cr)
return True
def copy(self, cr, uid, id, default={}, context=None):
default.update({
'internal_number': False,
'nfe_access_key': False,
'nfe_status': False,
'nfe_date': False,
'nfe_export_date': False,
})
return super(account_invoice, self).copy(cr, uid, id, default, context)
def action_internal_number(self, cr, uid, ids, context=None):
if context is None:
context = {}
for obj_inv in self.browse(cr, uid, ids):
if obj_inv.own_invoice:
obj_sequence = self.pool.get('ir.sequence')
seq_no = obj_sequence.get_id(cr, uid, obj_inv.journal_id.internal_sequence.id, context=context)
self.write(cr, uid, obj_inv.id, {'internal_number': seq_no})
return True
def action_number(self, cr, uid, ids, context=None):
if context is None:
context = {}
        #TODO: not a correct fix, but the values need to be refreshed before reading them.
#self.write(cr, uid, ids, {})
for obj_inv in self.browse(cr, uid, ids):
id = obj_inv.id
invtype = obj_inv.type
number = obj_inv.number
move_id = obj_inv.move_id and obj_inv.move_id.id or False
reference = obj_inv.internal_number or obj_inv.reference or ''
#self.write(cr, uid, ids, {'internal_number': number})
#if invtype in ('in_invoice', 'in_refund'):
# if not reference:
# ref = self._convert_ref(cr, uid, number)
# else:
# ref = reference
#else:
# ref = self._convert_ref(cr, uid, number)
ref = reference
cr.execute('UPDATE account_move SET ref=%s ' \
'WHERE id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_move_line SET ref=%s ' \
'WHERE move_id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_analytic_line SET ref=%s ' \
'FROM account_move_line ' \
'WHERE account_move_line.move_id = %s ' \
'AND account_analytic_line.move_id = account_move_line.id',
(ref, move_id))
for inv_id, name in self.name_get(cr, uid, [id]):
ctx = context.copy()
if obj_inv.type in ('out_invoice', 'out_refund'):
ctx = self.get_log_context(cr, uid, context=ctx)
message = _('Invoice ') + " '" + name + "' " + _("is validated.")
self.log(cr, uid, inv_id, message, context=ctx)
return True
def action_move_create(self, cr, uid, ids, *args):
result = super(account_invoice, self).action_move_create(cr, uid, ids, *args)
for inv in self.browse(cr, uid, ids):
if inv.move_id:
self.pool.get('account.move').write(cr, uid, [inv.move_id.id], {'ref': inv.internal_number})
for move_line in inv.move_id.line_id:
self.pool.get('account.move.line').write(cr, uid, [move_line.id], {'ref': inv.internal_number})
move_lines = [x for x in inv.move_id.line_id if x.account_id.id == inv.account_id.id and x.account_id.type in ('receivable', 'payable')]
i = len(move_lines)
for move_line in move_lines:
move_line_name = '%s/%s' % (inv.internal_number, i)
self.pool.get('account.move.line').write(cr, uid, [move_line.id], {'name': move_line_name})
i -= 1
return result
def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):
"""finalize_invoice_move_lines(cr, uid, invoice, move_lines) -> move_lines
Hook method to be overridden in additional modules to verify and possibly alter the
move lines to be created by an invoice, for special cases.
        * remove move lines with neither credit nor debit (we still have to find out where the l10n_br modules create these erroneous entries);
        * correct move entries when there are retained taxes.
:param invoice_browse: browsable record of the invoice that is generating the move lines
:param move_lines: list of dictionaries with the account.move.lines (as for create())
:return: the (possibly updated) final move_lines to create for this invoice
"""
total_taxes_credit = 0
move_lines_tmp = []
remove_itens = []
tax_retained_itens = []
mv_tmp_tuple = []
final_credit_ind = -1
initial_debit_ind = -1
for ind, move_line in enumerate(move_lines):
move_line_item = move_line[2]
if not move_line_item['credit'] and not move_line_item['debit']:
remove_itens.append(ind)
elif move_line_item['account_id'] == invoice_browse.account_id.id and not move_line_item['credit']:
move_line_item['debit'] = invoice_browse.amount_total
initial_debit_ind = ind
elif move_line_item['tax_amount'] < 0:
move_line_item['tax_amount'] = - move_line_item['tax_amount']
tax_retained_itens.append(ind)
elif move_line_item['credit'] > 0 and move_line_item['credit'] != invoice_browse.amount_untaxed:
total_taxes_credit += move_line_item['credit']
elif move_line_item['credit'] == invoice_browse.amount_untaxed:
final_credit_ind = ind
if final_credit_ind > -1:
move_lines[final_credit_ind][2]['credit'] = invoice_browse.amount_total - total_taxes_credit
for mv_ind in tax_retained_itens:
mv_tmp = move_lines[mv_ind][2].copy()
mv_tmp['credit'] = mv_tmp['debit']
mv_tmp['debit'] = False
mv_tmp_tuple = 0, 0, mv_tmp
move_lines_tmp.append(mv_tmp_tuple)
while remove_itens:
move_lines.pop(remove_itens.pop())
while move_lines_tmp:
move_lines.append(move_lines_tmp.pop())
return move_lines
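    # Illustration (not in the original source): the move_lines passed in are Odoo
    # one2many commands of the form (0, 0, vals), e.g.
    #   [(0, 0, {'account_id': 7,  'debit': 115.0, 'credit': 0.0,   'tax_amount': 0.0}),
    #    (0, 0, {'account_id': 12, 'debit': 0.0,   'credit': 100.0, 'tax_amount': 100.0}),
    #    (0, 0, {'account_id': 30, 'debit': 0.0,   'credit': 15.0,  'tax_amount': 15.0})]
    # The hook above drops zero-valued lines, resets the receivable debit to the
    # invoice total, and mirrors negative tax_amount lines (retained taxes) as credits.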
def nfe_dv(self, key):
#Testing
return '2'
def nfe_check(self, cr, uid, ids, context=None):
        strErro = u''
if context is None:
context = {}
for inv in self.browse(cr, uid, ids):
            #Invoice (nota fiscal)
if not inv.own_invoice or inv.fiscal_type == 'service':
continue
company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0]
if not inv.document_serie_id:
strErro = u'Nota Fiscal - Série da nota fiscal\n'
if not inv.fiscal_document_id:
strErro = u'Nota Fiscal - Tipo de documento fiscal\n'
#if not inv.date_invoice:
# strErro = 'Nota Fiscal - Data da nota fiscal\n'
if not inv.journal_id.internal_sequence:
strErro = u'Nota Fiscal - Número da nota fiscal, o diário deve ter uma sequencia interna\n'
if not inv.cfop_id:
strErro = u'Nota Fiscal - CFOP\n'
else:
if not inv.cfop_id.small_name:
strErro = u'Nota Fiscal - Descrição reduzida do CFOP\n'
            #Issuer (emitente)
if not inv.company_id.partner_id.legal_name:
strErro = u'Emitente - Razão Social\n'
if not inv.company_id.partner_id.name:
strErro = u'Emitente - Fantasia\n'
if not inv.company_id.partner_id.cnpj_cpf:
strErro = u'Emitente - CNPJ/CPF\n'
if not company_addr_default.street:
strErro = u'Emitente / Endereço - Logradouro\n'
if not company_addr_default.number:
strErro = u'Emitente / Endereço - Número\n'
if not company_addr_default.zip:
strErro = u'Emitente / Endereço - CEP\n'
if not inv.company_id.cnae_main_id:
strErro = u'Emitente / CNAE Principal\n'
if not inv.company_id.partner_id.inscr_est:
strErro = u'Emitente / Inscrição Estadual\n'
if not company_addr_default.state_id:
strErro = u'Emitente / Endereço - Estado\n'
else:
if not company_addr_default.state_id.ibge_code:
strErro = u'Emitente / Endereço - Código do IBGE do estado\n'
if not company_addr_default.state_id.name:
strErro = u'Emitente / Endereço - Nome do estado\n'
if not company_addr_default.l10n_br_city_id:
strErro = u'Emitente / Endereço - município\n'
else:
if not company_addr_default.l10n_br_city_id.name:
strErro = u'Emitente / Endereço - Nome do município\n'
if not company_addr_default.l10n_br_city_id.ibge_code:
strErro = u'Emitente / Endereço - Código do IBGE do município\n'
if not company_addr_default.country_id:
strErro = u'Emitente / Endereço - país\n'
else:
if not company_addr_default.country_id.name:
strErro = u'Emitente / Endereço - Nome do país\n'
if not company_addr_default.country_id.bc_code:
strErro = u'Emitente / Endereço - Código do BC do país\n'
if not company_addr_default.country_id:
strErro = u'Emitente / Regime Tributário\n'
            #Recipient (destinatário)
if not inv.partner_id.legal_name:
strErro = u'Destinatário - Razão Social\n'
if not inv.partner_id.cnpj_cpf:
strErro = u'Destinatário - CNPJ/CPF\n'
if not inv.address_invoice_id.street:
strErro = u'Destinatário / Endereço - Logradouro\n'
if not inv.address_invoice_id.number:
strErro = u'Destinatário / Endereço - Número\n'
if not inv.address_invoice_id.zip:
strErro = u'Destinatário / Endereço - CEP\n'
if not inv.address_invoice_id.state_id:
strErro = u'Destinatário / Endereço - Estado\n'
else:
if not inv.address_invoice_id.state_id.ibge_code:
strErro = u'Destinatário / Endereço - Código do IBGE do estado\n'
if not inv.address_invoice_id.state_id.name:
strErro = u'Destinatário / Endereço - Nome do estado\n'
if not inv.address_invoice_id.l10n_br_city_id:
strErro = u'Destinatário / Endereço - Município\n'
else:
if not inv.address_invoice_id.l10n_br_city_id.name:
strErro = u'Destinatário / Endereço - Nome do município\n'
if not inv.address_invoice_id.l10n_br_city_id.ibge_code:
strErro = u'Destinatário / Endereço - Código do IBGE do município\n'
if not inv.address_invoice_id.country_id:
strErro = u'Destinatário / Endereço - País\n'
else:
if not inv.address_invoice_id.country_id.name:
strErro = u'Destinatário / Endereço - Nome do país\n'
if not inv.address_invoice_id.country_id.bc_code:
strErro = u'Destinatário / Endereço - Código do BC do país\n'
            #Delivery address
if inv.partner_shipping_id:
if inv.address_invoice_id != inv.partner_shipping_id:
if not inv.partner_shipping_id.street:
strErro = u'Destinatário / Endereço de Entrega - Logradouro\n'
if not inv.partner_shipping_id.number:
strErro = u'Destinatário / Endereço de Entrega - Número\n'
if not inv.address_invoice_id.zip:
strErro = u'Destinatário / Endereço de Entrega - CEP\n'
if not inv.partner_shipping_id.state_id:
strErro = u'Destinatário / Endereço de Entrega - Estado\n'
else:
if not inv.partner_shipping_id.state_id.ibge_code:
strErro = u'Destinatário / Endereço de Entrega - Código do IBGE do estado\n'
if not inv.partner_shipping_id.state_id.name:
strErro = u'Destinatário / Endereço de Entrega - Nome do estado\n'
if not inv.partner_shipping_id.l10n_br_city_id:
strErro = u'Destinatário / Endereço - Município\n'
else:
if not inv.partner_shipping_id.l10n_br_city_id.name:
strErro = u'Destinatário / Endereço de Entrega - Nome do município\n'
if not inv.partner_shipping_id.l10n_br_city_id.ibge_code:
strErro = u'Destinatário / Endereço de Entrega - Código do IBGE do município\n'
if not inv.partner_shipping_id.country_id:
strErro = u'Destinatário / Endereço de Entrega - País\n'
else:
if not inv.partner_shipping_id.country_id.name:
strErro = u'Destinatário / Endereço de Entrega - Nome do país\n'
if not inv.partner_shipping_id.country_id.bc_code:
strErro = u'Destinatário / Endereço de Entrega - Código do BC do país\n'
            #Products
for inv_line in inv.invoice_line:
if inv_line.product_id:
if not inv_line.product_id.code:
strErro = u'Produtos e Serviços: %s, Qtde: %s - Código do produto\n' % (inv_line.product_id.name, inv_line.quantity)
if not inv_line.product_id.name:
strErro = u'Produtos e Serviços: %s, Qtde: %s - Nome do produto\n' % (inv_line.product_id.name, inv_line.quantity)
if not inv_line.cfop_id:
strErro = u'Produtos e Serviços: %s, Qtde: %s - CFOP\n' % (inv_line.product_id.name, inv_line.quantity)
else:
if not inv_line.cfop_id.code:
strErro = u'Produtos e Serviços: %s, Qtde: %s - Código do CFOP\n' % (inv_line.product_id.name, inv_line.quantity)
if not inv_line.uos_id:
strErro = u'Produtos e Serviços: %s, Qtde: %s - Unidade de medida\n' % (inv_line.product_id.name, inv_line.quantity)
if not inv_line.quantity:
strErro = u'Produtos e Serviços: %s, Qtde: %s - Quantidade\n' % (inv_line.product_id.name, inv_line.quantity)
if not inv_line.price_unit:
strErro = u'Produtos e Serviços: %s, Qtde: %s - Preço unitário\n' % (inv_line.product_id.name, inv_line.quantity)
if not inv_line.icms_cst:
strErro = u'Produtos e Serviços: %s, Qtde: %s - CST do ICMS\n' % (inv_line.product_id.name, inv_line.quantity)
if not inv_line.ipi_cst:
strErro = u'Produtos e Serviços: %s, Qtde: %s - CST do IPI\n' % (inv_line.product_id.name, inv_line.quantity)
if not inv_line.pis_cst:
strErro = u'Produtos e Serviços: %s, Qtde: %s - CST do PIS\n' % (inv_line.product_id.name, inv_line.quantity)
if not inv_line.cofins_cst:
strErro = u'Produtos e Serviços: %s, Qtde: %s - CST do COFINS\n' % (inv_line.product_id.name, inv_line.quantity)
if strErro:
raise osv.except_osv(_('Error !'), _("Error Validating NFE:\n '%s'") % (strErro, ))
return True
def nfe_export_txt(self, cr, uid, ids, context=False):
StrFile = ''
StrNF = 'NOTA FISCAL|%s|\n' % len(ids)
StrFile = StrNF
for inv in self.browse(cr, uid, ids, context={'lang': 'pt_BR'}):
            #Company address
company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0]
#nfe_key = unicode(company_addr_default.state_id.ibge_code).strip().rjust(2, u'0')
#nfe_key += unicode(datetime.strptime(inv.date_invoice, '%Y-%m-%d').strftime(u'%y%m')).strip().rjust(4, u'0')
#nfe_key += re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.cnpj_cpf or '')
#nfe_key += inv.fiscal_document_id.code
#nfe_key += unicode(inv.document_serie_id.code).strip().rjust(3, u'0')
#nfe_key += unicode(inv.internal_number).strip().rjust(9, u'0')
#fe_key += unicode('1').strip().rjust(1, u'0') # Homologação
#nfe_key += unicode(inv.internal_number).strip().rjust(8, u'0')
#nfe_key += unicode(self.nfe_dv(nfe_key)).strip().rjust(1, u'0')
StrA = 'A|%s|%s|\n' % ('2.00', '')
StrFile += StrA
StrRegB = {
'cUF': company_addr_default.state_id.ibge_code,
'cNF': '',
'NatOp': normalize('NFKD', unicode(inv.cfop_id.small_name or '')).encode('ASCII', 'ignore'),
'intPag': '2',
'mod': inv.fiscal_document_id.code,
'serie': inv.document_serie_id.code,
'nNF': inv.internal_number or '',
'dEmi': inv.date_invoice or '',
'dSaiEnt': inv.date_invoice or '',
'hSaiEnt': '',
'tpNF': '',
'cMunFG': ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.l10n_br_city_id.ibge_code),
'TpImp': '1',
'TpEmis': '1',
'cDV': '',
'tpAmb': '2',
'finNFe': '1',
'procEmi': '0',
'VerProc': '2.1.4',
'dhCont': '',
'xJust': '',
}
            if inv.cfop_id.type == "input":
StrRegB['tpNF'] = '0'
else:
StrRegB['tpNF'] = '1'
StrB = 'B|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegB['cUF'], StrRegB['cNF'], StrRegB['NatOp'], StrRegB['intPag'],
StrRegB['mod'], StrRegB['serie'], StrRegB['nNF'], StrRegB['dEmi'], StrRegB['dSaiEnt'],
StrRegB['hSaiEnt'], StrRegB['tpNF'], StrRegB['cMunFG'], StrRegB['TpImp'], StrRegB['TpEmis'],
StrRegB['cDV'], StrRegB['tpAmb'], StrRegB['finNFe'], StrRegB['procEmi'], StrRegB['VerProc'],
StrRegB['dhCont'], StrRegB['xJust'])
StrFile += StrB
StrRegC = {
'XNome': normalize('NFKD', unicode(inv.company_id.partner_id.legal_name or '')).encode('ASCII', 'ignore'),
'XFant': normalize('NFKD', unicode(inv.company_id.partner_id.name or '')).encode('ASCII', 'ignore'),
'IE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.inscr_est or ''),
'IEST': '',
'IM': re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.inscr_mun or ''),
'CNAE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.cnae_main_id.code or ''),
'CRT': inv.company_id.fiscal_type or '',
}
            #TODO - Check this: when the CNAE is filled in, the NF-e issuer software also requires the municipal registration (inscrição municipal); this looks like a bug in the NF-e issuer
if not inv.company_id.partner_id.inscr_mun:
StrRegC['CNAE'] = ''
StrC = 'C|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegC['XNome'], StrRegC['XFant'], StrRegC['IE'], StrRegC['IEST'],
StrRegC['IM'], StrRegC['CNAE'], StrRegC['CRT'])
StrFile += StrC
if inv.company_id.partner_id.tipo_pessoa == 'J':
StrC02 = 'C02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.cnpj_cpf or ''))
else:
StrC02 = 'C02a|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.company_id.partner_id.cnpj_cpf or ''))
StrFile += StrC02
StrRegC05 = {
'XLgr': normalize('NFKD', unicode(company_addr_default.street or '')).encode('ASCII', 'ignore'),
'Nro': company_addr_default.number or '',
'Cpl': normalize('NFKD', unicode(company_addr_default.street2 or '')).encode('ASCII', 'ignore'),
'Bairro': normalize('NFKD', unicode(company_addr_default.district or 'Sem Bairro')).encode('ASCII', 'ignore'),
'CMun': '%s%s' % (company_addr_default.state_id.ibge_code, company_addr_default.l10n_br_city_id.ibge_code),
'XMun': normalize('NFKD', unicode(company_addr_default.l10n_br_city_id.name or '')).encode('ASCII', 'ignore'),
'UF': company_addr_default.state_id.code or '',
'CEP': re.sub('[%s]' % re.escape(string.punctuation), '', str(company_addr_default.zip or '').replace(' ', '')),
'cPais': company_addr_default.country_id.bc_code or '',
'xPais': normalize('NFKD', unicode(company_addr_default.country_id.name or '')).encode('ASCII', 'ignore'),
'fone': re.sub('[%s]' % re.escape(string.punctuation), '', str(company_addr_default.phone or '').replace(' ', '')),
}
StrC05 = 'C05|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegC05['XLgr'], StrRegC05['Nro'], StrRegC05['Cpl'], StrRegC05['Bairro'],
StrRegC05['CMun'], StrRegC05['XMun'], StrRegC05['UF'], StrRegC05['CEP'],
StrRegC05['cPais'], StrRegC05['xPais'], StrRegC05['fone'])
StrFile += StrC05
if inv.partner_id.tipo_pessoa == 'J':
_xNome = normalize('NFKD', unicode(inv.partner_id.legal_name)).encode('ASCII', 'ignore')
else:
_xNome = normalize('NFKD', unicode(inv.partner_id.name)).encode('ASCII', 'ignore')
StrRegE = {
'xNome': _xNome,
'IE': re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.inscr_est or ''),
'ISUF': '',
'email': inv.partner_id.email or '',
}
StrE = 'E|%s|%s|%s|%s|\n' % (StrRegE['xNome'], StrRegE['IE'], StrRegE['ISUF'], StrRegE['email'])
StrFile += StrE
if inv.partner_id.tipo_pessoa == 'J':
StrE0 = 'E02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
else:
StrE0 = 'E03|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
StrFile += StrE0
StrRegE05 = {
'xLgr': normalize('NFKD', unicode(inv.address_invoice_id.street or '')).encode('ASCII', 'ignore'),
'nro': normalize('NFKD', unicode(inv.address_invoice_id.number or '')).encode('ASCII', 'ignore'),
'xCpl': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD', unicode(inv.address_invoice_id.street2 or '')).encode('ASCII', 'ignore')),
'xBairro': normalize('NFKD', unicode(inv.address_invoice_id.district or 'Sem Bairro')).encode('ASCII', 'ignore'),
'cMun': ('%s%s') % (inv.address_invoice_id.state_id.ibge_code, inv.address_invoice_id.l10n_br_city_id.ibge_code),
'xMun': normalize('NFKD', unicode(inv.address_invoice_id.l10n_br_city_id.name or '')).encode('ASCII', 'ignore'),
'UF': inv.address_invoice_id.state_id.code,
'CEP': re.sub('[%s]' % re.escape(string.punctuation), '', str(inv.address_invoice_id.zip or '').replace(' ', '')),
'cPais': inv.address_invoice_id.country_id.bc_code,
'xPais': normalize('NFKD', unicode(inv.address_invoice_id.country_id.name or '')).encode('ASCII', 'ignore'),
'fone': re.sub('[%s]' % re.escape(string.punctuation), '', str(inv.address_invoice_id.phone or '').replace(' ', '')),
}
StrE05 = 'E05|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegE05['xLgr'], StrRegE05['nro'], StrRegE05['xCpl'], StrRegE05['xBairro'],
StrRegE05['cMun'], StrRegE05['xMun'], StrRegE05['UF'], StrRegE05['CEP'],
StrRegE05['cPais'], StrRegE05['xPais'], StrRegE05['fone'], )
StrFile += StrE05
if inv.partner_shipping_id:
if inv.address_invoice_id != inv.partner_shipping_id:
StrRegG = {
'XLgr': normalize('NFKD', unicode(inv.partner_shipping_id.street or '', )).encode('ASCII', 'ignore'),
'Nro': normalize('NFKD', unicode(inv.partner_shipping_id.number or '')).encode('ASCII', 'ignore'),
'XCpl': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD', unicode(inv.partner_shipping_id.street2 or '')).encode('ASCII', 'ignore')),
'XBairro': re.sub('[%s]' % re.escape(string.punctuation), '', normalize('NFKD', unicode(inv.partner_shipping_id.district or 'Sem Bairro')).encode('ASCII', 'ignore')),
'CMun': ('%s%s') % (inv.partner_shipping_id.state_id.ibge_code, inv.partner_shipping_id.l10n_br_city_id.ibge_code),
'XMun': normalize('NFKD', unicode(inv.partner_shipping_id.l10n_br_city_id.name or '')).encode('ASCII', 'ignore'),
'UF': inv.address_invoice_id.state_id.code,
}
StrG = 'G|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegG['XLgr'], StrRegG['Nro'], StrRegG['XCpl'], StrRegG['XBairro'], StrRegG['CMun'], StrRegG['XMun'], StrRegG['UF'])
StrFile += StrG
if inv.partner_id.tipo_pessoa == 'J':
StrG0 = 'G02|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
else:
StrG0 = 'G02a|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.partner_id.cnpj_cpf or ''))
StrFile += StrG0
i = 0
for inv_line in inv.invoice_line:
i += 1
StrH = 'H|%s||\n' % (i)
StrFile += StrH
StrRegI = {
'CProd': normalize('NFKD', unicode(inv_line.product_id.code or '', )).encode('ASCII', 'ignore'),
'CEAN': inv_line.product_id.ean13 or '',
'XProd': normalize('NFKD', unicode(inv_line.product_id.name or '')).encode('ASCII', 'ignore'),
'NCM': re.sub('[%s]' % re.escape(string.punctuation), '', inv_line.product_id.property_fiscal_classification.name or ''),
'EXTIPI': '',
'CFOP': inv_line.cfop_id.code,
'UCom': normalize('NFKD', unicode(inv_line.uos_id.name or '', )).encode('ASCII', 'ignore'),
'QCom': str("%.4f" % inv_line.quantity),
'VUnCom': str("%.2f" % (inv_line.price_unit * (1 - (inv_line.discount or 0.0) / 100.0))),
'VProd': str("%.2f" % inv_line.price_total),
'CEANTrib': '',
'UTrib': inv_line.uos_id.name,
'QTrib': str("%.4f" % inv_line.quantity),
'VUnTrib': str("%.2f" % inv_line.price_unit),
'VFrete': '',
'VSeg': '',
'VDesc': '',
'vOutro': '',
'indTot': '1',
'xPed': '',
'nItemPed': '',
}
if inv_line.product_id.code:
StrRegI['CProd'] = inv_line.product_id.code
else:
StrRegI['CProd'] = unicode(i).strip().rjust(4, u'0')
                #In OpenERP the unit price already comes with the discount applied
#if inv_line.discount > 0:
# StrRegI['VDesc'] = str("%.2f" % (inv_line.quantity * (inv_line.price_unit * (1-(inv_line.discount or 0.0)/100.0))))
StrI = 'I|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegI['CProd'], StrRegI['CEAN'], StrRegI['XProd'], StrRegI['NCM'],
StrRegI['EXTIPI'], StrRegI['CFOP'], StrRegI['UCom'], StrRegI['QCom'],
StrRegI['VUnCom'], StrRegI['VProd'], StrRegI['CEANTrib'], StrRegI['UTrib'],
StrRegI['QTrib'], StrRegI['VUnTrib'], StrRegI['VFrete'], StrRegI['VSeg'],
StrRegI['VDesc'], StrRegI['vOutro'], StrRegI['indTot'], StrRegI['xPed'],
StrRegI['nItemPed'])
StrFile += StrI
StrM = 'M|\n'
StrFile += StrM
StrN = 'N|\n'
StrFile += StrN
                #TODO - Handle each CST type separately
                if inv_line.icms_cst == '00':
StrRegN02 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
}
StrN02 = 'N02|%s|%s|%s|%s|%s|%s|\n' % (StrRegN02['Orig'], StrRegN02['CST'], StrRegN02['ModBC'], StrRegN02['VBC'], StrRegN02['PICMS'],
StrRegN02['VICMS'])
StrFile += StrN02
                if inv_line.icms_cst == '20':
StrRegN04 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'PRedBC': str("%.2f" % inv_line.icms_percent_reduction),
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
}
StrN04 = 'N04|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN04['Orig'], StrRegN04['CST'], StrRegN04['ModBC'], StrRegN04['PRedBC'], StrRegN04['VBC'], StrRegN04['PICMS'],
StrRegN04['VICMS'])
StrFile += StrN04
                if inv_line.icms_cst == '10':
StrRegN03 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
'ModBCST': '4', # TODO
'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '',
'PRedBCST': '',
'VBCST': str("%.2f" % inv_line.icms_st_base),
'PICMSST': str("%.2f" % inv_line.icms_st_percent),
'VICMSST': str("%.2f" % inv_line.icms_st_value),
}
StrN03 = 'N03|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN03['Orig'], StrRegN03['CST'], StrRegN03['ModBC'], StrRegN03['VBC'], StrRegN03['PICMS'],
StrRegN03['VICMS'], StrRegN03['ModBCST'], StrRegN03['PMVAST'], StrRegN03['PRedBCST'], StrRegN03['VBCST'],
StrRegN03['PICMSST'], StrRegN03['VICMSST'])
StrFile += StrN03
if inv_line.icms_cst in ('40', '41', '50', '51'):
StrRegN06 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'vICMS': str("%.2f" % inv_line.icms_value),
'motDesICMS': '9', # FIXME
}
StrN06 = 'N06|%s|%s|%s|%s|\n' % (StrRegN06['Orig'], StrRegN06['CST'], StrRegN06['vICMS'], StrRegN06['motDesICMS'])
StrFile += StrN06
                if inv_line.icms_cst == '60':
StrRegN08 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'VBCST': str("%.2f" % 0.00),
'VICMSST': str("%.2f" % 0.00),
}
StrN08 = 'N08|%s|%s|%s|%s|\n' % (StrRegN08['Orig'], StrRegN08['CST'], StrRegN08['VBCST'], StrRegN08['VICMSST'])
StrFile += StrN08
                if inv_line.icms_cst == '70':
StrRegN09 = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
'ModBC': '0',
'PRedBC': str("%.2f" % inv_line.icms_percent_reduction),
'VBC': str("%.2f" % inv_line.icms_base),
'PICMS': str("%.2f" % inv_line.icms_percent),
'VICMS': str("%.2f" % inv_line.icms_value),
'ModBCST': '4', # TODO
'PMVAST': str("%.2f" % inv_line.icms_st_mva) or '',
'PRedBCST': '',
'VBCST': str("%.2f" % inv_line.icms_st_base),
'PICMSST': str("%.2f" % inv_line.icms_st_percent),
'VICMSST': str("%.2f" % inv_line.icms_st_value),
}
StrN09 = 'N09|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegN09['Orig'], StrRegN09['CST'], StrRegN09['ModBC'], StrRegN09['PRedBC'], StrRegN09['VBC'], StrRegN09['PICMS'], StrRegN09['VICMS'], StrRegN09['ModBCST'], StrRegN09['PMVAST'], StrRegN09['PRedBCST'], StrRegN09['VBCST'], StrRegN09['PICMSST'], StrRegN09['VICMSST'])
StrFile += StrN09
if inv_line.icms_cst == '102':
StrRegN10d = {
'Orig': inv_line.product_id.origin or '0',
'CST': inv_line.icms_cst,
}
StrFile += 'N10d|%(Orig)s|%(CST)s|\n' % StrRegN10d
StrRegO = {
'ClEnq': '',
'CNPJProd': '',
'CSelo': '',
'QSelo': '',
'CEnq': '999',
}
StrO = 'O|%s|%s|%s|%s|%s|\n' % (StrRegO['ClEnq'], StrRegO['CNPJProd'], StrRegO['CSelo'], StrRegO['QSelo'], StrRegO['CEnq'])
StrFile += StrO
if inv_line.ipi_percent > 0:
StrRegO07 = {
'CST': inv_line.ipi_cst,
'VIPI': str("%.2f" % inv_line.ipi_value),
}
StrO07 = 'O07|%s|%s|\n' % (StrRegO07['CST'], StrRegO07['VIPI'])
StrFile += StrO07
if inv_line.ipi_type == 'percent':
StrRegO10 = {
'VBC': str("%.2f" % inv_line.ipi_base),
'PIPI': str("%.2f" % inv_line.ipi_percent),
}
StrO1 = 'O10|%s|%s|\n' % (StrRegO10['VBC'], StrRegO10['PIPI'])
if inv_line.ipi_type == 'quantity':
pesol = 0
if inv_line.product_id:
pesol = inv_line.product_id.weight_net
StrRegO11 = {
'QUnid': str("%.4f" % (inv_line.quantity * pesol)),
'VUnid': str("%.4f" % inv_line.ipi_percent),
}
StrO1 = 'O11|%s|%s|\n' % (StrRegO11['QUnid'], StrRegO11['VUnid'])
StrFile += StrO1
else:
StrO1 = 'O08|%s|\n' % inv_line.ipi_cst
StrFile += StrO1
StrFile += 'Q|\n'
if inv_line.pis_cst == '99':
StrFile += 'Q05|99|0.00|\nQ10|0.0000|0.0000|\n'
else:
if inv_line.pis_percent > 0:
StrRegQ02 = {
'CST': inv_line.pis_cst,
'VBC': str("%.2f" % inv_line.pis_base),
'PPIS': str("%.2f" % inv_line.pis_percent),
'VPIS': str("%.2f" % inv_line.pis_value),
}
StrQ02 = ('Q02|%s|%s|%s|%s|\n') % (StrRegQ02['CST'], StrRegQ02['VBC'], StrRegQ02['PPIS'], StrRegQ02['VPIS'])
else:
StrQ02 = 'Q04|%s|\n' % inv_line.pis_cst
StrFile += StrQ02
StrFile += 'S|\n'
if inv_line.cofins_cst == '99':
StrFile += 'S05|99|0.00|\nS09|0.0000|0.0000|\n'
else:
if inv_line.cofins_percent > 0:
StrRegS02 = {
'CST': inv_line.cofins_cst,
'VBC': str("%.2f" % inv_line.cofins_base),
'PCOFINS': str("%.2f" % inv_line.cofins_percent),
'VCOFINS': str("%.2f" % inv_line.cofins_value),
}
StrS02 = ('S02|%s|%s|%s|%s|\n') % (StrRegS02['CST'], StrRegS02['VBC'], StrRegS02['PCOFINS'], StrRegS02['VCOFINS'])
else:
StrS02 = 'S04|%s|\n' % inv_line.cofins_cst
StrFile += StrS02
StrW = 'W|\n'
StrFile += StrW
StrRegW02 = {
'vBC': str("%.2f" % inv.icms_base),
'vICMS': str("%.2f" % inv.icms_value),
'vBCST': str("%.2f" % inv.icms_st_base),
'vST': str("%.2f" % inv.icms_st_value),
'vProd': str("%.2f" % inv.amount_untaxed),
'vFrete': str("%.2f" % inv.amount_freight),
'vSeg': str("%.2f" % inv.amount_insurance),
'vDesc': '0.00',
'vII': '0.00',
'vIPI': str("%.2f" % inv.ipi_value),
'vPIS': str("%.2f" % inv.pis_value),
'vCOFINS': str("%.2f" % inv.cofins_value),
'vOutro': str("%.2f" % inv.amount_costs),
'vNF': str("%.2f" % inv.amount_total),
}
StrW02 = 'W02|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\n' % (StrRegW02['vBC'], StrRegW02['vICMS'], StrRegW02['vBCST'], StrRegW02['vST'], StrRegW02['vProd'],
StrRegW02['vFrete'], StrRegW02['vSeg'], StrRegW02['vDesc'], StrRegW02['vII'], StrRegW02['vIPI'],
StrRegW02['vPIS'], StrRegW02['vCOFINS'], StrRegW02['vOutro'], StrRegW02['vNF'])
StrFile += StrW02
            # Freight mode: 0 - paid by the issuer; 1 - paid by the recipient/sender; 2 - paid by a third party; 9 - no freight (v2.0)
StrRegX0 = '0'
if inv.incoterm.code == 'FOB':
StrRegX0 = '0'
if inv.incoterm.code == 'CIF':
StrRegX0 = '1'
StrX = 'X|%s|\n' % (StrRegX0)
StrFile += StrX
StrRegX03 = {
'XNome': '',
'IE': '',
'XEnder': '',
'UF': '',
'XMun': '',
}
StrX0 = ''
if inv.carrier_id:
                #Carrier address
carrier_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.carrier_id.partner_id.id], ['default'])
carrier_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [carrier_addr['default']])[0]
if inv.carrier_id.partner_id.legal_name:
StrRegX03['XNome'] = normalize('NFKD', unicode(inv.carrier_id.partner_id.legal_name or '')).encode('ASCII', 'ignore')
else:
StrRegX03['XNome'] = normalize('NFKD', unicode(inv.carrier_id.partner_id.name or '')).encode('ASCII', 'ignore')
StrRegX03['IE'] = inv.carrier_id.partner_id.inscr_est or ''
StrRegX03['xEnder'] = normalize('NFKD', unicode(carrier_addr_default.street or '')).encode('ASCII', 'ignore')
StrRegX03['UF'] = carrier_addr_default.state_id.code or ''
if carrier_addr_default.l10n_br_city_id:
StrRegX03['xMun'] = normalize('NFKD', unicode(carrier_addr_default.l10n_br_city_id.name or '')).encode('ASCII', 'ignore')
if inv.carrier_id.partner_id.tipo_pessoa == 'J':
StrX0 = 'X04|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.carrier_id.partner_id.cnpj_cpf or ''))
else:
StrX0 = 'X05|%s|\n' % (re.sub('[%s]' % re.escape(string.punctuation), '', inv.carrier_id.partner_id.cnpj_cpf or ''))
StrX03 = 'X03|%s|%s|%s|%s|%s|\n' % (StrRegX03['XNome'], StrRegX03['IE'], StrRegX03['XEnder'], StrRegX03['UF'], StrRegX03['XMun'])
StrFile += StrX03
StrFile += StrX0
StrRegX18 = {
'Placa': '',
'UF': '',
'RNTC': '',
}
if inv.vehicle_id:
StrRegX18['Placa'] = inv.vehicle_id.plate or ''
StrRegX18['UF'] = inv.vehicle_id.plate.state_id.code or ''
StrRegX18['RNTC'] = inv.vehicle_id.rntc_code or ''
StrX18 = 'X18|%s|%s|%s|\n' % (StrRegX18['Placa'], StrRegX18['UF'], StrRegX18['RNTC'])
StrFile += StrX18
StrRegX26 = {
'QVol': '',
'Esp': '',
'Marca': '',
'NVol': '',
'PesoL': '',
'PesoB': '',
}
if inv.number_of_packages:
StrRegX26['QVol'] = inv.number_of_packages
StrRegX26['Esp'] = 'Volume' # TODO
                # 'Marca' and 'NVol' are left empty; there is no source data for them
StrRegX26['PesoL'] = str("%.3f" % inv.weight_net)
StrRegX26['PesoB'] = str("%.3f" % inv.weight)
StrX26 = 'X26|%s|%s|%s|%s|%s|%s|\n' % (StrRegX26['QVol'], StrRegX26['Esp'], StrRegX26['Marca'], StrRegX26['NVol'], StrRegX26['PesoL'], StrRegX26['PesoB'])
StrFile += StrX26
if inv.journal_id.revenue_expense:
StrY = 'Y|\n'
StrFile += StrY
for line in inv.move_line_receivable_id:
StrRegY07 = {
'NDup': line.name,
'DVenc': line.date_maturity or inv.date_due or inv.date_invoice,
'VDup': str("%.2f" % line.debit),
}
StrY07 = 'Y07|%s|%s|%s|\n' % (StrRegY07['NDup'], StrRegY07['DVenc'], StrRegY07['VDup'])
StrFile += StrY07
StrRegZ = {
'InfAdFisco': '',
'InfCpl': normalize('NFKD', unicode(inv.comment or '')).encode('ASCII', 'ignore'),
}
StrZ = 'Z|%s|%s|\n' % (StrRegZ['InfAdFisco'], StrRegZ['InfCpl'])
StrFile += StrZ
self.write(cr, uid, [inv.id], {'nfe_export_date': datetime.now()})
return unicode(StrFile.encode('utf-8'), errors='replace')
def nfe_export_xml(self, cr, uid, ids, context=False):
nfeProc = Element('nfeProc', {'versao': '2.00', 'xmlns': 'http://www.portalfiscal.inf.br/nfe'})
for inv in self.browse(cr, uid, ids, context={'lang': 'pt_BR'}):
            #Company address
company_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.company_id.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']], context={'lang': 'pt_BR'})[0]
            #Build the access key (chave) of the electronic invoice (NF-e)
nfe_key = unicode(company_addr_default.state_id.ibge_code).strip().rjust(2, u'0')
nfe_key += unicode(datetime.strptime(inv.date_invoice, '%Y-%m-%d').strftime(u'%y%m')).strip().rjust(4, u'0')
nfe_key += '08478495000170' # unicode(inv.company_id.partner_id.cnpj_cpf).strip().rjust(14, u'0')
nfe_key += inv.fiscal_document_id.code
nfe_key += unicode(inv.document_serie_id.code).strip().rjust(3, u'0')
nfe_key += unicode(inv.internal_number).strip().rjust(9, u'0')
nfe_key += unicode('1').strip().rjust(1, u'0') # Homologação
nfe_key += unicode(inv.internal_number).strip().rjust(8, u'0')
nfe_key += unicode(self.nfe_dv(nfe_key)).strip().rjust(1, u'0')
NFe = SubElement(nfeProc, 'NFe', {'xmlns': 'http://www.portalfiscal.inf.br/nfe'})
infNFe = SubElement(NFe, 'infNFe', {'versao': '2.00', 'Id': nfe_key})
#Dados da identificação da nota fiscal
ide = SubElement(infNFe, 'ide')
ide_cUF = SubElement(ide, 'cUF')
ide_cUF.text = company_addr_default.state_id.ibge_code
ide_cNF = SubElement(ide, 'cNF')
ide_cNF.text = unicode(inv.internal_number).strip().rjust(8, u'0')
ide_natOp = SubElement(ide, 'natOp')
ide_natOp.text = inv.cfop_id.name
ide_indPag = SubElement(ide, 'indPag')
ide_indPag.text = "2"
ide_mod = SubElement(ide, 'mod')
ide_mod.text = inv.fiscal_document_id.code
ide_serie = SubElement(ide, 'serie')
ide_serie.text = inv.document_serie_id.code
ide_nNF = SubElement(ide, 'nNF')
ide_nNF.text = inv.internal_number
ide_dEmi = SubElement(ide, 'dEmi')
ide_dEmi.text = inv.date_invoice
ide_dSaiEnt = SubElement(ide, 'dSaiEnt')
ide_dSaiEnt.text = inv.date_invoice
ide_tpNF = SubElement(ide, 'tpNF')
if inv.type in ("out_invoice", "in_refuld"):
ide_tpNF.text = '0'
else:
ide_tpNF.text = '1'
ide_cMunFG = SubElement(ide, 'cMunFG')
ide_cMunFG.text = ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.l10n_br_city_id.ibge_code)
ide_tpImp = SubElement(ide, 'tpImp')
ide_tpImp.text = "1"
ide_tpEmis = SubElement(ide, 'tpEmis')
ide_tpEmis.text = "1"
ide_cDV = SubElement(ide, 'cDV')
ide_cDV.text = self.nfe_dv(nfe_key)
#Tipo de ambiente: 1 - Produção; 2 - Homologação
ide_tpAmb = SubElement(ide, 'tpAmb')
ide_tpAmb.text = "2"
#Finalidade da emissão da NF-e: 1 - NFe normal 2 - NFe complementar 3 - NFe de ajuste
ide_finNFe = SubElement(ide, 'finNFe')
ide_finNFe.text = "1"
ide_procEmi = SubElement(ide, 'procEmi')
ide_procEmi.text = "0"
ide_verProc = SubElement(ide, 'verProc')
ide_verProc.text = "2.0.4"
emit = SubElement(infNFe, 'emit')
emit_CNPJ = SubElement(emit, 'CNPJ')
emit_CNPJ.text = inv.company_id.partner_id.cnpj_cpf
emit_xNome = SubElement(emit, 'xNome')
emit_xNome.text = inv.company_id.partner_id.legal_name
emit_xFant = SubElement(emit, 'xFant')
emit_xFant.text = inv.company_id.partner_id.name
enderEmit = SubElement(emit, 'enderEmit')
enderEmit_xLgr = SubElement(enderEmit, 'xLgr')
enderEmit_xLgr.text = company_addr_default.street
enderEmit_nro = SubElement(enderEmit, 'nro')
enderEmit_nro.text = company_addr_default.number
enderEmit_xBairro = SubElement(enderEmit, 'xBairro')
enderEmit_xBairro.text = company_addr_default.district
enderEmit_cMun = SubElement(enderEmit, 'cMun')
enderEmit_cMun.text = ('%s%s') % (company_addr_default.state_id.ibge_code, company_addr_default.l10n_br_city_id.ibge_code)
enderEmit_xMun = SubElement(enderEmit, 'xMun')
enderEmit_xMun.text = company_addr_default.l10n_br_city_id.name
enderEmit_UF = SubElement(enderEmit, 'UF')
enderEmit_UF.text = company_addr_default.state_id.code
enderEmit_CEP = SubElement(enderEmit, 'CEP')
enderEmit_CEP.text = company_addr_default.zip
enderEmit_cPais = SubElement(enderEmit, 'cPais')
enderEmit_cPais.text = company_addr_default.country_id.bc_code
enderEmit_xPais = SubElement(enderEmit, 'xPais')
enderEmit_xPais.text = company_addr_default.country_id.name
enderEmit_fone = SubElement(enderEmit, 'fone')
enderEmit_fone.text = company_addr_default.phone
emit_IE = SubElement(emit, 'IE')
emit_IE.text = inv.company_id.partner_id.inscr_est
emit_IEST = SubElement(emit, 'IEST')
emit_IEST.text = '0000000000' # FIXME
emit_IM = SubElement(emit, 'IM')
emit_IM.text = '0000000000' # FIXME
emit_CNAE = SubElement(emit, 'CNAE')
emit_CNAE.text = '0111301' # FIXME
emit_CRT = SubElement(emit, 'CRT')
emit_CRT.text = '3' # FIXME
dest = SubElement(infNFe, 'dest')
dest_CNPJ = SubElement(dest, 'CNPJ')
dest_CNPJ.text = inv.partner_id.cnpj_cpf
dest_xNome = SubElement(dest, 'xNome')
dest_xNome.text = inv.partner_id.legal_name
enderDest = SubElement(dest, 'enderDest')
enderDest_xLgr = SubElement(enderDest, 'xLgr')
enderDest_xLgr.text = inv.address_invoice_id.street
enderDest_nro = SubElement(enderDest, 'nro')
enderDest_nro.text = inv.address_invoice_id.number
enderDest_xBairro = SubElement(enderDest, 'xBairro')
enderDest_xBairro.text = inv.address_invoice_id.district
enderDest_cMun = SubElement(enderDest, 'cMun')
enderDest_cMun.text = ('%s%s') % (inv.address_invoice_id.state_id.ibge_code, inv.address_invoice_id.l10n_br_city_id.ibge_code)
enderDest_xMun = SubElement(enderDest, 'xMun')
enderDest_xMun.text = inv.address_invoice_id.l10n_br_city_id.name
enderDest_UF = SubElement(enderDest, 'UF')
enderDest_UF.text = inv.address_invoice_id.state_id.code
enderDest_CEP = SubElement(enderDest, 'CEP')
enderDest_CEP.text = inv.address_invoice_id.zip
enderDest_cPais = SubElement(enderDest, 'cPais')
enderDest_cPais.text = inv.address_invoice_id.country_id.bc_code
enderDest_xPais = SubElement(enderDest, 'xPais')
enderDest_xPais.text = inv.address_invoice_id.country_id.name
enderDest_fone = SubElement(enderDest, 'fone')
enderDest_fone.text = inv.address_invoice_id.phone
dest_IE = SubElement(dest, 'IE')
dest_IE.text = inv.partner_id.inscr_est
i = 0
for inv_line in inv.invoice_line:
i += 1
det = SubElement(infNFe, 'det', {'nItem': str(i)})
det_prod = SubElement(det, 'prod')
prod_cProd = SubElement(det_prod, 'cProd')
if inv_line.product_id.code:
prod_cProd.text = inv_line.product_id.code
else:
prod_cProd.text = unicode(i).strip().rjust(4, u'0')
prod_cEAN = SubElement(det_prod, 'cEAN')
prod_cEAN.text = inv_line.product_id.ean13
prod_xProd = SubElement(det_prod, 'xProd')
prod_xProd.text = inv_line.product_id.name
prod_NCM = SubElement(det_prod, 'NCM')
prod_NCM.text = inv_line.product_id.property_fiscal_classification.name
prod_CFOP = SubElement(det_prod, 'CFOP')
prod_CFOP.text = inv_line.cfop_id.code
prod_uCom = SubElement(det_prod, 'uCom')
prod_uCom.text = inv_line.uos_id.name
prod_qCom = SubElement(det_prod, 'qCom')
prod_qCom.text = str("%.4f" % inv_line.quantity)
prod_vUnCom = SubElement(det_prod, 'vUnCom')
prod_vUnCom.text = str("%.4f" % inv_line.price_unit)
prod_vProd = SubElement(det_prod, 'vProd')
prod_vProd.text = str("%.2f" % inv_line.price_subtotal)
prod_cEANTrib = SubElement(det_prod, 'cEANTrib')
#prod_vProd.text(inv_line.total)
prod_uTrib = SubElement(det_prod, 'uTrib')
prod_uTrib.text = inv_line.uos_id.name
prod_qTrib = SubElement(det_prod, 'qTrib')
prod_qTrib.text = '0.0000' # TODO
prod_vUnTrib = SubElement(det_prod, 'vUnTrib')
prod_vUnTrib.text = '0.00' # TODO
prod_vFrete = SubElement(det_prod, 'vFrete')
prod_vFrete.text = '0.00' # TODO - Valor do Frete
prod_vSeg = SubElement(det_prod, 'vSeg')
prod_vSeg.text = '0.00' # TODO - Valor do seguro
prod_vDesc = SubElement(det_prod, 'vDesc')
prod_vDesc.text = str("%.2f" % inv_line.discount) # TODO
prod_vOutro = SubElement(det_prod, 'vOutro')
prod_vOutro.text = '0.0000' # TODO
prod_indTot = SubElement(det_prod, 'indTot')
prod_indTot.text = '1' # TODO
prod_imposto = SubElement(det, 'imposto')
imposto_icms = SubElement(prod_imposto, 'ICMS') # + inv_line.icms_cst)
imposto_icms_cst = SubElement(imposto_icms, 'ICMS%s' % (inv_line.icms_cst))
icms_orig = SubElement(imposto_icms_cst, 'orig')
icms_orig.text = inv_line.product_id.origin
icms_CST = SubElement(imposto_icms_cst, 'CST')
icms_CST.text = inv_line.icms_cst
icms_modBC = SubElement(imposto_icms_cst, 'modBC')
icms_modBC.text = '0' # TODO
icms_vBC = SubElement(imposto_icms_cst, 'vBC')
icms_vBC.text = str("%.2f" % inv_line.icms_base)
icms_pICMS = SubElement(imposto_icms_cst, 'pICMS')
icms_pICMS.text = str("%.2f" % inv_line.icms_percent)
icms_vICMS = SubElement(imposto_icms_cst, 'vICMS')
icms_vICMS.text = str("%.2f" % inv_line.icms_value)
imposto_ipi = SubElement(prod_imposto, 'IPI')
icms_cEnq = SubElement(imposto_ipi, 'cEnq')
icms_cEnq.text = '999'
#Imposto Não Tributado
ipi_IPINT = SubElement(imposto_ipi, 'IPINT')
ipi_CST = SubElement(ipi_IPINT, 'CST')
ipi_CST.text = inv_line.ipi_cst
imposto_pis = SubElement(prod_imposto, 'PIS')
pis_PISAliq = SubElement(imposto_pis, 'PISAliq')
pis_CST = SubElement(pis_PISAliq, 'CST')
pis_CST.text = inv_line.pis_cst
pis_vBC = SubElement(pis_PISAliq, 'vBC')
pis_vBC.text = str("%.2f" % inv_line.pis_base)
pis_pPIS = SubElement(pis_PISAliq, 'pPIS')
pis_pPIS.text = str("%.2f" % inv_line.pis_percent)
pis_vPIS = SubElement(pis_PISAliq, 'vPIS')
pis_vPIS.text = str("%.2f" % inv_line.pis_value)
imposto_cofins = SubElement(prod_imposto, 'COFINS')
cofins_COFINSAliq = SubElement(imposto_cofins, 'COFINSAliq')
cofins_CST = SubElement(cofins_COFINSAliq, 'CST')
cofins_CST.text = inv_line.cofins_cst
cofins_vBC = SubElement(cofins_COFINSAliq, 'vBC')
cofins_vBC.text = str("%.2f" % inv_line.cofins_base)
cofins_pCOFINS = SubElement(cofins_COFINSAliq, 'pCOFINS')
cofins_pCOFINS.text = str("%.2f" % inv_line.cofins_percent)
cofins_vCOFINS = SubElement(cofins_COFINSAliq, 'vCOFINS')
cofins_vCOFINS.text = str("%.2f" % inv_line.cofins_value)
total = SubElement(infNFe, 'total')
total_ICMSTot = SubElement(total, 'ICMSTot')
ICMSTot_vBC = SubElement(total_ICMSTot, 'vBC')
ICMSTot_vBC.text = str("%.2f" % inv.icms_base)
ICMSTot_vICMS = SubElement(total_ICMSTot, 'vICMS')
ICMSTot_vICMS.text = str("%.2f" % inv.icms_value)
ICMSTot_vBCST = SubElement(total_ICMSTot, 'vBCST')
ICMSTot_vBCST.text = '0.00' # TODO
ICMSTot_vST = SubElement(total_ICMSTot, 'vST')
ICMSTot_vST.text = '0.00' # TODO
ICMSTot_vProd = SubElement(total_ICMSTot, 'vProd')
ICMSTot_vProd.text = str("%.2f" % inv.amount_untaxed)
ICMSTot_vFrete = SubElement(total_ICMSTot, 'vFrete')
ICMSTot_vFrete.text = '0.00' # TODO
ICMSTot_vSeg = SubElement(total_ICMSTot, 'vSeg')
ICMSTot_vSeg.text = str("%.2f" % inv.amount_insurance)
ICMSTot_vDesc = SubElement(total_ICMSTot, 'vDesc')
ICMSTot_vDesc.text = '0.00' # TODO
ICMSTot_II = SubElement(total_ICMSTot, 'vII')
ICMSTot_II.text = '0.00' # TODO
ICMSTot_vIPI = SubElement(total_ICMSTot, 'vIPI')
ICMSTot_vIPI.text = str("%.2f" % inv.ipi_value)
ICMSTot_vPIS = SubElement(total_ICMSTot, 'vPIS')
ICMSTot_vPIS.text = str("%.2f" % inv.pis_value)
ICMSTot_vCOFINS = SubElement(total_ICMSTot, 'vCOFINS')
ICMSTot_vCOFINS.text = str("%.2f" % inv.cofins_value)
ICMSTot_vOutro = SubElement(total_ICMSTot, 'vOutro')
ICMSTot_vOutro.text = str("%.2f" % inv.amount_costs)
ICMSTot_vNF = SubElement(total_ICMSTot, 'vNF')
ICMSTot_vNF.text = str("%.2f" % inv.amount_total)
transp = SubElement(infNFe, 'transp')
# Modo do Frete: 0- Por conta do emitente; 1- Por conta do destinatário/remetente; 2- Por conta de terceiros; 9- Sem frete (v2.0)
transp_modFrete = SubElement(transp, 'modFrete')
transp_modFrete.text = '0' # TODO
if inv.carrier_id:
#Endereço do company
carrier_addr = self.pool.get('res.partner').address_get(cr, uid, [inv.carrier_id.partner_id.id], ['default'])
carrier_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [carrier_addr['default']])[0]
transp_transporta = SubElement(transp, 'transporta')
if inv.carrier_id.partner_id.tipo_pessoa == 'J':
transporta_CNPJ = SubElement(transp_transporta, 'CNPJ')
transporta_CNPJ.text = inv.carrier_id.partner_id.cnpj_cpf
else:
transporta_CPF = SubElement(transp_transporta, 'CPF')
transporta_CPF.text = inv.carrier_id.partner_id.cnpj_cpf
transporta_xNome = SubElement(transp_transporta, 'xNome')
if inv.carrier_id.partner_id.legal_name:
transporta_xNome.text = inv.carrier_id.partner_id.legal_name
else:
transporta_xNome.text = inv.carrier_id.partner_id.name
transporta_IE = SubElement(transp_transporta, 'IE')
transporta_IE.text = inv.carrier_id.partner_id.inscr_est
transporta_xEnder = SubElement(transp_transporta, 'xEnder')
transporta_xEnder.text = carrier_addr_default.street
transporta_xMun = SubElement(transp_transporta, 'xMun')
transporta_xMun.text = ('%s%s') % (carrier_addr_default.state_id.ibge_code, carrier_addr_default.l10n_br_city_id.ibge_code)
transporta_UF = SubElement(transp_transporta, 'UF')
transporta_UF.text = carrier_addr_default.state_id.code
if inv.number_of_packages:
transp_vol = SubElement(transp, 'vol')
vol_qVol = SubElement(transp_vol, 'qVol')
vol_qVol.text = str(inv.number_of_packages)
vol_esp = SubElement(transp_vol, 'esp')
vol_esp.text = 'volume' # TODO
vol_pesoL = SubElement(transp_vol, 'pesoL')
vol_pesoL.text = str("%.3f" % inv.weight_net)
vol_pesoB = SubElement(transp_vol, 'pesoB')
vol_pesoB.text = str("%.3f" % inv.weight)
xml_string = ElementTree.tostring(nfeProc, 'utf-8')
return xml_string
def onchange_partner_id(self, cr, uid, ids, type, partner_id, \
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False, fiscal_operation_category_id=False):
if self.browse(cr, uid, ids)[0].partner_id.id != partner_id:
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id, date_invoice, payment_term, partner_bank_id, company_id)
result['value']['fiscal_operation_id'] = False
result['value']['cfop_id'] = False
result['value']['fiscal_document_id'] = False
if not partner_id or not company_id or not result['value']['address_invoice_id']:
return result
obj_company = self.pool.get('res.company').browse(cr, uid, [company_id])[0]
company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
from_country = company_addr_default.country_id.id
from_state = company_addr_default.state_id.id
obj_partner = self.pool.get('res.partner').browse(cr, uid, [partner_id])[0]
partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
partner_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [result['value']['address_invoice_id']])[0]
to_country = partner_addr_default.country_id.id
to_state = partner_addr_default.state_id.id
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', company_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('partner_fiscal_type_id', '=', partner_fiscal_type), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
if not fsc_pos_id:
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', company_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
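# Descriptive note: the lookup above is a two-step fallback - first a rule search constrained
# by the partner's fiscal type, then, when nothing matches, the same domain without that
# constraint, so a generic fiscal position rule can still resolve the operation.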
if fsc_pos_id:
obj_fpo_rule = self.pool.get('account.fiscal.position.rule').browse(cr, uid, fsc_pos_id)[0]
obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
result['value']['fiscal_position'] = obj_fpo.id
result['value']['fiscal_operation_id'] = obj_foperation.id
result['value']['cfop_id'] = obj_foperation.cfop_id.id
result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
#for inv in self.browse(cr, uid, ids):
# for line in inv.invoice_line:
# line.cfop_id = obj_foperation.cfop_id.id
#line.write(cr, uid, line.id, {'cfop_id': obj_foperation.cfop_id.id})
return result
else:
return True
def onchange_company_id(self, cr, uid, ids, company_id, partner_id, type, invoice_line, currency_id, address_invoice_id, fiscal_operation_category_id=False):
try:
if self.browse(cr, uid, ids)[0].company_id.id != company_id:
result = super(account_invoice, self).onchange_company_id(cr, uid, ids, company_id, partner_id, type, invoice_line, currency_id, address_invoice_id)
result['value']['fiscal_operation_id'] = False
result['value']['cfop_id'] = False
result['value']['fiscal_document_id'] = False
if not partner_id or not company_id or not address_invoice_id:
return result
obj_company = self.pool.get('res.company').browse(cr, uid, [company_id])[0]
company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
from_country = company_addr_default.country_id.id
from_state = company_addr_default.state_id.id
obj_partner = self.pool.get('res.partner').browse(cr, uid, [partner_id])[0]
partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
if obj_partner.property_account_position.id:
obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_partner.property_account_position.id])[0]  # use the partner's preset fiscal position directly
obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
result['value']['fiscal_position'] = obj_fpo.id
result['value']['fiscal_operation_id'] = obj_foperation.id
result['value']['cfop_id'] = obj_foperation.cfop_id.id
result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
return result
partner_addr_invoice = self.pool.get('res.partner.address').browse(cr, uid, [address_invoice_id])[0]
to_country = partner_addr_invoice.country_id.id
to_state = partner_addr_invoice.state_id.id
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', company_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('partner_fiscal_type_id', '=', partner_fiscal_type), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
if not fsc_pos_id:
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', company_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
if fsc_pos_id:
obj_fpo_rule = self.pool.get('account.fiscal.position.rule').browse(cr, uid, fsc_pos_id)[0]
obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
result['value']['fiscal_position'] = obj_fpo.id
result['value']['fiscal_operation_id'] = obj_foperation.id
result['value']['cfop_id'] = obj_foperation.cfop_id.id
result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
for inv in self.browse(cr, uid, ids):
for line in inv.invoice_line:
line.cfop_id = obj_foperation.cfop_id.id
return result
except Exception:  # fall back to the plain super() computation below on any error
pass
result = super(account_invoice, self).onchange_company_id(cr, uid, ids, company_id, partner_id, type, invoice_line, currency_id, address_invoice_id)
result['value']['fiscal_operation_id'] = False
result['value']['cfop_id'] = False
result['value']['fiscal_document_id'] = False
return result
def onchange_address_invoice_id(self, cr, uid, ids, cpy_id, ptn_id, ptn_invoice_id, fiscal_operation_category_id=False):
result = super(account_invoice, self).onchange_address_invoice_id(cr, uid, ids, cpy_id, ptn_id, ptn_invoice_id)
result['value']['fiscal_operation_id'] = False
result['value']['cfop_id'] = False
result['value']['fiscal_document_id'] = False
if not ptn_id or not cpy_id or not ptn_invoice_id:
return result
obj_company = self.pool.get('res.company').browse(cr, uid, [cpy_id])[0]
company_addr = self.pool.get('res.partner').address_get(cr, uid, [obj_company.partner_id.id], ['default'])
company_addr_default = self.pool.get('res.partner.address').browse(cr, uid, [company_addr['default']])[0]
from_country = company_addr_default.country_id.id
from_state = company_addr_default.state_id.id
obj_partner = self.pool.get('res.partner').browse(cr, uid, [ptn_id])[0]
partner_fiscal_type = obj_partner.partner_fiscal_type_id.id
if obj_partner.property_account_position.id:
obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_partner.property_account_position.id])[0]  # use the partner's preset fiscal position directly
obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
result['value']['fiscal_position'] = obj_fpo.id
result['value']['fiscal_operation_id'] = obj_foperation.id
result['value']['cfop_id'] = obj_foperation.cfop_id.id
result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
return result
partner_addr_invoice = self.pool.get('res.partner.address').browse(cr, uid, [ptn_invoice_id])[0]
to_country = partner_addr_invoice.country_id.id
to_state = partner_addr_invoice.state_id.id
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', cpy_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('partner_fiscal_type_id', '=', partner_fiscal_type), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
if not fsc_pos_id:
fsc_pos_id = self.pool.get('account.fiscal.position.rule').search(cr, uid, [('company_id', '=', cpy_id), ('from_country', '=', from_country), ('from_state', '=', from_state), ('to_country', '=', to_country), ('to_state', '=', to_state), ('use_invoice', '=', True), ('fiscal_operation_category_id', '=', fiscal_operation_category_id)])
if fsc_pos_id:
obj_fpo_rule = self.pool.get('account.fiscal.position.rule').browse(cr, uid, fsc_pos_id)[0]
obj_fpo = self.pool.get('account.fiscal.position').browse(cr, uid, [obj_fpo_rule.fiscal_position_id.id])[0]
obj_foperation = self.pool.get('l10n_br_account.fiscal.operation').browse(cr, uid, [obj_fpo.fiscal_operation_id.id])[0]
result['value']['fiscal_position'] = obj_fpo.id
result['value']['fiscal_operation_id'] = obj_foperation.id
result['value']['cfop_id'] = obj_foperation.cfop_id.id
result['value']['fiscal_document_id'] = obj_foperation.fiscal_document_id.id
for inv in self.browse(cr, uid, ids):
for line in inv.invoice_line:
line.cfop_id = obj_foperation.cfop_id.id
return result
def onchange_cfop_id(self, cr, uid, ids, cfop_id):
if not cfop_id:
return False
for inv in self.browse(cr, uid, ids):
for inv_line in inv.invoice_line:
self.pool.get('account.invoice.line').write(cr, uid, inv_line.id, {'cfop_id': cfop_id})
return {'value': {'cfop_id': cfop_id}}
account_invoice()
class account_invoice_line(osv.osv):
_inherit = 'account.invoice.line'
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
result = super(account_invoice_line, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
if context is None:
context = {}
if view_type == 'form':
eview = etree.fromstring(result['arch'])
if 'type' in context.keys():
operation_type = {'out_invoice': 'output', 'in_invoice': 'input', 'out_refund': 'input', 'in_refund': 'output'}
cfops = eview.xpath("//field[@name='cfop_id']")
for cfop_id in cfops:
cfop_id.set('domain', "[('type', '=', '%s')]" % (operation_type[context['type']], ))
cfop_id.set('required', '1')
fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']")
for fiscal_operation_category_id in fiscal_operation_categories:
fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'product'), ('type', '=', '%s'), ('use_invoice', '=', True)]" % (operation_type[context['type']], ))
fiscal_operation_category_id.set('required', '1')
fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']")
for fiscal_operation_id in fiscal_operations:
fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'product'), ('type', '=', '%s'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]" % (operation_type[context['type']], ))
fiscal_operation_id.set('required', '1')
if context.get('fiscal_type', False) == 'service':
products = eview.xpath("//field[@name='product_id']")
for product_id in products:
product_id.set('domain', "[('fiscal_type', '=', '%s')]" % (context['fiscal_type']))
cfops = eview.xpath("//field[@name='cfop_id']")
for cfop_id in cfops:
cfop_id.set('invisible', '1')
cfop_id.set('required', '0')
if context['type'] in ('in_invoice', 'out_refund'):
fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']")
for fiscal_operation_category_id in fiscal_operation_categories:
fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'input'), ('use_invoice', '=', True)]")
fiscal_operation_category_id.set('required', '1')
fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']")
for fiscal_operation_id in fiscal_operations:
fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'input'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]")
fiscal_operation_id.set('required', '1')
if context['type'] in ('out_invoice', 'in_refund'):
fiscal_operation_categories = eview.xpath("//field[@name='fiscal_operation_category_id']")
for fiscal_operation_category_id in fiscal_operation_categories:
fiscal_operation_category_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'output'), ('use_invoice', '=', True)]")
fiscal_operation_category_id.set('required', '1')
fiscal_operations = eview.xpath("//field[@name='fiscal_operation_id']")
for fiscal_operation_id in fiscal_operations:
fiscal_operation_id.set('domain', "[('fiscal_type', '=', 'service'), ('type', '=', 'output'), ('fiscal_operation_category_id', '=', fiscal_operation_category_id), ('use_invoice', '=', True)]")
fiscal_operation_id.set('required', '1')
result['arch'] = etree.tostring(eview)
if view_type == 'tree':
doc = etree.XML(result['arch'])
nodes = doc.xpath("//field[@name='partner_id']")
partner_string = _('Customer')
if context.get('type', 'out_invoice') in ('in_invoice', 'in_refund'):
partner_string = _('Supplier')
for node in nodes:
node.set('string', partner_string)
result['arch'] = etree.tostring(doc)
return result
def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict):
res = {} # super(account_invoice_line, self)._amount_line(cr, uid, ids, prop, unknow_none, unknow_dict)
tax_obj = self.pool.get('account.tax')
fsc_op_line_obj = self.pool.get('l10n_br_account.fiscal.operation.line')
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids):
res[line.id] = {
'price_subtotal': 0.0,
'price_total': 0.0,
'icms_base': 0.0,
'icms_base_other': 0.0,
'icms_value': 0.0,
'icms_percent': 0.0,
'icms_percent_reduction': 0.0,
'icms_st_value': 0.0,
'icms_st_base': 0.0,
'icms_st_percent': 0.0,
'icms_st_mva': 0.0,
'icms_st_base_other': 0.0,
'icms_cst': '40', # Coloca como isento caso não tenha ICMS
'ipi_type': 'percent',
'ipi_base': 0.0,
'ipi_base_other': 0.0,
'ipi_value': 0.0,
'ipi_percent': 0.0,
'ipi_cst': '53', # Coloca como isento caso não tenha IPI
'pis_base': 0.0,
'pis_base_other': 0.0,
'pis_value': 0.0,
'pis_percent': 0.0,
'pis_cst': '99', # Coloca como isento caso não tenha PIS
'cofins_base': 0.0,
'cofins_base_other': 0.0,
'cofins_value': 0.0,
'cofins_percent': 0.0,
'cofins_cst': '99', # Coloca como isento caso não tenha COFINS
}
price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)
taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, price, line.quantity, product=line.product_id, address_id=line.invoice_id.address_invoice_id, partner=line.invoice_id.partner_id)
icms_base = 0.0
icms_base_other = 0.0
icms_value = 0.0
icms_percent = 0.0
icms_percent_reduction = 0.0
icms_st_value = 0.0
icms_st_base = 0.0
icms_st_percent = 0.0
icms_st_mva = 0.0
icms_st_base_other = 0.0
icms_cst = '40'
ipi_type = 'percent'
ipi_base = 0.0
ipi_base_other = 0.0
ipi_value = 0.0
ipi_percent = 0.0
ipi_cst = '53'
pis_base = 0.0
pis_base_other = 0.0
pis_value = 0.0
pis_percent = 0.0
pis_cst = '99'
cofins_base = 0.0
cofins_base_other = 0.0
cofins_value = 0.0
cofins_percent = 0.0
cofins_cst = '99'
if line.fiscal_operation_id:
fiscal_operation_ids = self.pool.get('l10n_br_account.fiscal.operation.line').search(cr, uid, [('company_id', '=', line.company_id.id), ('fiscal_operation_id', '=', line.fiscal_operation_id.id), ('fiscal_classification_id', '=', False)])
for fo_line in self.pool.get('l10n_br_account.fiscal.operation.line').browse(cr, uid, fiscal_operation_ids):
if fo_line.tax_code_id.domain == 'icms':
icms_cst = fo_line.cst_id.code
if fo_line.tax_code_id.domain == 'ipi':
ipi_cst = fo_line.cst_id.code
if fo_line.tax_code_id.domain == 'pis':
pis_cst = fo_line.cst_id.code
if fo_line.tax_code_id.domain == 'cofins':
cofins_cst = fo_line.cst_id.code
if line.product_id:
fo_ids_ncm = self.pool.get('l10n_br_account.fiscal.operation.line').search(cr, uid, [('company_id', '=', line.company_id.id), ('fiscal_operation_id', '=', line.fiscal_operation_id.id), ('fiscal_classification_id', '=', line.product_id.property_fiscal_classification.id)])
for fo_line_ncm in self.pool.get('l10n_br_account.fiscal.operation.line').browse(cr, uid, fo_ids_ncm):
if fo_line_ncm.tax_code_id.domain == 'icms':
icms_cst = fo_line_ncm.cst_id.code
if fo_line_ncm.tax_code_id.domain == 'ipi':
ipi_cst = fo_line_ncm.cst_id.code
if fo_line_ncm.tax_code_id.domain == 'pis':
pis_cst = fo_line_ncm.cst_id.code
if fo_line_ncm.tax_code_id.domain == 'cofins':
cofins_cst = fo_line_ncm.cst_id.code
for tax in taxes['taxes']:
fsc_op_line_ids = 0
fsc_fp_tax_ids = 0
tax_brw = tax_obj.browse(cr, uid, tax['id'])
if tax_brw.domain == 'icms':
icms_base += tax['total_base']
icms_base_other += taxes['total'] - tax['total_base']
icms_value += tax['amount']
icms_percent += tax_brw.amount * 100
icms_percent_reduction += tax_brw.base_reduction * 100
if tax_brw.domain == 'ipi':
ipi_type = tax_brw.type
ipi_base += tax['total_base']
ipi_value += tax['amount']
ipi_percent += tax_brw.amount * 100
if tax_brw.domain == 'pis':
pis_base += tax['total_base']
pis_base_other += taxes['total'] - tax['total_base']
pis_value += tax['amount']
pis_percent += tax_brw.amount * 100
if tax_brw.domain == 'cofins':
cofins_base += tax['total_base']
cofins_base_other += taxes['total'] - tax['total_base']
cofins_value += tax['amount']
cofins_percent += tax_brw.amount * 100
if tax_brw.domain == 'icmsst':
icms_st_value += tax['amount']
icms_st_base += tax['total_base']
#cst do tipo pauta
#icms_st_percent += icms_value
icms_st_mva += tax_brw.amount_mva * 100
icms_st_base_other += 0
res[line.id] = {
'price_subtotal': taxes['total'],
'price_total': taxes['total'],
'icms_base': icms_base,
'icms_base_other': icms_base_other,
'icms_value': icms_value,
'icms_percent': icms_percent,
'icms_percent_reduction': icms_percent_reduction,
'icms_st_value': icms_st_value,
'icms_st_base': icms_st_base,
'icms_st_percent': icms_st_percent,
'icms_st_mva': icms_st_mva,
'icms_st_base_other': icms_st_base_other,
'icms_cst': icms_cst,
'ipi_type': ipi_type,
'ipi_base': ipi_base,
'ipi_base_other': ipi_base_other,
'ipi_value': ipi_value,
'ipi_percent': ipi_percent,
'ipi_cst': ipi_cst,
'pis_base': pis_base,
'pis_base_other': pis_base_other,
'pis_value': pis_value,
'pis_percent': pis_percent,
'pis_cst': pis_cst,
'cofins_base': cofins_base,
'cofins_base_other': cofins_base_other,
'cofins_value': cofins_value,
'cofins_percent': cofins_percent,
'cofins_cst': cofins_cst,
}
if line.invoice_id:
cur = line.invoice_id.currency_id
res[line.id] = {
'price_subtotal': cur_obj.round(cr, uid, cur, res[line.id]['price_subtotal']),
'price_total': cur_obj.round(cr, uid, cur, res[line.id]['price_total']),
'icms_base': cur_obj.round(cr, uid, cur, icms_base),
'icms_base_other': cur_obj.round(cr, uid, cur, icms_base_other),
'icms_value': cur_obj.round(cr, uid, cur, icms_value),
'icms_percent': icms_percent,
'icms_percent_reduction': icms_percent_reduction,
'icms_st_value': cur_obj.round(cr, uid, cur, icms_st_value),
'icms_st_base': cur_obj.round(cr, uid, cur, icms_st_base),
'icms_st_percent': icms_st_percent,
'icms_st_mva': icms_st_mva,
'icms_st_base_other': cur_obj.round(cr, uid, cur, icms_st_base_other),
'icms_cst': icms_cst,
'ipi_type': ipi_type,
'ipi_base': cur_obj.round(cr, uid, cur, ipi_base),
'ipi_base_other': cur_obj.round(cr, uid, cur, ipi_base_other),
'ipi_value': cur_obj.round(cr, uid, cur, ipi_value),
'ipi_percent': ipi_percent,
'ipi_cst': ipi_cst,
'pis_base': cur_obj.round(cr, uid, cur, pis_base),
'pis_base_other': cur_obj.round(cr, uid, cur, pis_base_other),
'pis_value': cur_obj.round(cr, uid, cur, pis_value),
'pis_percent': pis_percent,
'pis_cst': pis_cst,
'cofins_base': cur_obj.round(cr, uid, cur, cofins_base),
'cofins_base_other': cur_obj.round(cr, uid, cur, cofins_base_other),
'cofins_value': cur_obj.round(cr, uid, cur, cofins_value),
'cofins_percent': cofins_percent,
'cofins_cst': cofins_cst,
}
return res
_columns = {
'fiscal_operation_category_id': fields.many2one('l10n_br_account.fiscal.operation.category', 'Categoria', readonly=True, states={'draft': [('readonly', False)]}),
'fiscal_operation_id': fields.many2one('l10n_br_account.fiscal.operation', 'Operação Fiscal', domain="[('fiscal_operation_category_id', '=', fiscal_operation_category_id)]", readonly=True, states={'draft': [('readonly', False)]}),
'cfop_id': fields.many2one('l10n_br_account.cfop', 'CFOP'),
'price_subtotal': fields.function(_amount_line, method=True, string='Subtotal', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'price_total': fields.function(_amount_line, method=True, string='Total', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'icms_base': fields.function(_amount_line, method=True, string='Base ICMS', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'icms_base_other': fields.function(_amount_line, method=True, string='Base ICMS Outras', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'icms_value': fields.function(_amount_line, method=True, string='Valor ICMS', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'icms_percent': fields.function(_amount_line, method=True, string='Perc ICMS', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'icms_percent_reduction': fields.function(_amount_line, method=True, string='Perc Redução de Base ICMS', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'icms_st_value': fields.function(_amount_line, method=True, string='Valor ICMS ST', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'icms_st_base': fields.function(_amount_line, method=True, string='Base ICMS ST', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'icms_st_percent': fields.function(_amount_line, method=True, string='Percentual ICMS ST', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'icms_st_mva': fields.function(_amount_line, method=True, string='MVA ICMS ST', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'icms_st_base_other': fields.function(_amount_line, method=True, string='Base ICMS ST Outras', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'icms_cst': fields.function(_amount_line, method=True, string='CST ICMS', type="char", size=3,
store=True, multi='all'),
'ipi_type': fields.function(_amount_line, method=True, string='Tipo do IPI', type="char", size=64,
store=True, multi='all'),
'ipi_base': fields.function(_amount_line, method=True, string='Base IPI', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'ipi_base_other': fields.function(_amount_line, method=True, string='Base IPI Outras', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'ipi_value': fields.function(_amount_line, method=True, string='Valor IPI', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'ipi_percent': fields.function(_amount_line, method=True, string='Perc IPI', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'ipi_cst': fields.function(_amount_line, method=True, string='CST IPI', type="char", size=2,
store=True, multi='all'),
'pis_base': fields.function(_amount_line, method=True, string='Base PIS', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'pis_base_other': fields.function(_amount_line, method=True, string='Base PIS Outras', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'pis_value': fields.function(_amount_line, method=True, string='Valor PIS', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'pis_percent': fields.function(_amount_line, method=True, string='Perc PIS', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'pis_cst': fields.function(_amount_line, method=True, string='CST PIS', type="char", size=2,
store=True, multi='all'),
'cofins_base': fields.function(_amount_line, method=True, string='Base COFINS', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'cofins_base_other': fields.function(_amount_line, method=True, string='Base COFINS Outras', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'cofins_value': fields.function(_amount_line, method=True, string='Valor COFINS', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'cofins_percent': fields.function(_amount_line, method=True, string='Perc COFINS', type="float",
digits_compute=dp.get_precision('Account'), store=True, multi='all'),
'cofins_cst': fields.function(_amount_line, method=True, string='CST COFINS', type="char", size=2,
store=True, multi='all'),
}
def product_id_change(self, cr, uid, ids, product, uom, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, address_invoice_id=False, currency_id=False, context=None, cfop_id=False):
result = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom, qty, name, type, partner_id, fposition_id, price_unit, address_invoice_id, currency_id, context)
if not cfop_id:
return result
result['value']['cfop_id'] = cfop_id
result['value']['fiscal_operation_category_id'] = cfop_id
result['value']['fiscal_operation_id'] = cfop_id
return result
account_invoice_line()
class account_invoice_tax(osv.osv):
_inherit = "account.invoice.tax"
_description = "Invoice Tax"
def compute(self, cr, uid, invoice_id, context={}):
tax_grouped = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context)
cur = inv.currency_id
company_currency = inv.company_id.currency_id.id
for line in inv.invoice_line:
taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, (line.price_unit * (1 - (line.discount or 0.0) / 100.0)), line.quantity, inv.address_invoice_id.id, line.product_id, inv.partner_id)
for tax in taxes['taxes']:
val = {}
val['invoice_id'] = inv.id
val['name'] = tax['name']
val['amount'] = tax['amount']
val['manual'] = False
val['sequence'] = tax['sequence']
val['base'] = tax['total_base']
if inv.type in ('out_invoice', 'in_invoice'):
val['base_code_id'] = tax['base_code_id']
val['tax_code_id'] = tax['tax_code_id']
val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['account_id'] = tax['account_collected_id'] or line.account_id.id
else:
val['base_code_id'] = tax['ref_base_code_id']
val['tax_code_id'] = tax['ref_tax_code_id']
val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['ref_base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['ref_tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['account_id'] = tax['account_paid_id'] or line.account_id.id
key = (val['tax_code_id'], val['base_code_id'], val['account_id'])
if not key in tax_grouped:
tax_grouped[key] = val
else:
tax_grouped[key]['amount'] += val['amount']
tax_grouped[key]['base'] += val['base']
tax_grouped[key]['base_amount'] += val['base_amount']
tax_grouped[key]['tax_amount'] += val['tax_amount']
for t in tax_grouped.values():
t['base'] = cur_obj.round(cr, uid, cur, t['base'])
t['amount'] = cur_obj.round(cr, uid, cur, t['amount'])
t['base_amount'] = cur_obj.round(cr, uid, cur, t['base_amount'])
t['tax_amount'] = cur_obj.round(cr, uid, cur, t['tax_amount'])
return tax_grouped
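# Descriptive note on the grouping above: taxes are accumulated per
# (tax_code_id, base_code_id, account_id) key, so two invoice lines sharing the same tax
# and account collapse into a single account.invoice.tax entry whose amount, base,
# base_amount and tax_amount are the sums over those lines.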
account_invoice_tax()
| proge/openerp-pt_br | l10n_br_account/invoice.py | Python | agpl-3.0 | 120,444 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-03-05 14:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('order', '0022_historicalorderdiscount'),
]
operations = [
migrations.CreateModel(
name='OrderStatusChange',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('old_status', models.CharField(blank=True, max_length=100, verbose_name='Old Status')),
('new_status', models.CharField(blank=True, max_length=100, verbose_name='New Status')),
('date_created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Date Created')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status_changes', to='order.Order', verbose_name='Order Status Changes')),
],
options={
'ordering': ['-date_created'],
'abstract': False,
'verbose_name_plural': 'Order Status Changes',
'verbose_name': 'Order Status Change',
},
),
migrations.AlterField(
model_name='communicationevent',
name='date_created',
field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Date'),
),
migrations.AlterField(
model_name='paymentevent',
name='date_created',
field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Date created'),
),
migrations.AlterField(
model_name='shippingevent',
name='date_created',
field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Date Created'),
),
]
| eduNEXT/edunext-ecommerce | ecommerce/extensions/order/migrations/0023_auto_20200305_1448.py | Python | agpl-3.0 | 1,921 |
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
"""
Replacement for the standard Django forms.widgets module. It contains all
standard widgets plus extra (amcat-specific) widgets.
"""
from django.forms import widgets
__all__ = ["JQuerySelect", "JQueryMultipleSelect"]
class JQuerySelect(widgets.Select):
def _build_attrs(self, attrs=None, **kwargs):
attrs = dict() if attrs is None else attrs
attrs.update(kwargs)
return attrs
def render(self, name, value, attrs=None):
attrs = self._build_attrs(attrs, **{'class' : 'multiselect'})
return super(JQuerySelect, self).render(name, value, attrs=attrs)
class JQueryMultipleSelect(JQuerySelect, widgets.SelectMultiple):
def render(self, name, value, attrs=None, *args, **kwargs):
attrs = self._build_attrs(attrs, multiple='multiple')
return super(JQueryMultipleSelect, self).render(name, value, attrs=attrs)
def convert_to_jquery_select(form):
for field in form.fields:
print field, type(form.fields[field].widget), type(form.fields[field].widget) == widgets.Select
w = form.fields[field].widget
if type(w) == widgets.Select:
form.fields[field].widget = JQuerySelect(attrs=w.attrs, choices=w.choices)
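# Minimal usage sketch (the form class below is hypothetical, not part of this module):
#
#   form = ArticleFilterForm(request.GET)
#   convert_to_jquery_select(form)
#   # every plain Select widget on the form is now a JQuerySelect, which renders
#   # with class="multiselect" so the page's jQuery plugin can pick it up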
| tschmorleiz/amcat | amcat/forms/widgets.py | Python | agpl-3.0 | 2,586 |
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from six import iteritems
from email_reply_parser import EmailReplyParser
from frappe.utils import (flt, getdate, get_url, now,
nowtime, get_time, today, get_datetime, add_days)
from erpnext.controllers.queries import get_filters_cond
from frappe.desk.reportview import get_match_cond
from erpnext.hr.doctype.daily_work_summary.daily_work_summary import get_users_email
from erpnext.hr.doctype.holiday_list.holiday_list import is_holiday
from frappe.model.document import Document
class Project(Document):
def get_feed(self):
return '{0}: {1}'.format(_(self.status), frappe.safe_decode(self.project_name))
def onload(self):
self.set_onload('activity_summary', frappe.db.sql('''select activity_type,
sum(hours) as total_hours
from `tabTimesheet Detail` where project=%s and docstatus < 2 group by activity_type
order by total_hours desc''', self.name, as_dict=True))
self.update_costing()
def before_print(self):
self.onload()
def validate(self):
if not self.is_new():
self.copy_from_template()
self.send_welcome_email()
self.update_costing()
self.update_percent_complete()
def copy_from_template(self):
'''
Copy tasks from template
'''
if self.project_template and not frappe.db.get_all('Task', dict(project = self.name), limit=1):
# has a template, and no loaded tasks, so lets create
if not self.expected_start_date:
# project starts today
self.expected_start_date = today()
template = frappe.get_doc('Project Template', self.project_template)
if not self.project_type:
self.project_type = template.project_type
# create tasks from template
for task in template.tasks:
frappe.get_doc(dict(
doctype = 'Task',
subject = task.subject,
project = self.name,
status = 'Open',
exp_start_date = add_days(self.expected_start_date, task.start),
exp_end_date = add_days(self.expected_start_date, task.start + task.duration),
description = task.description,
task_weight = task.task_weight
)).insert()
def is_row_updated(self, row, existing_task_data, fields):
if self.get("__islocal") or not existing_task_data: return True
d = existing_task_data.get(row.task_id, {})
for field in fields:
if row.get(field) != d.get(field):
return True
def update_project(self):
'''Called externally by Task'''
self.update_percent_complete()
self.update_costing()
self.db_update()
def after_insert(self):
self.copy_from_template()
if self.sales_order:
frappe.db.set_value("Sales Order", self.sales_order, "project", self.name)
def update_percent_complete(self):
if self.percent_complete_method == "Manual":
if self.status == "Completed":
self.percent_complete = 100
return
total = frappe.db.count('Task', dict(project=self.name))
if not total:
self.percent_complete = 0
else:
if (self.percent_complete_method == "Task Completion" and total > 0) or (
not self.percent_complete_method and total > 0):
completed = frappe.db.sql("""select count(name) from tabTask where
project=%s and status in ('Cancelled', 'Completed')""", self.name)[0][0]
self.percent_complete = flt(flt(completed) / total * 100, 2)
if (self.percent_complete_method == "Task Progress" and total > 0):
progress = frappe.db.sql("""select sum(progress) from tabTask where
project=%s""", self.name)[0][0]
self.percent_complete = flt(flt(progress) / total, 2)
if (self.percent_complete_method == "Task Weight" and total > 0):
weight_sum = frappe.db.sql("""select sum(task_weight) from tabTask where
project=%s""", self.name)[0][0]
weighted_progress = frappe.db.sql("""select progress, task_weight from tabTask where
project=%s""", self.name, as_dict=1)
pct_complete = 0
for row in weighted_progress:
pct_complete += row["progress"] * frappe.utils.safe_div(row["task_weight"], weight_sum)
self.percent_complete = flt(flt(pct_complete), 2)
# don't update status if it is cancelled
if self.status == 'Cancelled':
return
if self.percent_complete == 100:
self.status = "Completed"
else:
self.status = "Open"
def update_costing(self):
from_time_sheet = frappe.db.sql("""select
sum(costing_amount) as costing_amount,
sum(billing_amount) as billing_amount,
min(from_time) as start_date,
max(to_time) as end_date,
sum(hours) as time
from `tabTimesheet Detail` where project = %s and docstatus = 1""", self.name, as_dict=1)[0]
from_expense_claim = frappe.db.sql("""select
sum(total_sanctioned_amount) as total_sanctioned_amount
from `tabExpense Claim` where project = %s
and docstatus = 1""", self.name, as_dict=1)[0]
self.actual_start_date = from_time_sheet.start_date
self.actual_end_date = from_time_sheet.end_date
self.total_costing_amount = from_time_sheet.costing_amount
self.total_billable_amount = from_time_sheet.billing_amount
self.actual_time = from_time_sheet.time
self.total_expense_claim = from_expense_claim.total_sanctioned_amount
self.update_purchase_costing()
self.update_sales_amount()
self.update_billed_amount()
self.calculate_gross_margin()
def calculate_gross_margin(self):
expense_amount = (flt(self.total_costing_amount) + flt(self.total_expense_claim)
+ flt(self.total_purchase_cost) + flt(self.get('total_consumed_material_cost', 0)))
self.gross_margin = flt(self.total_billed_amount) - expense_amount
if self.total_billed_amount:
self.per_gross_margin = (self.gross_margin / flt(self.total_billed_amount)) * 100
def update_purchase_costing(self):
total_purchase_cost = frappe.db.sql("""select sum(base_net_amount)
from `tabPurchase Invoice Item` where project = %s and docstatus=1""", self.name)
self.total_purchase_cost = total_purchase_cost and total_purchase_cost[0][0] or 0
def update_sales_amount(self):
total_sales_amount = frappe.db.sql("""select sum(base_net_total)
from `tabSales Order` where project = %s and docstatus=1""", self.name)
self.total_sales_amount = total_sales_amount and total_sales_amount[0][0] or 0
def update_billed_amount(self):
total_billed_amount = frappe.db.sql("""select sum(base_net_total)
from `tabSales Invoice` where project = %s and docstatus=1""", self.name)
self.total_billed_amount = total_billed_amount and total_billed_amount[0][0] or 0
def after_rename(self, old_name, new_name, merge=False):
if old_name == self.copied_from:
frappe.db.set_value('Project', new_name, 'copied_from', new_name)
def send_welcome_email(self):
url = get_url("/project/?name={0}".format(self.name))
messages = (
_("You have been invited to collaborate on the project: {0}").format(self.name),
url,
_("Join")
)
content = """
<p>{0}.</p>
<p><a href="{1}">{2}</a></p>
"""
for user in self.users:
if user.welcome_email_sent == 0:
frappe.sendmail(user.user, subject=_("Project Collaboration Invitation"),
content=content.format(*messages))
user.welcome_email_sent = 1
def get_timeline_data(doctype, name):
'''Return timeline for attendance'''
return dict(frappe.db.sql('''select unix_timestamp(from_time), count(*)
from `tabTimesheet Detail` where project=%s
and from_time > date_sub(curdate(), interval 1 year)
and docstatus < 2
group by date(from_time)''', name))
def get_project_list(doctype, txt, filters, limit_start, limit_page_length=20, order_by="modified"):
return frappe.db.sql('''select distinct project.*
from tabProject project, `tabProject User` project_user
where
(project_user.user = %(user)s
and project_user.parent = project.name)
or project.owner = %(user)s
order by project.modified desc
limit {0}, {1}
'''.format(limit_start, limit_page_length),
{'user': frappe.session.user},
as_dict=True,
update={'doctype': 'Project'})
def get_list_context(context=None):
return {
"show_sidebar": True,
"show_search": True,
'no_breadcrumbs': True,
"title": _("Projects"),
"get_list": get_project_list,
"row_template": "templates/includes/projects/project_row.html"
}
def get_users_for_project(doctype, txt, searchfield, start, page_len, filters):
conditions = []
return frappe.db.sql("""select name, concat_ws(' ', first_name, middle_name, last_name)
from `tabUser`
where enabled=1
and name not in ("Guest", "Administrator")
and ({key} like %(txt)s
or full_name like %(txt)s)
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, full_name), locate(%(_txt)s, full_name), 99999),
idx desc,
name, full_name
limit %(start)s, %(page_len)s""".format(**{
'key': searchfield,
'fcond': get_filters_cond(doctype, filters, conditions),
'mcond': get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
@frappe.whitelist()
def get_cost_center_name(project):
return frappe.db.get_value("Project", project, "cost_center")
def hourly_reminder():
fields = ["from_time", "to_time"]
projects = get_projects_for_collect_progress("Hourly", fields)
for project in projects:
if (get_time(nowtime()) >= get_time(project.from_time) or
get_time(nowtime()) <= get_time(project.to_time)):
send_project_update_email_to_users(project.name)
def project_status_update_reminder():
daily_reminder()
twice_daily_reminder()
weekly_reminder()
def daily_reminder():
fields = ["daily_time_to_send"]
projects = get_projects_for_collect_progress("Daily", fields)
for project in projects:
if allow_to_make_project_update(project.name, project.get("daily_time_to_send"), "Daily"):
send_project_update_email_to_users(project.name)
def twice_daily_reminder():
fields = ["first_email", "second_email"]
projects = get_projects_for_collect_progress("Twice Daily", fields)
fields.remove("name")
for project in projects:
for d in fields:
if allow_to_make_project_update(project.name, project.get(d), "Twicely"):
send_project_update_email_to_users(project.name)
def weekly_reminder():
fields = ["day_to_send", "weekly_time_to_send"]
projects = get_projects_for_collect_progress("Weekly", fields)
current_day = get_datetime().strftime("%A")
for project in projects:
if current_day != project.day_to_send:
continue
if allow_to_make_project_update(project.name, project.get("weekly_time_to_send"), "Weekly"):
send_project_update_email_to_users(project.name)
def allow_to_make_project_update(project, time, frequency):
data = frappe.db.sql(""" SELECT name from `tabProject Update`
WHERE project = %s and date = %s """, (project, today()))
# len(data) > 1 condition is checked for twicely frequency
if data and (frequency in ['Daily', 'Weekly'] or len(data) > 1):
return False
if get_time(nowtime()) >= get_time(time):
return True
@frappe.whitelist()
def create_duplicate_project(prev_doc, project_name):
''' Create duplicate project based on the old project '''
import json
prev_doc = json.loads(prev_doc)
if project_name == prev_doc.get('name'):
frappe.throw(_("Use a name that is different from previous project name"))
# change the copied doc name to new project name
project = frappe.copy_doc(prev_doc)
project.name = project_name
project.project_template = ''
project.project_name = project_name
project.insert()
# fetch all the task linked with the old project
task_list = frappe.get_all("Task", filters={
'project': prev_doc.get('name')
}, fields=['name'])
# Create duplicate task for all the task
for task in task_list:
task = frappe.get_doc('Task', task)
new_task = frappe.copy_doc(task)
new_task.project = project.name
new_task.insert()
project.db_set('project_template', prev_doc.get('project_template'))
def get_projects_for_collect_progress(frequency, fields):
fields.extend(["name"])
return frappe.get_all("Project", fields = fields,
filters = {'collect_progress': 1, 'frequency': frequency, 'status': 'Open'})
def send_project_update_email_to_users(project):
doc = frappe.get_doc('Project', project)
if is_holiday(doc.holiday_list) or not doc.users: return
project_update = frappe.get_doc({
"doctype" : "Project Update",
"project" : project,
"sent": 0,
"date": today(),
"time": nowtime(),
"naming_series": "UPDATE-.project.-.YY.MM.DD.-",
}).insert()
subject = "For project %s, update your status" % (project)
incoming_email_account = frappe.db.get_value('Email Account',
dict(enable_incoming=1, default_incoming=1), 'email_id')
frappe.sendmail(recipients=get_users_email(doc),
message=doc.message,
subject=_(subject),
reference_doctype=project_update.doctype,
reference_name=project_update.name,
reply_to=incoming_email_account
)
def collect_project_status():
for data in frappe.get_all("Project Update",
{'date': today(), 'sent': 0}):
replies = frappe.get_all('Communication',
fields=['content', 'text_content', 'sender'],
filters=dict(reference_doctype="Project Update",
reference_name=data.name,
communication_type='Communication',
sent_or_received='Received'),
order_by='creation asc')
for d in replies:
doc = frappe.get_doc("Project Update", data.name)
user_data = frappe.db.get_values("User", {"email": d.sender},
["full_name", "user_image", "name"], as_dict=True)[0]
doc.append("users", {
'user': user_data.name,
'full_name': user_data.full_name,
'image': user_data.user_image,
'project_status': frappe.utils.md_to_html(
EmailReplyParser.parse_reply(d.text_content) or d.content
)
})
doc.save(ignore_permissions=True)
def send_project_status_email_to_users():
yesterday = add_days(today(), -1)
for d in frappe.get_all("Project Update",
{'date': yesterday, 'sent': 0}):
doc = frappe.get_doc("Project Update", d.name)
project_doc = frappe.get_doc('Project', doc.project)
args = {
"users": doc.users,
"title": _("Project Summary for {0}").format(yesterday)
}
frappe.sendmail(recipients=get_users_email(project_doc),
template='daily_project_summary',
args=args,
subject=_("Daily Project Summary for {0}").format(d.name),
reference_doctype="Project Update",
reference_name=d.name)
doc.db_set('sent', 1)
def update_project_sales_billing():
sales_update_frequency = frappe.db.get_single_value("Selling Settings", "sales_update_frequency")
if sales_update_frequency == "Each Transaction":
return
elif (sales_update_frequency == "Monthly" and frappe.utils.now_datetime().day != 1):
return
	# Else simply fall back to Daily
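	# One UNION query flags every project that has at least one submitted Sales Order
	# and/or Sales Invoice, so each project's totals are refreshed at most once below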
exists_query = '(SELECT 1 from `tab{doctype}` where docstatus = 1 and project = `tabProject`.name)'
project_map = {}
for project_details in frappe.db.sql('''
SELECT name, 1 as order_exists, null as invoice_exists from `tabProject` where
exists {order_exists}
union
SELECT name, null as order_exists, 1 as invoice_exists from `tabProject` where
exists {invoice_exists}
'''.format(
order_exists=exists_query.format(doctype="Sales Order"),
invoice_exists=exists_query.format(doctype="Sales Invoice"),
), as_dict=True):
project = project_map.setdefault(project_details.name, frappe.get_doc('Project', project_details.name))
if project_details.order_exists:
project.update_sales_amount()
if project_details.invoice_exists:
project.update_billed_amount()
for project in project_map.values():
project.save()
@frappe.whitelist()
def create_kanban_board_if_not_exists(project):
from frappe.desk.doctype.kanban_board.kanban_board import quick_kanban_board
if not frappe.db.exists('Kanban Board', project):
quick_kanban_board('Task', project, 'status')
return True
@frappe.whitelist()
def set_project_status(project, status):
'''
set status for project and all related tasks
'''
	if status not in ('Completed', 'Cancelled'):
frappe.throw(_('Status must be Cancelled or Completed'))
project = frappe.get_doc('Project', project)
frappe.has_permission(doc = project, throw = True)
for task in frappe.get_all('Task', dict(project = project.name)):
frappe.db.set_value('Task', task.name, 'status', status)
project.status = status
project.save()
|
gsnbng/erpnext
|
erpnext/projects/doctype/project/project.py
|
Python
|
agpl-3.0
| 16,359
|
# -*- coding: utf-8 -*-
# Copyright(C) 2009-2011 Romain Bignon, Christophe Benz
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from email.mime.text import MIMEText
from smtplib import SMTP
from email.Header import Header, decode_header
from email.Utils import parseaddr, formataddr, formatdate
from email import message_from_file, message_from_string
from smtpd import SMTPServer
import time
import re
import sys
import logging
import asyncore
import subprocess
import socket
from weboob.core import Weboob, CallErrors
from weboob.core.scheduler import Scheduler
from weboob.capabilities.messages import ICapMessages, ICapMessagesPost, Thread, Message
from weboob.tools.application.repl import ReplApplication
from weboob.tools.misc import html2text, get_backtrace, utc2local, to_unicode
__all__ = ['Monboob']
class FakeSMTPD(SMTPServer):
def __init__(self, app, bindaddr, port):
SMTPServer.__init__(self, (bindaddr, port), None)
self.app = app
def process_message(self, peer, mailfrom, rcpttos, data):
msg = message_from_string(data)
self.app.process_incoming_mail(msg)
class MonboobScheduler(Scheduler):
def __init__(self, app):
Scheduler.__init__(self)
self.app = app
def run(self):
if self.app.options.smtpd:
if ':' in self.app.options.smtpd:
host, port = self.app.options.smtpd.split(':', 1)
else:
host = '127.0.0.1'
port = self.app.options.smtpd
try:
FakeSMTPD(self.app, host, int(port))
except socket.error as e:
self.logger.error('Unable to start the SMTP daemon: %s' % e)
return False
        # XXX We should not have to copy this piece of code from
# weboob.scheduler.Scheduler.run().
try:
while True:
self.stop_event.wait(0.1)
if self.app.options.smtpd:
asyncore.loop(timeout=0.1, count=1)
except KeyboardInterrupt:
self._wait_to_stop()
raise
else:
self._wait_to_stop()
return True
class Monboob(ReplApplication):
APPNAME = 'monboob'
VERSION = '0.i'
COPYRIGHT = 'Copyright(C) 2010-2011 Romain Bignon'
    DESCRIPTION = 'Daemon that regularly checks for new messages on various websites, ' \
                  'sends an email for each new message, and can post replies to messages on a website.'
SHORT_DESCRIPTION = "daemon to send and check messages"
CONFIG = {'interval': 300,
'domain': 'weboob.example.org',
'recipient': 'weboob@example.org',
'smtp': 'localhost',
'pipe': '',
'html': 0}
CAPS = ICapMessages
DISABLE_REPL = True
def add_application_options(self, group):
group.add_option('-S', '--smtpd', help='run a fake smtpd server and set the port')
def create_weboob(self):
return Weboob(scheduler=MonboobScheduler(self))
def load_default_backends(self):
self.load_backends(ICapMessages, storage=self.create_storage())
def main(self, argv):
self.load_config()
try:
self.config.set('interval', int(self.config.get('interval')))
if self.config.get('interval') < 1:
raise ValueError()
except ValueError:
print >>sys.stderr, 'Configuration error: interval must be an integer >0.'
return 1
try:
self.config.set('html', int(self.config.get('html')))
if self.config.get('html') not in (0, 1):
raise ValueError()
except ValueError:
print >>sys.stderr, 'Configuration error: html must be 0 or 1.'
return 2
return ReplApplication.main(self, argv)
def get_email_address_ident(self, msg, header):
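        # Return the local part (before the @) of the given address header;
        # monboob encodes the backend name and thread/message ids there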
s = msg.get(header)
if not s:
return None
m = re.match('.*<([^@]*)@(.*)>', s)
if m:
return m.group(1)
else:
try:
return s.split('@')[0]
except IndexError:
return s
def do_post(self, line):
"""
post
Pipe with a mail to post message.
"""
msg = message_from_file(sys.stdin)
return self.process_incoming_mail(msg)
def process_incoming_mail(self, msg):
to = self.get_email_address_ident(msg, 'To')
sender = msg.get('From')
reply_to = self.get_email_address_ident(msg, 'In-Reply-To')
title = msg.get('Subject')
if title:
new_title = u''
for part in decode_header(title):
if part[1]:
new_title += unicode(part[0], part[1])
else:
new_title += unicode(part[0])
title = new_title
content = u''
for part in msg.walk():
if part.get_content_type() == 'text/plain':
s = part.get_payload(decode=True)
charsets = part.get_charsets() + msg.get_charsets()
for charset in charsets:
try:
if charset is not None:
content += unicode(s, charset)
else:
content += unicode(s)
except UnicodeError as e:
self.logger.warning('Unicode error: %s' % e)
continue
except Exception as e:
self.logger.exception(e)
continue
else:
break
if len(content) == 0:
print >>sys.stderr, 'Unable to send an empty message'
return 1
# remove signature
content = content.split(u'\n-- \n')[0]
parent_id = None
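        # Addresses generated by send_email() look like <backend.thread_id[.message_id]@domain>,
        # so the To / In-Reply-To local parts are split to recover the backend and ids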
if reply_to is None:
# This is a new message
if '.' in to:
bname, thread_id = to.split('.', 1)
else:
bname = to
thread_id = None
else:
# This is a reply
try:
bname, id = reply_to.split('.', 1)
thread_id, parent_id = id.rsplit('.', 1)
except ValueError:
                print >>sys.stderr, 'The In-Reply-To header must be of the form <backend.thread_id.message_id>'
return 1
# Default use the To header field to know the backend to use.
if to and bname != to:
bname = to
try:
backend = self.weboob.backend_instances[bname]
except KeyError:
print >>sys.stderr, 'Backend %s not found' % bname
return 1
if not backend.has_caps(ICapMessagesPost):
print >>sys.stderr, 'The backend %s does not implement ICapMessagesPost' % bname
return 1
thread = Thread(thread_id)
message = Message(thread,
0,
title=title,
sender=sender,
receivers=[to],
parent=Message(thread, parent_id) if parent_id else None,
content=content)
try:
backend.post_message(message)
except Exception as e:
content = u'Unable to send message to %s:\n' % thread_id
content += u'\n\t%s\n' % to_unicode(e)
if logging.root.level == logging.DEBUG:
content += u'\n%s\n' % to_unicode(get_backtrace(e))
self.send_email(backend, Message(thread,
0,
title='Unable to send message',
sender='Monboob',
parent=Message(thread, parent_id) if parent_id else None,
content=content))
def do_run(self, line):
"""
run
Run the fetching daemon.
"""
self.weboob.repeat(self.config.get('interval'), self.process)
self.weboob.loop()
def do_once(self, line):
"""
once
Send mails only once, then exit.
"""
return self.process()
def process(self):
try:
for backend, message in self.weboob.do('iter_unread_messages'):
if self.send_email(backend, message):
backend.set_message_read(message)
except CallErrors as e:
self.bcall_errors_handler(e)
def send_email(self, backend, mail):
domain = self.config.get('domain')
recipient = self.config.get('recipient')
reply_id = ''
if mail.parent:
reply_id = u'<%s.%s@%s>' % (backend.name, mail.parent.full_id, domain)
subject = mail.title
sender = u'"%s" <%s@%s>' % (mail.sender.replace('"', '""') if mail.sender else '',
backend.name, domain)
# assume that .date is an UTC datetime
date = formatdate(time.mktime(utc2local(mail.date).timetuple()), localtime=True)
msg_id = u'<%s.%s@%s>' % (backend.name, mail.full_id, domain)
if self.config.get('html') and mail.flags & mail.IS_HTML:
body = mail.content
content_type = 'html'
else:
if mail.flags & mail.IS_HTML:
body = html2text(mail.content)
else:
body = mail.content
content_type = 'plain'
if body is None:
body = ''
if mail.signature:
if self.config.get('html') and mail.flags & mail.IS_HTML:
body += u'<p>-- <br />%s</p>' % mail.signature
else:
body += u'\n\n-- \n'
if mail.flags & mail.IS_HTML:
body += html2text(mail.signature)
else:
body += mail.signature
# Header class is smart enough to try US-ASCII, then the charset we
# provide, then fall back to UTF-8.
header_charset = 'ISO-8859-1'
# We must choose the body charset manually
for body_charset in 'US-ASCII', 'ISO-8859-1', 'UTF-8':
try:
body.encode(body_charset)
except UnicodeError:
pass
else:
break
# Split real name (which is optional) and email address parts
sender_name, sender_addr = parseaddr(sender)
recipient_name, recipient_addr = parseaddr(recipient)
# We must always pass Unicode strings to Header, otherwise it will
# use RFC 2047 encoding even on plain ASCII strings.
sender_name = str(Header(unicode(sender_name), header_charset))
recipient_name = str(Header(unicode(recipient_name), header_charset))
# Make sure email addresses do not contain non-ASCII characters
sender_addr = sender_addr.encode('ascii')
recipient_addr = recipient_addr.encode('ascii')
# Create the message ('plain' stands for Content-Type: text/plain)
msg = MIMEText(body.encode(body_charset), content_type, body_charset)
msg['From'] = formataddr((sender_name, sender_addr))
msg['To'] = formataddr((recipient_name, recipient_addr))
msg['Subject'] = Header(unicode(subject), header_charset)
msg['Message-Id'] = msg_id
msg['Date'] = date
if reply_id:
msg['In-Reply-To'] = reply_id
self.logger.info('Send mail from <%s> to <%s>' % (sender, recipient))
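        # Deliver either through the user-configured 'pipe' command or, by default, via SMTP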
if len(self.config.get('pipe')) > 0:
p = subprocess.Popen(self.config.get('pipe'),
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.stdin.write(msg.as_string())
p.stdin.close()
if p.wait() != 0:
self.logger.error('Unable to deliver mail: %s' % p.stdout.read().strip())
return False
else:
# Send the message via SMTP to localhost:25
try:
smtp = SMTP(self.config.get('smtp'))
smtp.sendmail(sender, recipient, msg.as_string())
except Exception as e:
self.logger.error('Unable to deliver mail: %s' % e)
return False
else:
smtp.quit()
return True
|
yannrouillard/weboob
|
weboob/applications/monboob/monboob.py
|
Python
|
agpl-3.0
| 13,290
|
from unittest import skip
from django.conf import settings
# Runs each test in a transaction and flushes database:
from django.test import RequestFactory, TestCase
from lxml import etree as ET
from lxml.builder import E
from rest_framework.test import APIClient
from iati.factory.utils import _create_test_activity
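# lxml.builder's E exposes element factories as attributes; most IATI element names
# contain hyphens and are not valid Python identifiers, hence the getattr() aliases below.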
iati_activities = getattr(E, 'iati-activities')
iati_activity = getattr(E, 'iati-activity')
iati_identifier = getattr(E, 'iati-identifier')
reporting_org = getattr(E, 'reporting-org')
title = getattr(E, 'title')
description = getattr(E, 'description')
participating_org = getattr(E, 'participating-org')
other_identifier = getattr(E, 'other-identifier')
activity_status = getattr(E, 'activity-status')
recipient_country = getattr(E, 'recipient-country')
recipient_region = getattr(E, 'recipient-region')
sector = getattr(E, 'sector')
document_link = getattr(E, 'document-link')
owner_org = getattr(E, 'owner-org')
transaction = getattr(E, 'transaction')
transaction_type = getattr(E, 'transaction-type')
transaction_date = getattr(E, 'transaction-date')
capital_spend = getattr(E, 'capital-spend')
value = getattr(E, 'value')
provider_org = getattr(E, 'provider-org')
receiver_org = getattr(E, 'receiver-org')
disbursement_channel = getattr(E, 'disbursement-channel')
flow_type = getattr(E, 'flow-type')
finance_type = getattr(E, 'finance-type')
aid_type = getattr(E, 'aid-type')
tied_status = getattr(E, 'tied-status')
location = getattr(E, 'location')
name = getattr(E, 'name')
location_reach = getattr(E, 'location-reach')
location_id = getattr(E, 'location-id')
activity_description = getattr(E, 'activity-description')
administrative = getattr(E, 'administrative')
point = getattr(E, 'point')
pos = getattr(E, 'pos')
exactness = getattr(E, 'exactness')
location_class = getattr(E, 'location-class')
feature_designation = getattr(E, 'feature-designation')
budget = getattr(E, 'budget')
period_start = getattr(E, 'period-start')
period_end = getattr(E, 'period-end')
conditions = getattr(E, 'conditions')
condition = getattr(E, 'condition')
contact_info = getattr(E, 'contact-info')
organisation = getattr(E, 'organisation')
department = getattr(E, 'department')
person_name = getattr(E, 'person-name')
job_title = getattr(E, 'job-title')
telephone = getattr(E, 'telephone')
email = getattr(E, 'email')
website = getattr(E, 'website')
mailing_address = getattr(E, 'mailing-address')
country_budget_items = getattr(E, 'country-budget-items')
budget_item = getattr(E, 'budget-item')
humanitarian_scope = getattr(E, 'humanitarian-scope')
legacy_data = getattr(E, 'legacy-data')
crs_add = getattr(E, 'crs-add')
other_flags = getattr(E, 'other-flags')
loan_terms = getattr(E, 'loan-terms')
repayment_type = getattr(E, 'repayment-type')
repayment_plan = getattr(E, 'repayment-plan')
commitment_date = getattr(E, 'commitment-date')
repayment_first_date = getattr(E, 'repayment-first-date')
repayment_final_date = getattr(E, 'repayment-final-date')
loan_status = getattr(E, 'loan-status')
interest_received = getattr(E, 'interest-received')
principal_outstanding = getattr(E, 'principal-outstanding')
principal_arrears = getattr(E, 'principal-arrears')
interest_arrears = getattr(E, 'interest-arrears')
channel_code = getattr(E, 'channel-code')
collaboration_type = getattr(E, 'collaboration-type')
default_flow_type = getattr(E, 'default-flow-type')
default_finance_type = getattr(E, 'default-finance-type')
default_aid_type = getattr(E, 'default-aid-type')
default_tied_status = getattr(E, 'default-tied-status')
related_activity = getattr(E, 'related-activity')
activity_scope = getattr(E, 'activity-scope')
policy_marker = getattr(E, 'policy-marker')
activity_date = getattr(E, 'activity-date')
planned_disbursement = getattr(E, 'planned-disbursement')
result = getattr(E, 'result')
period = getattr(E, 'period')
indicator = getattr(E, 'indicator')
reference = getattr(E, 'reference')
baseline = getattr(E, 'baseline')
comment = getattr(E, 'comment')
target = getattr(E, 'target')
dimension = getattr(E, 'dimension')
actual = getattr(E, 'actual')
fss = getattr(E, 'fss')
forecast = getattr(E, 'forecast')
def narrative(content):
return getattr(E, 'narrative')(content, **{
"{http://www.w3.org/XML/1998/namespace}lang": "en",
})
def boolToNum(b):
if b:
return "1"
else:
return "0"
@skip('Eimantas: these tests are unmaintainable.')
class ActivityXMLTestCase(TestCase):
"""
Test ActivityXMLSerializer outputs proper XML
"""
request_dummy = RequestFactory().get('/')
c = APIClient()
def setUp(self):
self.activity = _create_test_activity()
# TODO: generate full activity example so we can parse this and test
# the result - 2016-12-14
def test_create_activity(self):
res = self.c.get(
"/api/export/activities/IATI-search1?format=xml"
)
activity = self.activity
reporting_org1 = activity.publisher.organisation
description1 = activity.description_set.all()[0]
description2 = activity.description_set.all()[1]
participating_org1 = activity.participating_organisations.all()[0]
other_identifier1 = activity.otheridentifier_set.all()[0]
recipient_country1 = activity.activityrecipientcountry_set.all()[0]
recipient_region1 = activity.activityrecipientregion_set.all()[0]
sector1 = activity.activitysector_set.all()[0]
document_link1 = activity.documentlink_set.all()[0]
document_link_category1 = document_link1.documentlinkcategory_set\
.all()[0]
document_link_language1 = document_link1.documentlinklanguage_set\
.all()[0]
location1 = activity.location_set.all()[0]
location_administrative1 = location1.locationadministrative_set\
.all()[0]
transaction1 = activity.transaction_set.all()[0]
provider_org1 = transaction1.provider_organisation
receiver_org1 = transaction1.receiver_organisation
transaction_sector1 = transaction1.transactionsector_set.all()[0]
transaction_recipient_country1 = transaction1\
.transactionrecipientcountry_set.all()[0]
transaction_recipient_region1 = transaction1\
.transactionrecipientregion_set.all()[0]
budget1 = activity.budget_set.all()[0]
conditions1 = activity.conditions
condition1 = conditions1.condition_set.all()[0]
condition2 = conditions1.condition_set.all()[1]
contact_info1 = activity.contactinfo_set.all()[0]
country_budget_item1 = activity.country_budget_items
budget_item1 = country_budget_item1.budgetitem_set.all()[0]
humanitarian_scope1 = activity.humanitarianscope_set.all()[0]
legacy_data1 = activity.legacydata_set.all()[0]
legacy_data2 = activity.legacydata_set.all()[1]
crs_add1 = activity.crsadd_set.all()[0]
other_flag1 = crs_add1.other_flags.all()[0]
related_activity1 = activity.relatedactivity_set.all()[0]
policy_marker1 = activity.activitypolicymarker_set.all()[0]
activity_date1 = activity.activitydate_set.all()[0]
planned_disbursement1 = activity.planneddisbursement_set.all()[0]
planned_disbursement_provider1 = planned_disbursement1\
.provider_organisation
planned_disbursement_receiver1 = planned_disbursement1\
.receiver_organisation
result1 = activity.result_set.all()[0]
result_indicator1 = result1.resultindicator_set.all()[0]
result_indicator_reference1 = result_indicator1\
.resultindicatorreference_set.all()[0]
result_indicator_period1 = result_indicator1.resultindicatorperiod_set\
.all()[0]
result_indicator_period_target_location1 = result_indicator_period1.\
targets.all()[0].resultindicatorperiodtargetlocation_set.all()[0]
result_indicator_period_target_dimension1 = result_indicator_period1.\
targets.all()[0].resultindicatorperiodtargetdimension_set.all()[0]
result_indicator_period_actual_location1 = result_indicator_period1.\
resultindicatorperiodactuallocation_set.all()[0]
result_indicator_period_actual_dimension1 = result_indicator_period1.\
resultindicatorperiodactualdimension_set.all()[0]
location01 = related_activity1.ref_activity.location_set.all()[0]
location02 = related_activity1.ref_activity.location_set.all()[1]
fss1 = activity.fss_set.all()[0]
fss_forecast1 = fss1.fssforecast_set.all()[0]
transaction2 = related_activity1.ref_activity.transaction_set.all()[0]
xml = iati_activities(
ET.Comment(settings.EXPORT_COMMENT),
iati_activity(
iati_identifier(
related_activity1.ref_activity.iati_identifier
),
reporting_org(
narrative("reporting_organisation1"),
narrative("reporting_organisation2"),
**{
"ref": reporting_org1.organisation_identifier,
"type": reporting_org1.type.code,
"secondary-reporter": boolToNum(
activity.secondary_reporter
)
}
),
activity_status(**{
"code": str(
related_activity1.ref_activity.activity_status.code
)
}),
activity_scope(**{
"code": str(related_activity1.ref_activity.scope.code)
}),
location(
location_reach(code=location01.location_reach.code),
location_id(**{
"vocabulary": location01.location_id_vocabulary.code,
"code": location01.location_id_code,
}),
name(),
description(),
activity_description(),
point(
pos(
"{} {}".format(
location01.point_pos.y, location01.point_pos.x
)),
**{
"srsName": location01.point_srs_name,
}
),
exactness(code=location01.exactness.code),
location_class(code=location01.location_class.code),
feature_designation(
code=location01.feature_designation.code
),
**{
"ref": location01.ref,
}
),
location(
location_reach(code=location02.location_reach.code),
location_id(**{
"vocabulary": location02.location_id_vocabulary.code,
"code": location02.location_id_code,
}),
name(),
description(),
activity_description(),
point(
pos(
"{} {}".format(
location02.point_pos.y, location02.point_pos.x
)),
**{
"srsName": location02.point_srs_name,
}
),
exactness(code=location02.exactness.code),
location_class(code=location02.location_class.code),
feature_designation(
code=location02.feature_designation.code
),
**{
"ref": location02.ref,
}
),
collaboration_type(
**{"code": str(
related_activity1.ref_activity.collaboration_type.code
)}),
default_flow_type(
**{"code": str(
related_activity1.ref_activity.default_flow_type.code
)}),
default_finance_type(
**{"code": str(
related_activity1.ref_activity.default_finance_type
.code
)}),
default_aid_type(
**{"code": str(
related_activity1.ref_activity.default_aid_type.code
)}),
default_tied_status(
**{"code": str(
related_activity1.ref_activity.default_tied_status.code
)}),
transaction(
transaction_type(**{
"code": transaction2.transaction_type.code
}),
transaction_date(
**{
"iso-date": transaction2.transaction_date
.isoformat()
}),
value(
str(transaction2.value),
**{
"value-date": transaction2.value_date.isoformat(),
"currency": transaction2.currency.code
}
),
disbursement_channel(
**{"code": transaction2.disbursement_channel.code
}),
flow_type(**{"code": transaction2.flow_type.code}),
finance_type(**{"code": transaction2.finance_type.code}),
aid_type(**{"code": transaction2.aid_type.code}),
tied_status(**{"code": transaction2.tied_status.code}),
**{
"humanitarian": boolToNum(transaction2.humanitarian),
"ref": transaction2.ref
}
),
**{
"hierarchy": str(related_activity1.ref_activity.hierarchy),
"{http://www.w3.org/XML/1998/namespace}lang": related_activity1 # NOQA: E501
.ref_activity.default_lang.code,
}
),
iati_activity(
iati_identifier(activity.iati_identifier),
reporting_org(
narrative("reporting_organisation1"),
narrative("reporting_organisation2"),
**{
"ref": reporting_org1.organisation_identifier,
"type": reporting_org1.type.code,
"secondary-reporter": boolToNum(
activity.secondary_reporter
)
}
),
title(
narrative("title1"),
narrative("title2"),
),
description(
narrative("description1_1"),
narrative("description1_2"),
**{
"type": description1.type.code
}
),
description(
narrative("description2_1"),
narrative("description2_2"),
**{
"type": description2.type.code
}
),
participating_org(
narrative("participating_organisation1"),
narrative("participating_organisation2"),
**{
"ref": participating_org1.normalized_ref,
"type": participating_org1.type.code,
"role": participating_org1.role.code,
"activity-id": participating_org1.org_activity_id,
}
),
other_identifier(
owner_org(
narrative(
other_identifier1.narratives.all()[0].content
),
narrative(
other_identifier1.narratives.all()[1].content
),
**{
"ref": other_identifier1.owner_ref,
}
),
**{
"ref": other_identifier1.identifier,
"type": other_identifier1.type.code,
}
),
activity_status(
**{"code": str(activity.activity_status.code
)}),
activity_date(
**{
"iso-date": activity_date1.iso_date.isoformat(),
"type": activity_date1.type.code
}),
contact_info(
organisation(
narrative("Agency A"),
),
department(
narrative("Department B"),
),
person_name(
narrative("A. Example"),
),
job_title(
narrative("Transparency Lead"),
),
telephone(contact_info1.telephone),
email(contact_info1.email),
website(contact_info1.website),
mailing_address(
narrative(
"Transparency House, The Street, Town, City, "
"Postcode"
)
),
** {
"type": contact_info1.type.code,
}),
activity_scope(
**{"code": str(activity.scope.code)}),
recipient_country(
**{
"code": recipient_country1.country.code,
"percentage": str(recipient_country1.percentage),
}
),
recipient_region(
**{
"code": recipient_region1.region.code,
"vocabulary": recipient_region1.vocabulary.code,
"vocabulary-uri": recipient_region1.vocabulary_uri,
"percentage": str(recipient_region1.percentage)
}
),
location(
location_reach(code=location1.location_reach.code),
location_id(**{
"vocabulary": location1.location_id_vocabulary.code,
"code": location1.location_id_code,
}),
name(
narrative("location_name1_1"),
),
description(
narrative("location_description1_1"),
),
activity_description(
narrative("location_activity_description1_1"),
),
administrative(**{
"vocabulary": location_administrative1.vocabulary.code,
"code": location_administrative1.code,
"level": str(location_administrative1.level),
}),
point(
pos(
"{} {}".format(
location1.point_pos.y,
location1.point_pos.x
)),
**{
"srsName": location1.point_srs_name,
}
),
exactness(code=location1.exactness.code),
location_class(code=location1.location_class.code),
feature_designation(
code=location1.feature_designation.code
),
**{
"ref": location1.ref,
}
),
sector(
**{
"code": sector1.sector.code,
"vocabulary": sector1.vocabulary.code,
"vocabulary-uri": sector1.vocabulary_uri,
"percentage": str(sector1.percentage),
}
),
country_budget_items(
budget_item(
description(
narrative("Description text"),
),
**{"code": budget_item1.code.code}
),
**{"vocabulary": country_budget_item1.vocabulary.code}
),
humanitarian_scope(
# Add HumanitarianScope in data models Date 12-01-2017
# narrative("Nepal Earthquake April 2015"),
**{
"type": humanitarian_scope1.type.code,
"vocabulary": humanitarian_scope1.vocabulary.code,
"code": humanitarian_scope1.code
}),
policy_marker(
**{
"vocabulary": policy_marker1.vocabulary.code,
"code": policy_marker1.code.code,
"significance": policy_marker1.significance.code
}
),
collaboration_type(**{
"code": str(activity.collaboration_type.code)
}),
default_flow_type(**{
"code": str(activity.default_flow_type.code)
}),
default_finance_type(**{
"code": str(activity.default_finance_type.code)
}),
default_aid_type(**{
"code": str(activity.default_aid_type.code)
}),
default_tied_status(**{
"code": str(activity.default_tied_status.code)
}),
planned_disbursement(
period_start(**{
"iso-date": planned_disbursement1.period_start
.isoformat()
}),
period_end(**{
"iso-date": planned_disbursement1.period_end
.isoformat()
}),
value(
str(planned_disbursement1.value),
**{
"currency": planned_disbursement1.currency.code,
"value-date": planned_disbursement1.value_date
.isoformat()
}),
provider_org(
narrative("Agency B"),
**{
"provider-activity-id": planned_disbursement_provider1 # NOQA: E501
.provider_activity_ref,
"type": planned_disbursement_provider1.type.code,
"ref": planned_disbursement_provider1.ref
}),
receiver_org(
narrative("Agency A"),
**{
"receiver-activity-id": planned_disbursement_receiver1 # NOQA: E501
.receiver_activity_ref,
"type": planned_disbursement_receiver1.type.code,
"ref": planned_disbursement_receiver1.ref
}),
**{"type": planned_disbursement1.type.code}
),
budget(
period_start(**{
'iso-date': budget1.period_start.isoformat()
}),
period_end(**{'iso-date': budget1.period_end.isoformat()}),
value(
str(budget1.value),
**{
'currency': budget1.currency.code,
'value-date': budget1.value_date.isoformat(),
}),
**{
"type": budget1.type.code,
"status": budget1.status.code,
}),
capital_spend(
**{
"percentage": str(activity.capital_spend),
}
),
transaction(
transaction_type(code=transaction1.transaction_type.code),
transaction_date(**{
'iso-date': transaction1.transaction_date.isoformat()
}),
value(str(transaction1.value), **{
"currency": transaction1.currency.code,
"value-date": transaction1.value_date.isoformat()
}),
description(
narrative("transaction_description1_1"),
narrative("transaction_description1_2"),
),
provider_org(
narrative("transaction_provider_org1_1"),
narrative("transaction_provider_org1_2"),
**{
"provider-activity-id": provider_org1
.provider_activity_ref,
"ref": provider_org1.ref,
}),
receiver_org(
narrative("transaction_receiver_org1_1"),
narrative("transaction_receiver_org1_2"),
**{
"receiver-activity-id": receiver_org1
.receiver_activity_ref,
"ref": receiver_org1.ref,
}),
disbursement_channel(
code=transaction1.disbursement_channel.code
),
sector(**{
"vocabulary": transaction_sector1.vocabulary.code,
"vocabulary-uri": transaction_sector1.vocabulary_uri,
"code": transaction_sector1.sector.code,
}),
recipient_country(**{
"code": transaction_recipient_country1.country.code,
}),
recipient_region(**{
"vocabulary": transaction_recipient_region1.vocabulary
.code,
"vocabulary-uri": transaction_recipient_region1
.vocabulary_uri,
"code": transaction_recipient_region1.region.code,
}),
flow_type(code=transaction1.flow_type.code),
finance_type(code=transaction1.finance_type.code),
aid_type(code=transaction1.aid_type.code),
tied_status(code=transaction1.tied_status.code),
**{
"ref": transaction1.ref,
"humanitarian": boolToNum(transaction1.humanitarian)
}),
document_link(
title(
narrative("document_link_title1"),
narrative("document_link_title2"),
),
E.category(code=document_link_category1.category.code),
E.language(code=document_link_language1.language.code),
getattr(E, 'document-date')(**
{"iso-date": document_link1.iso_date.isoformat()}
),
**{
"format": document_link1.file_format.code,
"url": document_link1.url,
}
),
related_activity(**{
"ref": related_activity1.ref,
"type": str(related_activity1.type.code)
}),
legacy_data(
**{
"name": legacy_data1.name,
"value": legacy_data1.value,
"iati-equivalent": legacy_data1.iati_equivalent
}
),
legacy_data(
**{
"name": legacy_data2.name,
"value": legacy_data2.value,
"iati-equivalent": legacy_data2.iati_equivalent
}
),
conditions(
condition(
narrative("Conditions text"),
narrative("Conditions texte"),
**{"type": condition1.type.code, }
),
condition(
narrative("Conditions text2"),
narrative("Conditions texte2"),
**{"type": condition2.type.code, }
),
**{"attached": boolToNum(conditions1.attached), }
),
result(
title(narrative("Result title")),
description(narrative("Result description text")),
indicator(
title(narrative("Indicator title")),
description(narrative("Indicator description text")),
reference(
**{
"vocabulary": result_indicator_reference1
.vocabulary.code,
"code": result_indicator_reference1.code,
"indicator-uri": result_indicator_reference1
.indicator_uri
}),
baseline(
comment(narrative("Baseline comment text")),
**{
"year": str(result_indicator1.baseline_year),
"value": result_indicator1.baseline_value
}),
period(
period_start(
**{"iso-date": result_indicator_period1
.period_start.isoformat()}),
period_end(
**{"iso-date": result_indicator_period1
.period_end.isoformat()}),
target(
comment(narrative("Target comment text")),
location(
**{"ref": result_indicator_period_target_location1.ref} # NOQA: E501
),
dimension(**{
"name": result_indicator_period_target_dimension1.name, # NOQA: E501
"value": result_indicator_period_target_dimension1.value # NOQA: E501
}),
# **{"value": str(
# result_indicator_period1.target
# )}
),
actual(
comment(narrative("Actual comment text")),
location(**{
"ref": result_indicator_period_actual_location1.ref # NOQA: E501
}),
dimension(**{
"name": result_indicator_period_actual_dimension1.name, # NOQA: E501
"value": result_indicator_period_actual_dimension1.value # NOQA: E501
}),
**{"value": str(
result_indicator_period1.actual
)}
)
),
**{
"measure": result_indicator1.measure.code,
"ascending": boolToNum(result_indicator1.ascending)
}),
**{
"type": result1.type.code,
"aggregation-status": boolToNum(
result1.aggregation_status
)
}),
crs_add(
other_flags(
**{
"code": other_flag1.other_flags.code,
"significance": boolToNum(other_flag1.significance)
}
),
loan_terms(
repayment_type(
**{
"code": crs_add1.loan_terms.repayment_type.code
}
),
repayment_plan(
**{
"code": crs_add1.loan_terms.repayment_plan.code
}
),
commitment_date(
**{
"iso-date": str(
crs_add1.loan_terms.commitment_date
)
}
),
repayment_first_date(
**{
"iso-date": str(
crs_add1.loan_terms.repayment_first_date
)
}
),
repayment_final_date(
**{
"iso-date": str(
crs_add1.loan_terms.repayment_final_date
)
}
),
**{
"rate-1": str(crs_add1.loan_terms.rate_1),
"rate-2": str(crs_add1.loan_terms.rate_2)
}
),
loan_status(
interest_received(str(
crs_add1.loan_status.interest_received
)),
principal_outstanding(
str(crs_add1.loan_status.principal_outstanding)),
principal_arrears(str(
crs_add1.loan_status.principal_arrears
)),
interest_arrears(str(
crs_add1.loan_status.interest_arrears
)),
**{
"year": str(crs_add1.loan_status.year),
"currency": crs_add1.loan_status.currency.code,
"value-date": str(crs_add1.loan_status.value_date)
}
),
channel_code(crs_add1.channel_code)
),
fss(
forecast(
str(fss_forecast1.value),
**{
"year": str(fss_forecast1.year),
"value-date": fss_forecast1.value_date.isoformat(),
"currency": str(fss_forecast1.currency.code)
}),
**{
"extraction-date": fss1.extraction_date.isoformat(),
"priority": boolToNum(fss1.priority),
"phaseout-year": str(fss1.phaseout_year)
}),
**{
"hierarchy": str(activity.hierarchy),
"{http://www.w3.org/XML/1998/namespace}lang": activity
.default_lang.code,
}
),
version="2.02",
)
parsed_xml = ET.fromstring(res.content)
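        # Recursively compare the hand-built expected tree with the serializer output;
        # tag, text, tail, attributes and child count are asserted at every level.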
def elements_equal(e1, e2):
self.assertEqual(e1.tag, e2.tag)
self.assertEqual(e1.text, e2.text)
self.assertEqual(e1.tail, e2.tail)
self.assertEqual(e1.attrib, e2.attrib)
self.assertEqual(
len(e1),
len(e2),
"{} != {} for elements {} and {}".format(
len(e1),
len(e2),
e1.tag,
e2.tag))
return all(elements_equal(c1, c2) for c1, c2 in zip(e1, e2))
elements_equal(
ET.fromstring(ET.tostring(xml, pretty_print=True)),
parsed_xml
)
|
zimmerman-zimmerman/OIPA
|
OIPA/api/export/tests/test_full_activity.py
|
Python
|
agpl-3.0
| 37,603
|
from django.http import *
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.db import IntegrityError
from django.forms import ModelForm
from gstudio.models import *
from objectapp.models import *
def context_member(request,reltit , memtit):
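    # Given a relation-type title (reltit) and a node title (memtit), work out which side of
    # the relation the node plays and collect every candidate for the opposite role
    # (the role's members, its subtypes and their members) into finaldict for the template.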
member = []
subtype = []
subtypemember = []
finaldict = {}
nt = []
parenttype = []
#-------------------------------------------------------------
if Objecttype.objects.filter(title = str(memtit)):
ot = Objecttype.objects.get(title = str(memtit))
absolute_url_node = ot.get_absolute_url()
elif Gbobject.objects.filter(title = str(memtit)):
ot = Gbobject.objects.get(title = str(memtit))
absolute_url_node = ot.get_absolute_url()
#--------------------------------------------------------------
if Relationtype.objects.filter(title = str(reltit)):
r =Relationtype.objects.get(title = str(reltit))
role = r.left_subjecttype.ref
roletype = str(r.left_applicable_nodetypes)
print "Original is left role of relation"
newrole = r.right_subjecttype.ref
newroletype = str(r.right_applicable_nodetypes)
print 'original ' ,str(role)
print 'newrole (i.e right)', str(newrole)
else:
r = Relationtype.objects.get(inverse = str(reltit))
role = r.right_subjecttype.ref
roletype = str(r.right_applicable_nodetypes)
print "Original is right role of relation"
newrole = r.left_subjecttype.ref
newroletype = str(r.left_applicable_nodetypes)
print 'original ' ,str(role)
print 'newrole (i.e left)', str(newrole)
#---------------------------------------------------------------------
if newrole.reftype == 'Objecttype' and newroletype == 'OT':
print "Objecttype and OT"
for i in newrole.get_members:
member.append(i)
for i in member:
finaldict.update({i.id:str(i.title)})
# for i in newrole.get_children():
# subtype.append(i.ref)
for i in newrole.get_descendants():
subtype.append(i.ref)
for i in subtype:
finaldict.update({i.id:str(i.title)})
for i in subtype:
subtypemember.append(i.get_members)
subtypemember = [num for elem in subtypemember for num in elem]
for i in subtypemember:
finaldict.update({i.id:str(i.title)})
finaldict.update({newrole.id:str(newrole.title)})
elif newrole.reftype == 'Gbobject' and newroletype == 'OB':
print "Gbobject and OB"
nt = newrole.objecttypes.all()
for i in nt:
parenttype.append(i.ref)
for i in parenttype:
member.append(i.get_members)
member = [num for elem in member for num in elem]
subtypent = []
# for i in parenttype:
# subtypent.append(i.get_children())
# subtypent = [num for elem in subtypent for num in elem]
# for i in subtypent:
# subtype.append(i.ref)
# subtype = [num for elem in subtype for num in elem]
for i in parenttype:
subtypent.append(i.get_descendants())
for i in subtypent:
subtype.append(i.ref)
for i in subtype:
subtypemember.append(i.get_members)
subtypemember = [num for elem in subtypemember for num in elem]
for i in member:
finaldict.update({i.id:str(i.title)})
for i in subtypemember:
finaldict.update({i.id:str(i.title)})
elif newrole.reftype == 'Objecttype' and newroletype == 'OB':
print "Objecttype and OB"
for i in newrole.get_members:
member.append(i)
for i in member:
finaldict.update({i.id:str(i.title)})
# for i in newrole.get_children():
# subtype.append(i.ref)
for i in newrole.get_descendants():
subtype.append(i.ref)
for i in subtype:
subtypemember.append(i.get_members)
subtypemember = [num for elem in subtypemember for num in elem]
for i in subtypemember:
finaldict.update({i.id:str(i.title)})
print 'member',str(member)
print 'subtype', str(subtype)
print 'subtypemember', str(subtypemember)
elif newrole.reftype == 'Gbobject' and newroletype == 'OT':
print "Gbobject and OT"
nt = newrole.objecttypes.all()
for i in nt:
parenttype.append(i.ref)
for i in parenttype:
member.append(i.get_members)
member = [num for elem in member for num in elem]
subtypent = []
# for i in parenttype:
# subtypent.append(i.get_children())
# subtypent = [num for elem in subtypent for num in elem]
# for i in subtypent:
# subtype.append(i.ref)
# subtype = [num for elem in subtype for num in elem]
for i in parenttype:
subtypent.append(i.get_descendants())
for i in subtypent:
subtype.append(i.ref)
for i in subtype:
subtypemember.append(i.get_members)
subtypemember = [num for elem in subtypemember for num in elem]
for i in subtype:
finaldict.update({i.id:str(i.title)})
for i in parenttype:
finaldict.update({i.id:str(i.title)})
for i in member:
finaldict.update({i.id:str(i.title)})
for i in subtypemember:
finaldict.update({i.id:str(i.title)})
print 'absolute_url_node', str(absolute_url_node)
template="objectapp/selectRT.html"
context = RequestContext(request,{'finaldict':finaldict,'gb':memtit,'reltit':reltit, 'absolute_url_node': absolute_url_node})
return render_to_response(template,context)
def context_save(request,leftmem, reltype, rightmem):
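    # Save a Relation between leftmem and rightmem: `flag` ends up 0 when leftmem (or one of
    # its ancestor object types) plays the relation's left role, and the left/right subjects
    # are swapped accordingly before the Relation row is created.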
try:
leftmem = str(leftmem)
reltype = str(reltype)
rightmem = str(rightmem)
print 'leftmem :', leftmem, 'rightmem :', rightmem
pt = []
nt = []
left = NID.objects.get(title = leftmem)
print 'leftid', str(left.id)
right = NID.objects.get(title = rightmem)
print 'rightid', str(right.id)
if Relationtype.objects.filter(title=reltype):
relation = Relationtype.objects.get(title = reltype)
else:
relation = Relationtype.objects.get(inverse = reltype)
rightrole = relation.right_subjecttype_id
r = relation.right_subjecttype.ref
print 'rightrole', str(r)
leftrole = relation.left_subjecttype_id
l=relation.left_subjecttype.ref
print 'leftrole', str(l)
#-----------------------------------------------------------------------
flag = 1
if Objecttype.objects.filter(title = leftmem):
obj = Objecttype.objects.get(title = leftmem)
print 'OT', str(obj)
while obj.parent:
pt.append((obj.parent).ref)
obj=obj.parent
for i in range(len(pt)):
if pt[i].id == leftrole :
flag = 0
print "Objecttype flag = 0 "
break
else:
print "Objecttype flag = 1 "
elif Gbobject.objects.filter(title = leftmem):
gb = Gbobject.objects.get(title = leftmem)
print 'Ob', str(gb)
nt = gb.objecttypes.all()
print 'nt ', str(nt)
for i in range(len(nt)):
pt.append(nt[i].ref)
obj = nt[i].ref
while obj.parent:
pt.append(obj.parent.ref)
obj = obj.parent
print 'pt ', str(pt)
for i in range(len(pt)):
if left.id == leftrole or pt[i].id == leftrole:
flag = 0
print "Object flag = 0"
break
else:
print "Object flag = 1"
print 'pt:',str(pt)
#-----------------------------------------------------------------------------------
if flag == 0:
print 'left_subject_id', l
savedict = {'title':relation, 'slug':relation, 'left_subject_id':left.id, 'right_subject_id':right.id, 'relationtype_id':relation.id, 'left_subject_scope':' ', 'right_subject_scope':' ', 'relationtype_scope':' ' }
else:
savedict = {'title':relation, 'slug':relation, 'left_subject_id':right.id, 'right_subject_id':left.id, 'relationtype_id':relation.id, 'left_subject_scope':' ', 'right_subject_scope':' ', 'relationtype_scope':' '}
rtt = Relation.objects.create(**savedict)
rtt.save()
print "left"+ str(left) + " right" + str(right) + " reltype" +str(relation)+ " leftrole"+ str(leftrole) + " rightrole " + str(rightrole)
print savedict
return HttpResponseRedirect("/nodetypes/")
#return savedict
except IntegrityError: #Exception raised when the relational integrity of the database is affected, e.g. a foreign key check fails, duplicate key, etc.
return HttpResponseRedirect("/nodetypes/")
#pass
|
gnowgi/gnowsys-studio
|
objectapp/views/dynamicRT.py
|
Python
|
agpl-3.0
| 9,486
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-05 11:48
from __future__ import unicode_literals
import uuid
from django.db import migrations, models
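# Adds a nullable, db-indexed `uuid` column (default uuid.uuid4) to each of these reference models.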
class Migration(migrations.Migration):
dependencies = [
('reference', '0005_auto_20160902_1639'),
]
operations = [
migrations.AddField(
model_name='assimilationcriteria',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='continent',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='country',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='currency',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='decree',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='domain',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='educationinstitution',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='educationtype',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='externaloffer',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='gradetype',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='institutionalgradetype',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
migrations.AddField(
model_name='language',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, null=True),
),
]
|
uclouvain/OSIS-Louvain
|
reference/migrations/0006_add_uuid_field.py
|
Python
|
agpl-3.0
| 2,539
|
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
from parser import parse
from strings import *
import os.path
#inspired by code from python cookbook
def ensure_relative_path_exists(newdir):
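    # Create newdir and any missing parent directories (similar to os.makedirs), but refuse
    # to proceed if a regular file already exists with that name.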
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired " \
"dir, '%s', already exists." % newdir)
else:
head, tail = os.path.split(newdir)
print "Head, Tail: %s, %s" % (head, tail)
if head and not os.path.isdir(head):
ensure_relative_path_exists(head)
if tail:
os.mkdir(newdir)
|
eoneil1942/voltdb-4.7fix
|
src/catgen/catalog_utils/__init__.py
|
Python
|
agpl-3.0
| 1,341
|
# TODO: load settings.py here (for every env.)
import os
from dotenv import find_dotenv, load_dotenv
from OIPA.settings import * # NOQA: F401, F403
load_dotenv(find_dotenv())
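# Database connection parameters come from environment variables (optionally loaded from a
# .env file above); engine, host, port and connection max-age fall back to defaults.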
DATABASES = {
'default': {
'ENGINE': os.getenv(
'OIPA_DB_ENGINE', 'django.contrib.gis.db.backends.postgis'
),
'HOST': os.getenv('OIPA_DB_HOST', 'localhost'),
'PORT': os.getenv('OIPA_DB_PORT', 5432),
'NAME': os.getenv('OIPA_DB_NAME'),
'USER': os.getenv('OIPA_DB_USER'),
'PASSWORD': os.getenv('OIPA_DB_PASSWORD'),
'CONN_MAX_AGE': int(os.getenv('OIPA_DB_CONN_MAX_AGE', 500))
},
}
# In production env, log everything to JSON files so DataDog can pick it up:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'json': {
'()': 'pythonjsonlogger.jsonlogger.JsonFormatter',
'format': '%(threadName)s %(name)s %(thread)s %(created)f %(process)s %(processName)s %(relativeCreated)s %(module)s %(funcName)s %(levelno)s %(msecs)s %(pathname)s %(lineno)s %(asctime)s %(message)s %(filename)s %(levelname)s %(special)s %(run)s', # NOQA: E501
},
},
'handlers': {
'oipa-json-logfile': {
'level': OIPA_LOG_LEVEL, # NOQA: F405
'class': 'logging.handlers.WatchedFileHandler',
'filename': '/var/log/oipa/oipa/oipa-json.log',
'formatter': 'json',
},
'iati-parser-json-logfile': {
'level': OIPA_LOG_LEVEL, # NOQA: F405
'class': 'logging.handlers.WatchedFileHandler',
'filename': '/var/log/oipa/oipa/iati-parser-json.log',
'formatter': 'json',
},
'django-json-logfile': {
'level': OIPA_LOG_LEVEL, # NOQA: F405
'class': 'logging.handlers.WatchedFileHandler',
'filename': '/var/log/oipa/oipa/django-json.log',
'formatter': 'json',
},
},
'loggers': {
# All other errors:
'': {
'handlers': ['oipa-json-logfile'],
'level': OIPA_LOG_LEVEL, # NOQA: F405
'propagate': False,
},
# IATI Parser related errors:
'iati.parser': {
'handlers': ['iati-parser-json-logfile'],
'level': OIPA_LOG_LEVEL, # NOQA: F405
'propagate': False,
},
# Django-related errors:
'django': {
'handlers': ['django-json-logfile'],
'level': OIPA_LOG_LEVEL, # NOQA: F405
'propagate': False,
},
},
}
# FIXME: Caching is disabled COMPLETELY for now.
# See: #680
CACHES = {
'api': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
'TIMEOUT': 60 * 60,
},
}
# A setting indicating whether to save XML datasets (files) to local machine or
# not:
DOWNLOAD_DATASETS = True
try:
from .local_settings import * # noqa: F401, F403
except ImportError:
pass
|
openaid-IATI/OIPA
|
OIPA/OIPA/production_settings.py
|
Python
|
agpl-3.0
| 3,108
|
#!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-
#
# NetProfile: Pyramid event subscribers
# © Copyright 2013-2015 Alex 'Unik' Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division
)
from pyramid.i18n import (
TranslationString,
get_localizer
)
from pyramid.settings import asbool
def add_renderer_globals(event):
request = event['request']
if hasattr(request, 'translate'):
event['_'] = request.translate
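# Attach a translator to every new request; its default gettext domain is 'netprofile_' plus
# the first component of the matched route's name (falling back to 'netprofile_core').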
def on_new_request(event):
request = event.request
mr = request.matched_route
if mr is None:
mr = 'netprofile_core'
else:
mr = 'netprofile_' + mr.name.split('.')[0]
def auto_translate(*args, **kwargs):
if 'domain' not in kwargs:
kwargs['domain'] = mr
return get_localizer(request).translate(TranslationString(*args, **kwargs))
request.translate = auto_translate
def on_response(event):
settings = event.request.registry.settings
res = event.response
# FIXME: add CSP
res.headerlist.append(('X-Content-Type-Options', 'nosniff'))
if 'X-Frame-Options' not in res.headers:
res.headerlist.append(('X-Frame-Options', 'DENY'))
if asbool(settings.get('netprofile.http.sts.enabled', False)):
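        # Build the Strict-Transport-Security value from the netprofile.http.sts.* settings
        # (max-age, plus optional includeSubDomains and preload directives)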
try:
max_age = int(settings.get('netprofile.http.sts.max_age', 604800))
except (TypeError, ValueError):
max_age = 604800
sts_chunks = [ 'max-age=' + str(max_age) ]
if asbool(settings.get('netprofile.http.sts.include_subdomains', False)):
sts_chunks.append('includeSubDomains')
if asbool(settings.get('netprofile.http.sts.preload', False)):
sts_chunks.append('preload')
res.headerlist.append(('Strict-Transport-Security', '; '.join(sts_chunks)))
|
nikitos/npui
|
netprofile/netprofile/common/subscribers.py
|
Python
|
agpl-3.0
| 2,364
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Jean Gabes, naparuba@gmail.com
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from alignak_test import *
class TestServiceTplNoHostname(AlignakTest):
def setUp(self):
self.setup_with_file(['etc/alignak_servicetpl_no_hostname.cfg'])
def test_dummy(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
self.scheduler_loop(2, [[host, 0, 'UP | value1=1 value2=2'], [router, 0, 'UP | rtt=10'], [svc, 2, 'BAD | value1=0 value2=0']])
self.assertEqual('UP', host.state)
self.assertEqual('HARD', host.state_type)
if __name__ == '__main__':
unittest.main()
|
gst/alignak
|
test/test_servicetpl_no_hostname.py
|
Python
|
agpl-3.0
| 2,892
|
# Copyright 2019 Open Source Integrators
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from . import stock_request_order
from . import stock_picking_type
|
OCA/stock-logistics-warehouse
|
stock_request_picking_type/models/__init__.py
|
Python
|
agpl-3.0
| 179
|
# Generated by Django 2.2.13 on 2020-11-09 17:54
from django.db import migrations
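# Forward migration: the old boolean `keep` becomes keep_for = -1 (keep forever) when it was
# set, otherwise 168 (presumably hours, i.e. one week); the reverse maps -1 back to keep=True.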
def keep_to_for(apps, schema_editor):
Program = apps.get_model("pipeline", "Program")
for program in Program.objects.all():
if program.keep:
program.keep_for = -1
else:
program.keep_for = 168
program.save()
def for_to_keep(apps, schema_editor):
Program = apps.get_model("pipeline", "Program")
for program in Program.objects.all():
program.keep = program.keep_for == -1
program.save()
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0012_auto_20201109_1254'),
]
operations = [
migrations.RunPython(keep_to_for, for_to_keep),
]
|
IQSS/gentb-site
|
apps/pipeline/migrations/0013_auto_20201109_1254.py
|
Python
|
agpl-3.0
| 748
|
#
# ovirt-host-deploy -- ovirt host deployer
# Copyright (C) 2012-2015 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
"""Serial console PKI artifacts."""
import gettext
from otopi import plugin
from otopi import util
from ovirt_host_deploy import constants as odeploycons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-host-deploy')
@util.export
class Plugin(plugin.PluginBase):
def __init__(self, context):
super(Plugin, self).__init__(context=context)
self._enabled = False
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
)
def _init(self):
self._enabled = False
self.environment.setdefault(
odeploycons.VMConsoleEnv.ENABLE,
False
)
self.environment.setdefault(
odeploycons.VMConsoleEnv.SUPPORT,
odeploycons.Const.VMCONSOLE_SUPPORT_NONE
)
@plugin.event(
stage=plugin.Stages.STAGE_CUSTOMIZATION,
priority=plugin.Stages.PRIORITY_HIGH,
)
def _customization(self):
if self.packager.queryPackages(
patterns=('ovirt-vmconsole-host',),
):
self.environment[
odeploycons.VMConsoleEnv.SUPPORT
] = odeploycons.Const.VMCONSOLE_SUPPORT_V1
@plugin.event(
stage=plugin.Stages.STAGE_VALIDATION,
condition=lambda self: self.environment[
odeploycons.VMConsoleEnv.ENABLE
]
)
def _validation(self):
self._enabled = True
@plugin.event(
stage=plugin.Stages.STAGE_PACKAGES,
condition=lambda self: self._enabled,
)
def _packages(self):
self.packager.installUpdate(('ovirt-vmconsole-host',))
@plugin.event(
stage=plugin.Stages.STAGE_CLOSEUP,
priority=plugin.Stages.PRIORITY_LOW,
condition=lambda self: self._enabled,
)
def _start(self):
self.logger.info(_('Starting ovirt-vmconsole-host-sshd'))
if self.services.exists('ovirt-vmconsole-host-sshd'):
self.services.state('ovirt-vmconsole-host-sshd', False)
self.services.state('ovirt-vmconsole-host-sshd', True)
self.services.startup('ovirt-vmconsole-host-sshd', True)
# vim: expandtab tabstop=4 shiftwidth=4
|
alonbl/ovirt-host-deploy
|
src/plugins/ovirt-host-deploy/vmconsole/packages.py
|
Python
|
lgpl-2.1
| 2,970
|
#!/usr/bin/env python
from peacock.PeacockMainWindow import PeacockMainWindow
from peacock.utils import Testing
import argparse, os
class Tests(Testing.PeacockTester):
def newWidget(self, args=[]):
parser = argparse.ArgumentParser()
PeacockMainWindow.commandLineArgs(parser)
w = PeacockMainWindow()
w.show()
w.initialize(parser.parse_args(args))
return w
def testConsole(self):
w = self.newWidget()
self.assertEqual(w.console.isVisible(), False)
w._showConsole()
self.assertEqual(w.console.isVisible(), True)
w.setPythonVariable("foo", "bar")
def testConnections(self):
w = self.newWidget(args=[])
path = Testing.find_moose_test_exe()
w.tab_plugin.ExecuteTabPlugin.ExecuteOptionsPlugin.setExecutablePath(path)
self.assertIn(path, w.windowTitle())
runner = w.tab_plugin.ExecuteTabPlugin.ExecuteRunnerPlugin
self.assertEqual(runner._total_steps, 0)
w.tab_plugin.InputFileEditorWithMesh.setInputFile("../../common/transient.i")
self.assertIn("transient.i", w.windowTitle())
self.assertEqual(runner._total_steps, 8)
w.tab_plugin.ExecuteTabPlugin.ExecuteRunnerPlugin.runClicked()
Testing.process_events(self.qapp, t=2)
self.assertTrue(os.path.exists("out_transient.e"))
if __name__ == '__main__':
Testing.run_tests()
|
backmari/moose
|
python/peacock/tests/peacock_app/PeacockMainWindow/test_PeacockMainWindow.py
|
Python
|
lgpl-2.1
| 1,420
|
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Package for parsing and processing descriptor data.
**Module Overview:**
::
parse_file - Parses the descriptors in a file.
Descriptor - Common parent for all descriptor file types.
|- get_path - location of the descriptor on disk if it came from a file
|- get_archive_path - location of the descriptor within the archive it came from
|- get_bytes - similar to str(), but provides our original bytes content
|- get_unrecognized_lines - unparsed descriptor content
+- __str__ - string that the descriptor was made from
.. data:: DocumentHandler (enum)
Ways in which we can parse a
:class:`~stem.descriptor.networkstatus.NetworkStatusDocument`.
Both **ENTRIES** and **BARE_DOCUMENT** have a 'thin' document, which doesn't
have a populated **routers** attribute. This allows for lower memory usage
and upfront runtime. However, if read time and memory aren't a concern then
**DOCUMENT** can provide you with a fully populated document.
=================== ===========
DocumentHandler Description
=================== ===========
**ENTRIES** Iterates over the contained :class:`~stem.descriptor.router_status_entry.RouterStatusEntry`. Each has a reference to the bare document it came from (through its **document** attribute).
**DOCUMENT** :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` with the :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` it contains (through its **routers** attribute).
**BARE_DOCUMENT** :class:`~stem.descriptor.networkstatus.NetworkStatusDocument` **without** a reference to its contents (the :class:`~stem.descriptor.router_status_entry.RouterStatusEntry` are unread).
=================== ===========
"""
__all__ = [
'export',
'reader',
'remote',
'extrainfo_descriptor',
'server_descriptor',
'microdescriptor',
'networkstatus',
'router_status_entry',
'tordnsel',
'parse_file',
'Descriptor',
]
import base64
import codecs
import copy
import hashlib
import os
import re
import tarfile
import stem.prereq
import stem.util.enum
import stem.util.str_tools
import stem.util.system
from stem import str_type
try:
# added in python 2.7
from collections import OrderedDict
except ImportError:
from stem.util.ordereddict import OrderedDict
KEYWORD_CHAR = 'a-zA-Z0-9-'
WHITESPACE = ' \t'
KEYWORD_LINE = re.compile('^([%s]+)(?:[%s]+(.*))?$' % (KEYWORD_CHAR, WHITESPACE))
SPECIFIC_KEYWORD_LINE = '^(%%s)(?:[%s]+(.*))?$' % WHITESPACE
PGP_BLOCK_START = re.compile('^-----BEGIN ([%s%s]+)-----$' % (KEYWORD_CHAR, WHITESPACE))
PGP_BLOCK_END = '-----END %s-----'
DocumentHandler = stem.util.enum.UppercaseEnum(
'ENTRIES',
'DOCUMENT',
'BARE_DOCUMENT',
)
def parse_file(descriptor_file, descriptor_type = None, validate = False, document_handler = DocumentHandler.ENTRIES, **kwargs):
"""
Simple function to read the descriptor contents from a file, providing an
iterator for its :class:`~stem.descriptor.__init__.Descriptor` contents.
If you don't provide a **descriptor_type** argument then this automatically
tries to determine the descriptor type based on the following...
* The @type annotation on the first line. These are generally only found in
the `CollecTor archives <https://collector.torproject.org/formats.html#relay-descriptors>`_.
* The filename if it matches something from tor's data directory. For
instance, tor's 'cached-descriptors' contains server descriptors.
This is a handy function for simple usage, but if you're reading multiple
descriptor files you might want to consider the
:class:`~stem.descriptor.reader.DescriptorReader`.
Descriptor types include the following, including further minor versions (ie.
if we support 1.1 then we also support everything from 1.0 and most things
from 1.2, but not 2.0)...
========================================= =====
Descriptor Type Class
========================================= =====
server-descriptor 1.0 :class:`~stem.descriptor.server_descriptor.RelayDescriptor`
extra-info 1.0 :class:`~stem.descriptor.extrainfo_descriptor.RelayExtraInfoDescriptor`
microdescriptor 1.0 :class:`~stem.descriptor.microdescriptor.Microdescriptor`
directory 1.0 **unsupported**
network-status-2 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV2` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV2`)
dir-key-certificate-3 1.0 :class:`~stem.descriptor.networkstatus.KeyCertificate`
network-status-consensus-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
network-status-vote-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
network-status-microdesc-consensus-3 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3` (with a :class:`~stem.descriptor.networkstatus.NetworkStatusDocumentV3`)
bridge-network-status 1.0 :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` (with a :class:`~stem.descriptor.networkstatus.BridgeNetworkStatusDocument`)
bridge-server-descriptor 1.0 :class:`~stem.descriptor.server_descriptor.BridgeDescriptor`
bridge-extra-info 1.1 or 1.2 :class:`~stem.descriptor.extrainfo_descriptor.BridgeExtraInfoDescriptor`
torperf 1.0 **unsupported**
bridge-pool-assignment 1.0 **unsupported**
tordnsel 1.0 :class:`~stem.descriptor.tordnsel.TorDNSEL`
hidden-service-descriptor 1.0 :class:`~stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor`
========================================= =====
If you're using **python 3** then beware that the open() function defaults to
using text mode. **Binary mode** is strongly suggested because it's both
  faster (in my testing, by about 33x) and doesn't do universal newline
translation which can make us misparse the document.
::
my_descriptor_file = open(descriptor_path, 'rb')
:param str,file,tarfile descriptor_file: path or opened file with the descriptor contents
:param str descriptor_type: `descriptor type <https://collector.torproject.org/formats.html>`_, this is guessed if not provided
:param bool validate: checks the validity of the descriptor's content if
**True**, skips these checks otherwise
:param stem.descriptor.__init__.DocumentHandler document_handler: method in
which to parse the :class:`~stem.descriptor.networkstatus.NetworkStatusDocument`
:param dict kwargs: additional arguments for the descriptor constructor
:returns: iterator for :class:`~stem.descriptor.__init__.Descriptor` instances in the file
:raises:
* **ValueError** if the contents is malformed and validate is True
* **TypeError** if we can't match the contents of the file to a descriptor type
* **IOError** if unable to read from the descriptor_file
"""
# Delegate to a helper if this is a path or tarfile.
handler = None
if isinstance(descriptor_file, (bytes, str_type)):
if stem.util.system.is_tarfile(descriptor_file):
handler = _parse_file_for_tar_path
else:
handler = _parse_file_for_path
elif isinstance(descriptor_file, tarfile.TarFile):
handler = _parse_file_for_tarfile
if handler:
for desc in handler(descriptor_file, descriptor_type, validate, document_handler, **kwargs):
yield desc
return
# The tor descriptor specifications do not provide a reliable method for
# identifying a descriptor file's type and version so we need to guess
# based on its filename. Metrics descriptors, however, can be identified
# by an annotation on their first line...
# https://trac.torproject.org/5651
initial_position = descriptor_file.tell()
first_line = stem.util.str_tools._to_unicode(descriptor_file.readline().strip())
  metrics_header_match = re.match('^@type (\S+) (\d+)\.(\d+)$', first_line)
if not metrics_header_match:
descriptor_file.seek(initial_position)
descriptor_path = getattr(descriptor_file, 'name', None)
filename = '<undefined>' if descriptor_path is None else os.path.basename(descriptor_file.name)
file_parser = None
if descriptor_type is not None:
    descriptor_type_match = re.match('^(\S+) (\d+)\.(\d+)$', descriptor_type)
if descriptor_type_match:
desc_type, major_version, minor_version = descriptor_type_match.groups()
file_parser = lambda f: _parse_metrics_file(desc_type, int(major_version), int(minor_version), f, validate, document_handler, **kwargs)
else:
raise ValueError("The descriptor_type must be of the form '<type> <major_version>.<minor_version>'")
elif metrics_header_match:
# Metrics descriptor handling
desc_type, major_version, minor_version = metrics_header_match.groups()
file_parser = lambda f: _parse_metrics_file(desc_type, int(major_version), int(minor_version), f, validate, document_handler, **kwargs)
else:
# Cached descriptor handling. These contain multiple descriptors per file.
if filename == 'cached-descriptors' or filename == 'cached-descriptors.new':
file_parser = lambda f: stem.descriptor.server_descriptor._parse_file(f, validate = validate, **kwargs)
elif filename == 'cached-extrainfo' or filename == 'cached-extrainfo.new':
file_parser = lambda f: stem.descriptor.extrainfo_descriptor._parse_file(f, validate = validate, **kwargs)
elif filename == 'cached-microdescs' or filename == 'cached-microdescs.new':
file_parser = lambda f: stem.descriptor.microdescriptor._parse_file(f, validate = validate, **kwargs)
elif filename == 'cached-consensus':
file_parser = lambda f: stem.descriptor.networkstatus._parse_file(f, validate = validate, document_handler = document_handler, **kwargs)
elif filename == 'cached-microdesc-consensus':
file_parser = lambda f: stem.descriptor.networkstatus._parse_file(f, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs)
if file_parser:
for desc in file_parser(descriptor_file):
if descriptor_path is not None:
desc._set_path(os.path.abspath(descriptor_path))
yield desc
return
# Not recognized as a descriptor file.
raise TypeError("Unable to determine the descriptor's type. filename: '%s', first line: '%s'" % (filename, first_line))
def _parse_file_for_path(descriptor_file, *args, **kwargs):
with open(descriptor_file, 'rb') as desc_file:
for desc in parse_file(desc_file, *args, **kwargs):
yield desc
def _parse_file_for_tar_path(descriptor_file, *args, **kwargs):
# TODO: use 'with' for tarfile after dropping python 2.6 support
tar_file = tarfile.open(descriptor_file)
try:
for desc in parse_file(tar_file, *args, **kwargs):
desc._set_path(os.path.abspath(descriptor_file))
yield desc
finally:
if tar_file:
tar_file.close()
def _parse_file_for_tarfile(descriptor_file, *args, **kwargs):
for tar_entry in descriptor_file:
if tar_entry.isfile():
entry = descriptor_file.extractfile(tar_entry)
try:
for desc in parse_file(entry, *args, **kwargs):
desc._set_archive_path(entry.name)
yield desc
finally:
entry.close()
def _parse_metrics_file(descriptor_type, major_version, minor_version, descriptor_file, validate, document_handler, **kwargs):
# Parses descriptor files from metrics, yielding individual descriptors. This
# throws a TypeError if the descriptor_type or version isn't recognized.
if descriptor_type == 'server-descriptor' and major_version == 1:
for desc in stem.descriptor.server_descriptor._parse_file(descriptor_file, is_bridge = False, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'bridge-server-descriptor' and major_version == 1:
for desc in stem.descriptor.server_descriptor._parse_file(descriptor_file, is_bridge = True, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'extra-info' and major_version == 1:
for desc in stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, is_bridge = False, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'microdescriptor' and major_version == 1:
for desc in stem.descriptor.microdescriptor._parse_file(descriptor_file, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'bridge-extra-info' and major_version == 1:
# version 1.1 introduced a 'transport' field...
# https://trac.torproject.org/6257
for desc in stem.descriptor.extrainfo_descriptor._parse_file(descriptor_file, is_bridge = True, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'network-status-2' and major_version == 1:
document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV2
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == 'dir-key-certificate-3' and major_version == 1:
for desc in stem.descriptor.networkstatus._parse_file_key_certs(descriptor_file, validate = validate, **kwargs):
yield desc
elif descriptor_type in ('network-status-consensus-3', 'network-status-vote-3') and major_version == 1:
document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV3
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == 'network-status-microdesc-consensus-3' and major_version == 1:
document_type = stem.descriptor.networkstatus.NetworkStatusDocumentV3
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, is_microdescriptor = True, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == 'bridge-network-status' and major_version == 1:
document_type = stem.descriptor.networkstatus.BridgeNetworkStatusDocument
for desc in stem.descriptor.networkstatus._parse_file(descriptor_file, document_type, validate = validate, document_handler = document_handler, **kwargs):
yield desc
elif descriptor_type == 'tordnsel' and major_version == 1:
document_type = stem.descriptor.tordnsel.TorDNSEL
for desc in stem.descriptor.tordnsel._parse_file(descriptor_file, validate = validate, **kwargs):
yield desc
elif descriptor_type == 'hidden-service-descriptor' and major_version == 1:
document_type = stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor
for desc in stem.descriptor.hidden_service_descriptor._parse_file(descriptor_file, validate = validate, **kwargs):
yield desc
else:
raise TypeError("Unrecognized metrics descriptor format. type: '%s', version: '%i.%i'" % (descriptor_type, major_version, minor_version))
def _value(line, entries):
return entries[line][0][0]
def _values(line, entries):
return [entry[0] for entry in entries[line]]
def _parse_simple_line(keyword, attribute):
def _parse(descriptor, entries):
setattr(descriptor, attribute, _value(keyword, entries))
return _parse
def _parse_bytes_line(keyword, attribute):
def _parse(descriptor, entries):
line_match = re.search(stem.util.str_tools._to_bytes('^(opt )?%s(?:[%s]+(.*))?$' % (keyword, WHITESPACE)), descriptor.get_bytes(), re.MULTILINE)
result = None
if line_match:
value = line_match.groups()[1]
result = b'' if value is None else value
setattr(descriptor, attribute, result)
return _parse
def _parse_timestamp_line(keyword, attribute):
# "<keyword>" YYYY-MM-DD HH:MM:SS
def _parse(descriptor, entries):
value = _value(keyword, entries)
try:
setattr(descriptor, attribute, stem.util.str_tools._parse_timestamp(value))
except ValueError:
raise ValueError("Timestamp on %s line wasn't parsable: %s %s" % (keyword, keyword, value))
return _parse
def _parse_forty_character_hex(keyword, attribute):
# format of fingerprints, sha1 digests, etc
def _parse(descriptor, entries):
value = _value(keyword, entries)
if not stem.util.tor_tools.is_hex_digits(value, 40):
raise ValueError('%s line had an invalid value (should be 40 hex characters): %s %s' % (keyword, keyword, value))
setattr(descriptor, attribute, value)
return _parse
def _parse_key_block(keyword, attribute, expected_block_type, value_attribute = None):
def _parse(descriptor, entries):
value, block_type, block_contents = entries[keyword][0]
if not block_contents or block_type != expected_block_type:
raise ValueError("'%s' should be followed by a %s block, but was a %s" % (keyword, expected_block_type, block_type))
setattr(descriptor, attribute, block_contents)
if value_attribute:
setattr(descriptor, value_attribute, value)
return _parse
class Descriptor(object):
"""
Common parent for all types of descriptors.
"""
ATTRIBUTES = {} # mapping of 'attribute' => (default_value, parsing_function)
PARSER_FOR_LINE = {} # line keyword to its associated parsing function
def __init__(self, contents, lazy_load = False):
self._path = None
self._archive_path = None
self._raw_contents = contents
self._lazy_loading = lazy_load
self._entries = {}
self._unrecognized_lines = []
def get_path(self):
"""
Provides the absolute path that we loaded this descriptor from.
:returns: **str** with the absolute path of the descriptor source
"""
return self._path
def get_archive_path(self):
"""
If this descriptor came from an archive then provides its path within the
archive. This is only set if the descriptor came from a
:class:`~stem.descriptor.reader.DescriptorReader`, and is **None** if this
descriptor didn't come from an archive.
:returns: **str** with the descriptor's path within the archive
"""
return self._archive_path
def get_bytes(self):
"""
Provides the ASCII **bytes** of the descriptor. This only differs from
**str()** if you're running python 3.x, in which case **str()** provides a
**unicode** string.
:returns: **bytes** for the descriptor's contents
"""
return self._raw_contents
def get_unrecognized_lines(self):
"""
Provides a list of lines that were either ignored or had data that we did
not know how to process. This is most common due to new descriptor fields
that this library does not yet know how to process. Patches welcome!
:returns: **list** of lines of unrecognized content
"""
if self._lazy_loading:
# we need to go ahead and parse the whole document to figure this out
self._parse(self._entries, False)
self._lazy_loading = False
return list(self._unrecognized_lines)
def _parse(self, entries, validate, parser_for_line = None):
"""
Parses a series of 'keyword => (value, pgp block)' mappings and applies
them as attributes.
:param dict entries: descriptor contents to be applied
:param bool validate: checks the validity of descriptor content if True
    :param dict parser_for_line: mapping of line keywords to the function for parsing them
:raises: **ValueError** if an error occurs in validation
"""
if parser_for_line is None:
parser_for_line = self.PARSER_FOR_LINE
# set defaults
for attr in self.ATTRIBUTES:
if not hasattr(self, attr):
setattr(self, attr, copy.copy(self.ATTRIBUTES[attr][0]))
for keyword, values in list(entries.items()):
try:
if keyword in parser_for_line:
parser_for_line[keyword](self, entries)
else:
for value, block_type, block_contents in values:
line = '%s %s' % (keyword, value)
if block_contents:
line += '\n%s' % block_contents
self._unrecognized_lines.append(line)
except ValueError as exc:
if validate:
raise exc
def _set_path(self, path):
self._path = path
def _set_archive_path(self, path):
self._archive_path = path
def _name(self, is_plural = False):
return str(type(self))
def _digest_for_signature(self, signing_key, signature):
"""
Provides the signed digest we should have given this key and signature.
:param str signing_key: key block used to make this signature
:param str signature: signed digest for this descriptor content
:returns: the digest string encoded in uppercase hex
:raises: ValueError if unable to provide a validly signed digest
"""
if not stem.prereq.is_crypto_available():
raise ValueError('Generating the signed digest requires pycrypto')
from Crypto.Util import asn1
from Crypto.Util.number import bytes_to_long, long_to_bytes
# get the ASN.1 sequence
seq = asn1.DerSequence()
seq.decode(_bytes_for_block(signing_key))
modulus, public_exponent = seq[0], seq[1]
sig_as_bytes = _bytes_for_block(signature)
sig_as_long = bytes_to_long(sig_as_bytes) # convert signature to an int
blocksize = 128 # block size will always be 128 for a 1024 bit key
# use the public exponent[e] & the modulus[n] to decrypt the int
decrypted_int = pow(sig_as_long, public_exponent, modulus)
# convert the int to a byte array
decrypted_bytes = long_to_bytes(decrypted_int, blocksize)
############################################################################
# The decrypted bytes should have a structure exactly along these lines.
# 1 byte - [null '\x00']
# 1 byte - [block type identifier '\x01'] - Should always be 1
# N bytes - [padding '\xFF' ]
# 1 byte - [separator '\x00' ]
# M bytes - [message]
# Total - 128 bytes
# More info here http://www.ietf.org/rfc/rfc2313.txt
# esp the Notes in section 8.1
############################################################################
try:
if decrypted_bytes.index(b'\x00\x01') != 0:
raise ValueError('Verification failed, identifier missing')
except ValueError:
raise ValueError('Verification failed, malformed data')
try:
identifier_offset = 2
# find the separator
      separator_index = decrypted_bytes.index(b'\x00', identifier_offset)
    except ValueError:
      raise ValueError('Verification failed, separator not found')
    digest_hex = codecs.encode(decrypted_bytes[separator_index + 1:], 'hex_codec')
return stem.util.str_tools._to_unicode(digest_hex.upper())
def _digest_for_content(self, start, end):
"""
Provides the digest of our descriptor's content in a given range.
:param bytes start: start of the range to generate a digest for
:param bytes end: end of the range to generate a digest for
:returns: the digest string encoded in uppercase hex
    :raises: ValueError if the digest cannot be calculated
"""
raw_descriptor = self.get_bytes()
start_index = raw_descriptor.find(start)
end_index = raw_descriptor.find(end, start_index)
if start_index == -1:
raise ValueError("Digest is for the range starting with '%s' but that isn't in our descriptor" % start)
elif end_index == -1:
raise ValueError("Digest is for the range ending with '%s' but that isn't in our descriptor" % end)
digest_content = raw_descriptor[start_index:end_index + len(end)]
digest_hash = hashlib.sha1(stem.util.str_tools._to_bytes(digest_content))
return stem.util.str_tools._to_unicode(digest_hash.hexdigest().upper())
def __getattr__(self, name):
# If attribute isn't already present we might be lazy loading it...
if self._lazy_loading and name in self.ATTRIBUTES:
default, parsing_function = self.ATTRIBUTES[name]
try:
parsing_function(self, self._entries)
except (ValueError, KeyError):
try:
# despite having a validation failure check to see if we set something
return super(Descriptor, self).__getattribute__(name)
except AttributeError:
setattr(self, name, copy.copy(default))
return super(Descriptor, self).__getattribute__(name)
def __str__(self):
if stem.prereq.is_python_3():
return stem.util.str_tools._to_unicode(self._raw_contents)
else:
return self._raw_contents
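# Illustrative sketch (not from stem itself): how the _parse_*_line factories
# above are typically wired into a Descriptor subclass. The class name and its
# single 'published' keyword are assumptions for demonstration; nothing in this
# module instantiates it.
_example_parse_published = _parse_timestamp_line('published', 'published')
class _ExampleDescriptor(Descriptor):
  ATTRIBUTES = {'published': (None, _example_parse_published)}
  PARSER_FOR_LINE = {'published': _example_parse_published}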
def _read_until_keywords(keywords, descriptor_file, inclusive = False, ignore_first = False, skip = False, end_position = None, include_ending_keyword = False):
"""
Reads from the descriptor file until we get to one of the given keywords or reach the
end of the file.
:param str,list keywords: keyword(s) we want to read until
:param file descriptor_file: file with the descriptor content
:param bool inclusive: includes the line with the keyword if True
:param bool ignore_first: doesn't check if the first line read has one of the
given keywords
:param bool skip: skips buffering content, returning None
:param int end_position: end if we reach this point in the file
:param bool include_ending_keyword: provides the keyword we broke on if **True**
:returns: **list** with the lines until we find one of the keywords, this is
a two value tuple with the ending keyword if include_ending_keyword is
**True**
"""
if skip:
content = None
content_append = lambda x: None
else:
content = []
content_append = content.append
ending_keyword = None
if isinstance(keywords, (bytes, str_type)):
keywords = (keywords,)
if ignore_first:
first_line = descriptor_file.readline()
if first_line:
content_append(first_line)
keyword_match = re.compile(SPECIFIC_KEYWORD_LINE % '|'.join(keywords))
while True:
last_position = descriptor_file.tell()
if end_position and last_position >= end_position:
break
line = descriptor_file.readline()
if not line:
break # EOF
line_match = keyword_match.match(stem.util.str_tools._to_unicode(line))
if line_match:
ending_keyword = line_match.groups()[0]
if not inclusive:
descriptor_file.seek(last_position)
else:
content_append(line)
break
else:
content_append(line)
if include_ending_keyword:
return (content, ending_keyword)
else:
return content
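# Illustrative sketch (not from stem): _read_until_keywords against an
# in-memory file object. The content below is a made-up example and the helper
# is never called.
def _example_read_until_keywords():
  import io

  descriptor_file = io.BytesIO(b'published 2015-01-01 12:00:00\nrouter-signature\n')
  return _read_until_keywords('router-signature', descriptor_file)
  # == [b'published 2015-01-01 12:00:00\n']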
def _bytes_for_block(content):
"""
Provides the base64 decoded content of a pgp-style block.
:param str content: block to be decoded
:returns: decoded block content
:raises: **TypeError** if this isn't base64 encoded content
"""
# strip the '-----BEGIN RSA PUBLIC KEY-----' header and footer
content = ''.join(content.split('\n')[1:-1])
return base64.b64decode(stem.util.str_tools._to_bytes(content))
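# Illustrative sketch (not from stem): _bytes_for_block simply drops the armor
# lines and base64-decodes the remainder. The block below is a made-up example.
def _example_bytes_for_block():
  block = '-----BEGIN EXAMPLE-----\nYWJj\n-----END EXAMPLE-----'
  return _bytes_for_block(block)  # b'abc'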
def _get_pseudo_pgp_block(remaining_contents):
"""
Checks if given contents begins with a pseudo-Open-PGP-style block and, if
so, pops it off and provides it back to the caller.
:param list remaining_contents: lines to be checked for a public key block
:returns: **tuple** of the (block_type, content) or None if it doesn't exist
:raises: **ValueError** if the contents starts with a key block but it's
malformed (for instance, if it lacks an ending line)
"""
if not remaining_contents:
return None # nothing left
block_match = PGP_BLOCK_START.match(remaining_contents[0])
if block_match:
block_type = block_match.groups()[0]
block_lines = []
end_line = PGP_BLOCK_END % block_type
while True:
if not remaining_contents:
raise ValueError("Unterminated pgp style block (looking for '%s'):\n%s" % (end_line, '\n'.join(block_lines)))
line = remaining_contents.pop(0)
block_lines.append(line)
if line == end_line:
return (block_type, '\n'.join(block_lines))
else:
return None
def _get_descriptor_components(raw_contents, validate, extra_keywords = ()):
"""
Initial breakup of the server descriptor contents to make parsing easier.
A descriptor contains a series of 'keyword lines' which are simply a keyword
followed by an optional value. Lines can also be followed by a signature
block.
To get a sub-listing with just certain keywords use extra_keywords. This can
be useful if we care about their relative ordering with respect to each
other. For instance, we care about the ordering of 'accept' and 'reject'
entries because this influences the resulting exit policy, but for everything
else in server descriptors the order does not matter.
:param str raw_contents: descriptor content provided by the relay
:param bool validate: checks the validity of the descriptor's content if
True, skips these checks otherwise
:param list extra_keywords: entity keywords to put into a separate listing
with ordering intact
:returns:
**collections.OrderedDict** with the 'keyword => (value, pgp key) entries'
    mappings. If extra_keywords was provided then this instead provides a two
value tuple, the second being a list of those entries.
"""
if isinstance(raw_contents, bytes):
raw_contents = stem.util.str_tools._to_unicode(raw_contents)
entries = OrderedDict()
extra_entries = [] # entries with a keyword in extra_keywords
remaining_lines = raw_contents.split('\n')
while remaining_lines:
line = remaining_lines.pop(0)
# V2 network status documents explicitly can contain blank lines...
#
# "Implementations MAY insert blank lines for clarity between sections;
# these blank lines are ignored."
#
# ... and server descriptors end with an extra newline. But other documents
# don't say how blank lines should be handled so globally ignoring them.
if not line:
continue
# Some lines have an 'opt ' for backward compatibility. They should be
# ignored. This prefix is being removed in...
# https://trac.torproject.org/projects/tor/ticket/5124
if line.startswith('opt '):
line = line[4:]
line_match = KEYWORD_LINE.match(line)
if not line_match:
if not validate:
continue
raise ValueError('Line contains invalid characters: %s' % line)
keyword, value = line_match.groups()
if value is None:
value = ''
try:
block_attr = _get_pseudo_pgp_block(remaining_lines)
if block_attr:
block_type, block_contents = block_attr
else:
block_type, block_contents = None, None
except ValueError as exc:
if not validate:
continue
raise exc
if keyword in extra_keywords:
extra_entries.append('%s %s' % (keyword, value))
else:
entries.setdefault(keyword, []).append((value, block_type, block_contents))
if extra_keywords:
return entries, extra_entries
else:
return entries
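# Illustrative sketch (not from stem): the shape of the mapping returned by
# _get_descriptor_components for a tiny two-line document. The sample content
# is a made-up example and the helper is never called.
def _example_descriptor_components():
  raw = 'published 2015-01-01 12:00:00\nfingerprint 4F0C867DF0EF68160568C826838F482CEA7CFE44'
  entries = _get_descriptor_components(raw, False)
  # entries['published'] == [('2015-01-01 12:00:00', None, None)]
  return entries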
# importing at the end to avoid circular dependencies on our Descriptor class
import stem.descriptor.server_descriptor
import stem.descriptor.extrainfo_descriptor
import stem.descriptor.networkstatus
import stem.descriptor.microdescriptor
import stem.descriptor.tordnsel
import stem.descriptor.hidden_service_descriptor
|
FedericoCeratto/stem
|
stem/descriptor/__init__.py
|
Python
|
lgpl-3.0
| 31,596
|
# Copyright Cartopy Contributors
#
# This file is part of Cartopy and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
#
# cython: embedsignature=True
"""
This module defines the Geodesic class which can interface with the Proj
geodesic functions.
"""
import numpy as np
import pyproj
import shapely.geometry as sgeom
class Geodesic:
"""
Define an ellipsoid on which to solve geodesic problems.
"""
def __init__(self, radius=6378137.0, flattening=1/298.257223563):
"""
Parameters
----------
radius: float, optional
Equatorial radius (metres). Defaults to the WGS84 semimajor axis
(6378137.0 metres).
flattening: float, optional
Flattening of ellipsoid. Setting flattening = 0 gives a sphere.
Negative flattening gives a prolate ellipsoid. If flattening > 1,
set flattening to 1/flattening.
Defaults to the WGS84 flattening (1/298.257223563).
"""
if flattening > 1:
flattening = 1 / flattening
self.geod = pyproj.Geod(a=radius, f=flattening)
self.radius = radius
self.flattening = flattening
def __str__(self):
return (f'<Geodesic: radius={self.radius:0.3f}, '
f'flattening=1/{1/self.flattening:0.3f}>')
def direct(self, points, azimuths, distances):
"""
Solve the direct geodesic problem where the length of the geodesic is
specified in terms of distance.
Can accept and broadcast length 1 arguments. For example, given a
single start point and distance, an array of different azimuths can be
supplied to locate multiple endpoints.
Parameters
----------
points: array_like, shape=(n *or* 1, 2)
The starting longitude-latitude point(s) from which to travel.
azimuths: float or array_like with shape=(n, )
List of azimuth values (degrees).
distances : float or array_like with shape(n, )
List of distances values (metres).
Returns
-------
`numpy.ndarray`, shape=(n, 3)
The longitudes, latitudes, and forward azimuths of the located
endpoint(s).
"""
# Create numpy arrays from inputs, and ensure correct shape. Note:
# reshape(-1) returns a 1D array from a 0 dimensional array as required
# for broadcasting.
pts = np.array(points, dtype=np.float64).reshape((-1, 2))
azims = np.array(azimuths, dtype=np.float64).reshape(-1)
dists = np.array(distances, dtype=np.float64).reshape(-1)
sizes = [pts.shape[0], azims.size, dists.size]
n_points = max(sizes)
if not all(size in [1, n_points] for size in sizes):
raise ValueError("Inputs must have common length n or length one.")
# Broadcast any length 1 arrays to the correct size.
if pts.shape[0] == 1:
orig_pts = pts
pts = np.empty([n_points, 2], dtype=np.float64)
pts[:, :] = orig_pts
if azims.size == 1:
azims = np.repeat(azims, n_points)
if dists.size == 1:
dists = np.repeat(dists, n_points)
lons, lats, azims = self.geod.fwd(pts[:, 0], pts[:, 1], azims, dists)
# Convert back azimuth to forward azimuth.
azims += np.where(azims > 0, -180, 180)
return np.column_stack([lons, lats, azims])
def inverse(self, points, endpoints):
"""
Solve the inverse geodesic problem.
Can accept and broadcast length 1 arguments. For example, given a
single start point, an array of different endpoints can be supplied to
find multiple distances.
Parameters
----------
points: array_like, shape=(n *or* 1, 2)
The starting longitude-latitude point(s) from which to travel.
endpoints: array_like, shape=(n *or* 1, 2)
The longitude-latitude point(s) to travel to.
Returns
-------
`numpy.ndarray`, shape=(n, 3)
The distances, and the (forward) azimuths of the start and end
points.
"""
# Create numpy arrays from inputs, and ensure correct shape.
points = np.array(points, dtype=np.float64)
endpoints = np.array(endpoints, dtype=np.float64)
if points.ndim > 2 or (points.ndim == 2 and points.shape[1] != 2):
raise ValueError(
f'Expecting input points to be (N, 2), got {points.shape}')
pts = points.reshape((-1, 2))
epts = endpoints.reshape((-1, 2))
sizes = [pts.shape[0], epts.shape[0]]
n_points = max(sizes)
if not all(size in [1, n_points] for size in sizes):
raise ValueError("Inputs must have common length n or length one.")
# Broadcast any length 1 arrays to the correct size.
if pts.shape[0] == 1:
orig_pts = pts
pts = np.empty([n_points, 2], dtype=np.float64)
pts[:, :] = orig_pts
if epts.shape[0] == 1:
orig_pts = epts
epts = np.empty([n_points, 2], dtype=np.float64)
epts[:, :] = orig_pts
start_azims, end_azims, dists = self.geod.inv(pts[:, 0], pts[:, 1],
epts[:, 0], epts[:, 1])
# Convert back azimuth to forward azimuth.
end_azims += np.where(end_azims > 0, -180, 180)
return np.column_stack([dists, start_azims, end_azims])
def circle(self, lon, lat, radius, n_samples=180, endpoint=False):
"""
Find a geodesic circle of given radius at a given point.
Parameters
----------
lon : float
Longitude coordinate of the centre.
lat : float
Latitude coordinate of the centre.
radius : float
The radius of the circle (metres).
n_samples: int, optional
Integer number of sample points of circle.
endpoint: bool, optional
Whether to repeat endpoint at the end of returned array.
Returns
-------
`numpy.ndarray`, shape=(n_samples, 2)
The evenly spaced longitude-latitude points on the circle.
"""
# Put the input arguments into c-typed values.
center = np.array([lon, lat]).reshape((1, 2))
radius_m = np.asarray(radius).reshape(1)
azimuths = np.linspace(360., 0., n_samples,
endpoint=endpoint).astype(np.double)
return self.direct(center, azimuths, radius_m)[:, 0:2]
def geometry_length(self, geometry):
"""
Return the distance (in physical meters) of the given Shapely geometry.
The geometry is assumed to be in spherical (lon, lat) coordinates.
Parameters
----------
geometry : `shapely.geometry.BaseGeometry`
The Shapely geometry to compute the length of. For polygons, the
exterior length will be calculated. For multi-part geometries, the
sum of the parts will be computed.
"""
result = None
if hasattr(geometry, 'geoms'):
# Multi-geometry.
result = sum(self.geometry_length(geom) for geom in geometry.geoms)
elif hasattr(geometry, 'exterior'):
# Polygon.
result = self.geometry_length(geometry.exterior)
elif (hasattr(geometry, 'coords') and
not isinstance(geometry, sgeom.Point)):
coords = np.array(geometry.coords)
result = self.geometry_length(coords)
elif isinstance(geometry, np.ndarray):
coords = geometry
distances, _, _ = np.array(
self.inverse(coords[:-1, :], coords[1:, :]).T)
result = distances.sum()
else:
raise TypeError(f'Unhandled type {geometry.__class__}')
return result
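# Illustrative sketch (not part of cartopy): exercising the broadcasting that
# Geodesic.direct documents -- one start point fanned out over several
# azimuths -- plus geometry_length on a short equatorial track. The
# coordinates are arbitrary example values and nothing in this module calls
# the helper.
def _example_geodesic_usage():
    geod = Geodesic()
    # One start point, four azimuths and a single 100 km distance broadcast to
    # four endpoints with shape (4, 3): lon, lat, forward azimuth.
    endpoints = geod.direct([0, 0], [0, 90, 180, 270], 100e3)
    # Geodesic length of a two-segment line along the equator, in metres.
    track = sgeom.LineString([(0, 0), (1, 0), (2, 0)])
    return endpoints, geod.geometry_length(track)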
|
SciTools/cartopy
|
lib/cartopy/geodesic.py
|
Python
|
lgpl-3.0
| 8,059
|
# -*- coding: utf-8 -*-
#
# Moonstone is platform for processing of medical images (DICOM).
# Copyright (C) 2009-2011 by Neppo Tecnologia da Informação LTDA
# and Aevum Softwares LTDA
#
# This file is part of Moonstone.
#
# Moonstone is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import icu
def decode_string(data):
if type(data) == unicode:
return data
charsetName = icu.CharsetDetector(data).detect().getName()
decoded = data.decode(charsetName)
return decoded
|
aevum/moonstone
|
src/moonstone/utils/utils.py
|
Python
|
lgpl-3.0
| 1,082
|
# RUN: env ARTIQ_DUMP_IR=%t %python -m artiq.compiler.testbench.embedding +compile %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t.txt
# XFAIL: *
from artiq.language.core import *
from artiq.language.types import *
class foo:
@kernel
def bar(self):
pass
x = foo()
@kernel
def entrypoint():
# CHECK-L: ; calls testbench.foo.bar
x.bar()
|
JQIamo/artiq
|
artiq/test/lit/devirtualization/method.py
|
Python
|
lgpl-3.0
| 363
|
import sys
def isAlly(ally):
return 0
def isEnemy(enemy):
if enemy == 'corsec' or enemy == 'cor_swoop':
return 1
return 0
|
ProjectSWGCore/NGECore2
|
scripts/faction/smashball.py
|
Python
|
lgpl-3.0
| 128
|
#!/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Fabio Falcinelli
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from setuptools import find_packages, setup, Command
import sys
__author__ = 'fabio'
class RunTests(Command):
description = 'Run the django test suite from the tests dir.'
user_options = []
extra_env = {}
extra_args = []
def run(self):
for env_name, env_value in self.extra_env.items():
os.environ[env_name] = str(env_value)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
def initialize_options(self):
pass
def finalize_options(self):
pass
setup(name='django-ejabberd-bridge',
version='0.0.1',
description='A django app for ejabberd external authentication',
author='Fabio Falcinelli',
author_email='fabio.falcinelli@gmail.com',
url='https://github.com/ffalcinelli/django-ejabberd-bridge',
keywords=['django', 'ejabberd', 'authentication'],
packages=find_packages(),
license="LGPLv3",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'django',
'mock'],
cmdclass={"test": RunTests}
)
|
JeansReal/django-ejabberd-bridge
|
setup.py
|
Python
|
lgpl-3.0
| 2,697
|
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from makegyp.core import command
module_path = os.path.abspath(__file__)
test_root_dir = os.path.dirname(module_path)
def test_library(name):
    print('* Test %r...' % name)
# Determines the directory of the tested library:
test_dir = os.path.join(test_root_dir, name)
test_dir = os.path.abspath(test_dir)
if not os.path.isdir(test_dir):
print('!! No test found for %r' % name)
return False
# Switches directory:
os.chdir(test_dir)
print('* Switched to directory: %r' % test_dir)
# Installs dependencies:
print('* Installing dependencies...')
args = command.parser.parse_args(['install', name])
args.func(args)
# Gyp:
os.chdir(test_dir)
gyp_command = 'gyp --depth=. -f ninja test.gyp'
print('* Run %r' % gyp_command)
if subprocess.call(gyp_command, shell=True) != 0:
return False
# Compiles:
ninja_command = 'ninja -C out/Debug/'
print('* Run %r' % ninja_command)
if subprocess.call(ninja_command, shell=True) != 0:
return False
# Run executable:
executable_command = 'out/Debug/test'
print('* Run %r' % executable_command)
if subprocess.call(executable_command, shell=True) != 0:
return False
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Test makegyp formulas.'
)
parser.add_argument('library_names', metavar='library_name', nargs='*')
if len(sys.argv) == 1:
parser.print_help()
args = parser.parse_args()
library_names = set(args.library_names)
if not library_names:
library_names = set()
for library_name in os.listdir(test_root_dir):
path = os.path.join(test_root_dir, library_name)
if os.path.isdir(path):
library_names.add(library_name)
successful_tests = set()
failed_tests = set()
for library_name in library_names:
if test_library(library_name):
successful_tests.add(library_name)
else:
failed_tests.add(library_name)
    print('=' * 3)
print('* Test results:')
print('- Successful tests: %s' % sorted(successful_tests))
print('- Failed tests: %s' % sorted(failed_tests))
|
olliwang/makegyp
|
tests/test.py
|
Python
|
lgpl-3.0
| 2,325
|
# Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import os
import numpy as np
import h5py
import tempfile
import urllib, urllib2
import cStringIO
from PIL import Image
import zlib
import MySQLdb
#from cassandra.cluster import Cluster
sys.path += [os.path.abspath('../django')]
import OCP.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'OCP.settings'
from django.conf import settings
import ocpcaproj
import ocpcadb
import zindex
"""Build a Cassandra DB from an existing MySQL DB"""
def main():
  parser = argparse.ArgumentParser(description='Build a Cassandra DB from MySQL data.')
  parser.add_argument('intoken', action="store", help='Token for the input (MySQL) project.')
  parser.add_argument('outtoken', action="store", help='Token for the output (Cassandra) project.')
parser.add_argument('resolution', action="store", type=int)
result = parser.parse_args()
# cassandra database
outprojdb = ocpcaproj.OCPCAProjectsDB()
outproj = outprojdb.loadProject ( result.outtoken )
# mysql database
inprojdb = ocpcaproj.OCPCAProjectsDB()
inproj = inprojdb.loadProject ( result.intoken )
# Bind the databases
inDB = ocpcadb.OCPCADB ( inproj )
outDB = ocpcadb.OCPCADB ( outproj )
# Get the source database sizes
[ximagesz, yimagesz] = inproj.datasetcfg.imagesz [ result.resolution ]
[xcubedim, ycubedim, zcubedim] = cubedim = inproj.datasetcfg.cubedim [ result.resolution ]
# Get the slices
[ startslice, endslice ] = inproj.datasetcfg.slicerange
slices = endslice - startslice + 1
# Set the limits for iteration on the number of cubes in each dimension
# and the limits of iteration
xlimit = (ximagesz-1) / xcubedim + 1
ylimit = (yimagesz-1) / ycubedim + 1
# Round up the zlimit to the next larger
zlimit = (((slices-1)/zcubedim+1)*zcubedim)/zcubedim
for z in range(zlimit):
for y in range(ylimit):
for x in range(xlimit):
zidx = zindex.XYZMorton ( [x,y,z] )
outDB.putCube ( zidx, result.resolution, inDB.getCube ( zidx, result.resolution ))
print "Ingesting {}".format(zidx)
if __name__ == "__main__":
main()
|
neurodata/ndstore
|
scripts/onetime/cassingest.py
|
Python
|
apache-2.0
| 2,658
|
# Copyright 2014 Huawei Technologies Co., LTD
# All Rights Reserved.
#
# @author: Huawei Technologies Co., LTD
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
cinder-proxy manages creating, attaching, detaching, and persistent storage.
cinder-proxy acts as the same role of cinder-volume in cascading OpenStack.
cinder-proxy treats the cascaded cinder as its cinder volume, converting internal
request messages from the message bus into RESTful API calls to the cascaded cinder.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.cinder_proxy.CinderProxy`).
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import time
from oslo.config import cfg
from oslo import messaging
from oslo.db import exception as db_exc
from cinder import context
from cinder import exception
from cinder import manager
from cinder import quota
from cinder import utils
from cinder import volume
from cinder.i18n import _
from cinder.image import glance
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import periodic_task
from cinder.openstack.common import timeutils
from cinder.openstack.common import uuidutils
from cinder.volume import volume_types
from cinder.volume.configuration import Configuration
from cinder.volume import utils as volume_utils
from cinderclient.v2 import client as cinder_client
from ceilometerclient import client as ceilometerclient
from cinderclient import exceptions as cinder_exception
from cinder.volume import rpcapi as volume_rpcapi
from eventlet.greenpool import GreenPool
from keystoneclient.v2_0 import client as kc
from keystoneclient import exceptions as keystone_exception
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
volume_manager_opts = [
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.ListOpt('enabled_volume_types',
default=None,
help='A list of volume types to use'),
cfg.IntOpt('volume_sync_interval',
default=5,
               help='seconds between cascading and cascaded cinders '
                    'when synchronizing volume data'),
cfg.IntOpt('volume_status_query_count',
default=5,
help='Volume status query times'),
cfg.IntOpt('pagination_limit',
default=50,
               help='pagination limit query for volumes between '
                    'cascading and cascaded OpenStack'),
cfg.IntOpt('voltype_sync_interval',
default=3600,
               help='seconds between cascading and cascaded cinders '
                    'when synchronizing volume type and qos data'),
cfg.BoolOpt('volume_sync_timestamp_flag',
default=True,
help='whether to sync volume status based on timestamp'),
cfg.BoolOpt('clean_extra_cascaded_vol_flag',
default=False,
                help='whether to clean extra cascaded volumes while syncing '
                     'volumes between cascading and cascaded OpenStack; '
                     'use with caution when set to True'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('cinder_username',
default='cinder_username',
help='username for connecting to cinder in admin context'),
cfg.StrOpt('admin_password',
default='admin_password',
help='password for connecting to cinder in admin context',
secret=True),
cfg.StrOpt('cinder_tenant_name',
default='cinder_tenant_name',
help='tenant name for connecting to cinder in admin context'),
cfg.StrOpt('cinder_tenant_id',
default='cinder_tenant_id',
help='tenant id for connecting to cinder in admin context'),
cfg.StrOpt('cascaded_available_zone',
default='nova',
help='available zone for cascaded OpenStack'),
cfg.StrOpt('keystone_auth_url',
default='http://127.0.0.1:5000/v2.0/',
help='value of keystone url'),
cfg.StrOpt('cascaded_cinder_url',
default='http://127.0.0.1:8776/v2/%(project_id)s',
help='value of cascaded cinder url'),
cfg.StrOpt('cascading_cinder_url',
default='http://127.0.0.1:8776/v2/%(project_id)s',
help='value of cascading cinder url'),
cfg.BoolOpt('glance_cascading_flag',
default=False,
                help='Whether to use cascaded glance'),
cfg.StrOpt('cascading_glance_url',
default='127.0.0.1:9292',
help='value of cascading glance url'),
cfg.StrOpt('cascaded_glance_url',
default='http://127.0.0.1:9292',
help='value of cascaded glance url'),
cfg.StrOpt('cascaded_region_name',
default='RegionOne',
help='Region name of this node'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
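# Illustrative sketch (not part of cinder-proxy): how locked_volume_operation
# is typically applied. The function name and body below are assumptions for
# demonstration only; it is defined here but never called.
@locked_volume_operation
def _example_delete_volume(inst, context, volume_id, **kwargs):
    # Runs with the named lock '<volume_id>-_example_delete_volume' held, so
    # concurrent operations on the same volume id block until it completes.
    return volume_id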
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot_id, **kwargs):
@utils.synchronized("%s-%s" % (snapshot_id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot_id, **kwargs)
return lso_inner1
class CinderProxy(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.18'
target = messaging.Target(version=RPC_API_VERSION)
VOLUME_NAME_MAX_LEN = 255
VOLUME_UUID_MAX_LEN = 36
SNAPSHOT_NAME_MAX_LEN = 255
SNAPSHOT_UUID_MAX_LEN = 36
def __init__(self, service_name=None, *args, **kwargs):
"""Load the specified in args, or flags."""
# update_service_capabilities needs service_name to be volume
super(CinderProxy, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = Configuration(volume_manager_opts,
config_group=service_name)
self._tp = GreenPool()
self.volume_api = volume.API()
self._last_info_volume_state_heal = 0
self._change_since_time = None
self.volumes_mapping_cache = {'volumes': {}, 'snapshots': {}}
self.image_service = glance.get_default_image_service()
self.adminCinderClient = self._get_cinder_cascaded_admin_client()
self._init_volume_mapping_cache()
def _init_volume_mapping_cache(self):
try:
ctxt = context.get_admin_context()
volumes = \
self._query_vol_cascaded_pagination(change_since_time=None)
for vol in volumes:
try:
ccding_volume_id = self._get_ccding_volume_id(vol)
if ccding_volume_id == '':
continue
volume = self.db.volume_get(ctxt, ccding_volume_id)
volume_metadata = dict((item['key'], item['value'])
for item in volume['volume_metadata'])
mapping_uuid = volume_metadata.get('mapping_uuid', None)
if mapping_uuid != vol._info['id']:
LOG.info(_("cascade info: unmanage is %s"), vol._info['id'])
self.adminCinderClient.volumes.unmanage(vol._info['id'])
continue
self.volumes_mapping_cache['volumes'][ccding_volume_id] = \
vol._info['id']
except Exception as ex:
LOG.exception(ex)
continue
snapshots = self._query_snapshot_cascaded_all_tenant()
for snapshot in snapshots:
ccding__snapshot_id = self._get_ccding_snapsot_id(snapshot)
if ccding__snapshot_id == '':
continue
self.volumes_mapping_cache['snapshots'][ccding__snapshot_id] = \
snapshot._info['id']
LOG.info(_("cascade info: init volume mapping cache is %s"),
self.volumes_mapping_cache)
except Exception as ex:
LOG.error(_("Failed init volumes mapping cache"))
LOG.exception(ex)
def _get_ccding_volume_id(self, volume):
csd_name = volume._info.get("name", None)
if csd_name is None:
LOG.error(_("Cascade info: csd_name is None!!!. %s"),
volume._info)
return ''
uuid_len = self.VOLUME_UUID_MAX_LEN
if len(csd_name) > (uuid_len+1) and csd_name[-(uuid_len+1)] == '@':
return csd_name[-uuid_len:]
try:
return volume._info['metadata']['logicalVolumeId']
except KeyError:
return ''
def _get_ccding_snapsot_id(self, snapshot):
csd_name = snapshot._info["name"]
uuid_len = self.SNAPSHOT_UUID_MAX_LEN
if len(csd_name) > (uuid_len+1) and csd_name[-(uuid_len+1)] == '@':
return csd_name[-uuid_len:]
try:
return snapshot._info['metadata']['logicalVolumeId']
except KeyError:
return ''
def _gen_ccding_volume_name(self, volume_name, volume_id):
return "volume" + "@" + volume_id
def _gen_ccding_snapshot_name(self, snapshot_name, snapshot_id):
max_len = self.SNAPSHOT_NAME_MAX_LEN - self.SNAPSHOT_UUID_MAX_LEN - 1
if (len(snapshot_name) <= max_len):
return snapshot_name + "@" + snapshot_id
else:
return snapshot_name[0:max_len] + "@" + snapshot_id
def _get_ceilometer_cascading_client(self, context):
ctx_dict = context.to_dict()
creds = dict(
os_auth_url=cfg.CONF.keystone_auth_url,
os_region_name=cfg.CONF.os_region_name,
os_tenant_name=cfg.CONF.cinder_tenant_name,
os_password=cfg.CONF.admin_password,
os_username=cfg.CONF.cinder_username,
insecure=True)
ceiloclient = ceilometerclient.get_client(2,**creds)
LOG.info(_("cascade info: os_region_name:%s"), cfg.CONF.os_region_name)
return ceiloclient
def report_vol_resouce_toMonitoring(self, context, action,
cascading_volume_id,
cascaded_volume_id):
ctx_dict = context.to_dict()
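        # Post a ceilometer sample so monitoring can map the cascading
        # resource to its cascaded counterpart; the 'foo' counter fields are
        # placeholders, only resource_id/resource_metadata carry information.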
if action == "create":
sample = {
"counter_name": "foo",
"counter_type": "gauge",
"counter_unit": "foo",
"counter_volume": 0,
"user_id": ctx_dict.get("user_id"),
"project_id": ctx_dict.get("project_id"),
"resource_id": cascading_volume_id,
"resource_metadata": {
"region": cfg.CONF.cascaded_region_name,
"cascaded_resource_id": cascaded_volume_id,
"type": "cinder.volume"
}
}
elif action == 'remove':
sample = {
"counter_name": "foo",
"counter_type": "gauge",
"counter_unit": "foo",
"counter_volume": 0,
"user_id": ctx_dict.get("user_id"),
"project_id": ctx_dict.get("project_id"),
"resource_id": cascading_volume_id,
"resource_metadata": {
}
}
LOG.info(_("cascade info, bein to report"))
ceiloclient = self._get_ceilometer_cascading_client(context)
response = ceiloclient.samples.create(**sample)
LOG.info(_("cascade info: ceilometer message reponse: %s"), str(response))
def _get_cinder_cascaded_admin_client(self):
try:
kwargs = {'username': cfg.CONF.cinder_username,
'password': cfg.CONF.admin_password,
'tenant_id': cfg.CONF.cinder_tenant_id,
'auth_url': cfg.CONF.keystone_auth_url,
'insecure': True
}
keystoneclient = kc.Client(**kwargs)
cinderclient = cinder_client.Client(
username=cfg.CONF.cinder_username,
auth_url=cfg.CONF.keystone_auth_url,
insecure=True)
cinderclient.client.auth_token = keystoneclient.auth_ref.auth_token
diction = {'project_id': cfg.CONF.cinder_tenant_id}
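            # cascaded_cinder_url is a %-style template; only the
            # %(project_id)s placeholder is filled here (for example
            # 'http://cascaded-cinder:8776/v2/%(project_id)s' -- an
            # illustrative value, not the actual configured endpoint).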
cinderclient.client.management_url = \
cfg.CONF.cascaded_cinder_url % diction
return cinderclient
except keystone_exception.Unauthorized:
with excutils.save_and_reraise_exception():
LOG.error(_('Token unauthorized failed for keystoneclient '
'constructed when get cascaded admin client'))
except cinder_exception.Unauthorized:
with excutils.save_and_reraise_exception():
LOG.error(_('Token unauthorized failed for cascaded '
'cinderClient constructed'))
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to get cinder python client.'))
def _get_cinder_cascaded_user_client(self, context):
try:
ctx_dict = context.to_dict()
cinderclient = cinder_client.Client(
username=ctx_dict.get('user_id'),
auth_url=cfg.CONF.keystone_auth_url,
insecure=True)
cinderclient.client.auth_token = ctx_dict.get('auth_token')
cinderclient.client.management_url = \
cfg.CONF.cascaded_cinder_url % ctx_dict
return cinderclient
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to get cinder python client.'))
def _get_image_cascaded(self, context, image_id, cascaded_glance_url):
try:
# direct_url is returned by v2 api
netloc = cfg.CONF.cascading_glance_url
header = 'http://'
if header in cfg.CONF.cascading_glance_url:
netloc = netloc[len(header):]
client = glance.GlanceClientWrapper(
context,
netloc=netloc,
use_ssl=False,
version="2")
image_meta = client.call(context, 'get', image_id)
except Exception:
glance._reraise_translated_image_exception(image_id)
if not self.image_service._is_image_available(context, image_meta):
raise exception.ImageNotFound(image_id=image_id)
LOG.debug(_("cascade ino: image glance get_image_cascaded,"
"cascaded_glance_url:%s"), cascaded_glance_url)
locations = getattr(image_meta, 'locations', None)
LOG.debug(_("cascade ino: image glance get_image_cascaded,"
"locations:%s"), str(locations))
cascaded_image_id = None
for loc in locations:
image_url = loc.get('url')
LOG.debug(_("cascade ino: image glance get_image_cascaded,"
"image_url:%s"), image_url)
if cascaded_glance_url in image_url:
(cascaded_image_id, glance_netloc, use_ssl) = \
glance._parse_image_ref(image_url)
LOG.debug(_("cascade ino : result :image glance "
"get_image_cascaded,%s") % cascaded_image_id)
break
if cascaded_image_id is None:
raise exception.CinderException(
_("cascade exception: cascaded image for image %s not exist.")
% image_id)
return cascaded_image_id
def _add_to_threadpool(self, func, *args, **kwargs):
self._tp.spawn_n(func, *args, **kwargs)
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
ctxt = context.get_admin_context()
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
LOG.debug(_("Re-exporting %s volumes"), len(volumes))
LOG.debug(_('Resuming any in progress delete operations'))
for volume in volumes:
if volume['status'] == 'deleting':
LOG.info(_('Resuming delete on volume: %s') % volume['id'])
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
                    self._add_to_threadpool(self.delete_volume, ctxt,
                                            volume['id'])
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'])
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
snapshot_id=None, image_id=None, source_volid=None,
source_replicaid=None, consistencygroup_id=None):
"""Creates and exports the volume."""
ctx_dict = context.to_dict()
try:
volume_properties = request_spec.get('volume_properties')
size = volume_properties.get('size')
volume_name = volume_properties.get('display_name')
LOG.info(_("cascade info: begin to create volume: %s"), volume_name)
display_name = self._gen_ccding_volume_name(volume_name, volume_id)
display_description = volume_properties.get('display_description')
volume_type_id = volume_properties.get('volume_type_id')
share = volume_properties.get('shareable', False)
user_id = ctx_dict.get('user_id')
project_id = ctx_dict.get('project_id')
availability_zone = cfg.CONF.cascaded_available_zone
LOG.info(_('cascade ino: create volume with available zone:%s'),
availability_zone)
cascaded_volume_type = None
if volume_type_id is not None:
volume_type_ref = \
self.db.volume_type_get(context, volume_type_id)
cascaded_volume_type = volume_type_ref['name']
LOG.info(_('cascade ino: create volume use volume type, '
'cascade name:%s'), cascaded_volume_type)
cascaded_snapshot_id = None
            snapshot_recovery = False
            volume_recovery_id = None
            snapshot_recovery_id = None
if snapshot_id is not None:
cascaded_snapshot_id = \
self.volumes_mapping_cache['snapshots'].get(snapshot_id,
None)
LOG.info(_('cascade ino: create volume from snapshot, '
'cascade id:%s'), cascaded_snapshot_id)
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
volume_ref = self.db.volume_get(context, snapshot_ref['volume_id'])
volume_AZ = volume_ref['availability_zone']
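                # Cross-AZ snapshot disaster recovery: when the snapshot's
                # source volume lives in a different cascaded AZ, a temporary
                # recovery volume and snapshot are first created in the target
                # AZ and used as the clone source; they are deleted again once
                # the new volume has been created. The fixed sleeps below wait
                # for the cascaded resources to become ready.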
if availability_zone != volume_AZ:
cinderClient = self._get_cinder_cascaded_user_client(context)
volumeResponse = cinderClient.volumes.create(
size=size,
name="snapshot_recovery@"+snapshot_ref['id'],
description=display_description,
volume_type=cascaded_volume_type,
user_id=user_id,
project_id=project_id,
availability_zone=availability_zone,
metadata={"volume_recovery":volume_id})
LOG.info(_("volume recovery finished create!!"))
time.sleep(30)
                    snapshot_metadata = {"snapshot_recovery": snapshot_ref['id']}
snapshotResponse = cinderClient.volume_snapshots.create(
volume_id=volumeResponse._info['id'],
force=False,
name="snapshot@"+snapshot_ref['id'],
description=display_description,
                        metadata=snapshot_metadata)
snapshot_recovery = True
volume_recovery_id = volumeResponse._info['id']
snapshot_recovery_id = snapshotResponse._info['id']
LOG.info(_("volume recovery snapshot finished create!!"))
if cascaded_snapshot_id is None:
cascaded_snapshot_id = snapshot_recovery_id
time.sleep(60)
cascaded_source_volid = None
if source_volid is not None:
cascaded_source_volid = \
self.volumes_mapping_cache['volumes'].get(source_volid,
None)
LOG.info(_('cascade ino: create volume from source volume, '
'cascade id:%s'), cascaded_source_volid)
cascaded_image_id = None
if image_id is not None:
if cfg.CONF.glance_cascading_flag:
cascaded_image_id = self._get_image_cascaded(
context,
image_id,
cfg.CONF.cascaded_glance_url)
else:
cascaded_image_id = image_id
LOG.info(_("cascade ino: create volume use image, "
"cascaded image id is %s:"), cascaded_image_id)
metadata = volume_properties.get('metadata', {})
if metadata is None:
metadata = {}
if snapshot_id is not None:
metadata['snapshot_id'] = snapshot_id
metadata['logicalVolumeId'] = volume_id
LOG.info(_("begin to create volume: %s"), display_name)
cinderClient = self._get_cinder_cascaded_user_client(context)
bodyResponse = cinderClient.volumes.create(
size=size,
snapshot_id=cascaded_snapshot_id,
source_volid=cascaded_source_volid,
name=display_name,
description=display_description,
volume_type=cascaded_volume_type,
user_id=user_id,
project_id=project_id,
availability_zone=availability_zone,
metadata=metadata,
imageRef=cascaded_image_id,
shareable=share)
if bodyResponse._info['status'] == 'creating':
self.volumes_mapping_cache['volumes'][volume_id] = \
bodyResponse._info['id']
if 'logicalVolumeId' in metadata:
metadata.pop('logicalVolumeId')
metadata['mapping_uuid'] = bodyResponse._info['id']
metadata['__openstack_region_name'] = CONF.cascaded_region_name
self.db.volume_metadata_update(context, volume_id,
metadata, True)
self.report_vol_resouce_toMonitoring(context, "create",
volume_id,
bodyResponse._info['id'])
if snapshot_recovery:
cinderClient.volume_snapshots.delete(snapshot_recovery_id)
cinderClient.volumes.delete(volume_recovery_id)
return volume_id
except Exception as ex:
            LOG.error('create volume raised exception: %s' % str(ex))
with excutils.save_and_reraise_exception():
self.db.volume_update(context,
volume_id,
{'status': 'error'})
def _query_vol_cascaded_pagination(self, change_since_time=None):
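        # Pull cascaded volumes page by page, ordered by updated_at desc,
        # using the last volume id of each page as the marker, optionally
        # filtered by a changes-since timestamp.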
if not CONF.volume_sync_timestamp_flag:
change_since_time = None
try:
page_limit = CONF.pagination_limit
marker = None
volumes = []
while True:
sopt = {'all_tenants': True,
'changes-since': change_since_time,
'sort_key': 'updated_at',
'sort_dir': 'desc',
'marker': marker,
'limit': page_limit,
}
vols = \
self.adminCinderClient.volumes.list(search_opts=sopt)
LOG.debug(_('cascade ino: volume pagination query. marker: %s,'
' pagination_limit: %s, change_since: %s, vols: %s'
), marker, page_limit, change_since_time, str(vols))
if (vols):
volumes.extend(vols)
marker = vols[-1]._info['id']
continue
else:
break
LOG.debug(_('cascade ino: ready to update volume status from '
'pagination query. volumes: %s'), str(volumes))
return volumes
except cinder_exception.Unauthorized:
self.adminCinderClient = self._get_cinder_cascaded_admin_client()
return self._query_vol_cascaded_pagination(change_since_time)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to query volumes by pagination.'))
def _query_snapshot_cascaded_all_tenant(self):
""" cinder snapshots pagination query API has not been supported until
native OpenStack Juno version yet.
"""
try:
opts = {'all_tenants': True}
snapshots = \
self.adminCinderClient.volume_snapshots.list(search_opts=opts)
LOG.debug(_('cascade ino: snapshots query.'
'snapshots: %s'), str(snapshots))
return snapshots
except cinder_exception.Unauthorized:
self.adminCinderClient = self._get_cinder_cascaded_admin_client()
return self._query_snapshot_cascaded_all_tenant()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('Failed to query snapshots by all tenant.'))
def _check_update_volume(self, context, refresh_vol):
        """Check a refreshed cascaded volume before updating the cascading db."""
volume_id = self._get_ccding_volume_id(refresh_vol)
        if not volume_id:
            LOG.error(_("cascade info: cascading volume id for cascaded "
                        "volume %s is empty!"), refresh_vol._info['id'])
            return False
volume = self.db.volume_get(context, volume_id)
volume_metadata = dict((item['key'], item['value'])
for item in volume['volume_metadata'])
mapping_uuid = volume_metadata.get('mapping_uuid', None)
ccded_id = self.volumes_mapping_cache['volumes'].get(volume_id, None)
if ccded_id is None:
LOG.error(_("cascade info:cascaded volume for %s in volume mapping"
"cache is None"), volume_id)
return False
if mapping_uuid != ccded_id:
msg = _("cascade info: cascaded vol for %(volume_id)s in volume"
" mapping cache is %(ccded_id)s ,not equal mapping_uuid"
"%(mapping_uuid)s")
LOG.error(msg % {"volume_id": volume_id,
"ccded_id": ccded_id,
"mapping_uuid": mapping_uuid})
return False
if ccded_id != refresh_vol._info['id']:
rtn_id = refresh_vol._info['id']
msg = _("cascade info: cascaded vol id %(ccded_id)s not equal"
" return volume id:%(rtn_id)s")
LOG.error(msg % {"ccded_id": ccded_id,
"rtn_id": rtn_id})
return False
return True
def _update_volumes(self, context, volumes):
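        # Mirror each cascaded volume into the cascading db: validate it
        # against the mapping cache first (optionally deleting stale cascaded
        # volumes when clean_extra_cascaded_vol_flag is set), then sync
        # status, attachment state, volume type and metadata.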
for volume in volumes:
LOG.debug(_("cascade ino: update volume:%s"), str(volume._info))
try:
ret = self._check_update_volume(context, volume)
if not ret:
if CONF.clean_extra_cascaded_vol_flag:
ccded_vol = volume._info['id']
self.adminCinderClient.volumes.delete(volume=ccded_vol)
LOG.info(_("Cascade info:cascaded volume %s deleted!"),
ccded_vol)
continue
volume_id = self._get_ccding_volume_id(volume)
volume_status = volume._info['status']
new_volume_type_id = None
volume_type_name = volume._info['volume_type']
if volume_type_name is not None:
volume_type_ref = self.db.volume_type_get_by_name(context, volume_type_name)
new_volume_type_id = volume_type_ref['id']
LOG.info(_("cascade info: ccded volumetype id: %s"),new_volume_type_id)
if volume_status == "available":
if volume._info['bootable'].lower() == 'false':
bootable_vl = '0'
else:
bootable_vl = '1'
self.db.volume_update(context, volume_id,
{'status': volume._info['status'],
'attach_status': 'detached',
'attach_time': None,
'bootable': bootable_vl,
'volume_type_id': new_volume_type_id
})
attachments = self.db.volume_attachment_get_used_by_volume_id(context, volume_id)
for attach in attachments:
self.db.volume_detached(context.elevated(), volume_id, attach.get('id'))
metadata = volume._info['metadata']
self._update_volume_metada(context, volume_id, metadata)
elif volume_status == "in-use":
self.db.volume_update(context, volume_id,
{'status': volume._info['status'],
'attach_status': 'attached',
'attach_time': timeutils.strtime(),
'volume_type_id': new_volume_type_id
})
else:
self.db.volume_update(context, volume_id,
{'status': volume._info['status']})
                LOG.info(_('cascade info: updated the volume %s status from '
                           'cinder-proxy'), volume_id)
except exception.VolumeNotFound:
LOG.error(_("cascade ino: cascading volume for %s not found!"),
volume._info['id'])
continue
def _update_volume_metada(self, context, volume_id, ccded_volume_metadata):
ccding_vol_metadata = self.db.volume_metadata_get(context, volume_id)
ccded_vol_metadata_keys = ccded_volume_metadata.keys()
        unsync_metadata_keys = ['logicalVolumeId', 'urn', 'uri']
        for temp_unsync_key in unsync_metadata_keys:
if temp_unsync_key in ccded_vol_metadata_keys:
ccded_vol_metadata_keys.remove(temp_unsync_key)
for temp_key in ccded_vol_metadata_keys:
ccding_vol_metadata[temp_key] =\
ccded_volume_metadata.get(temp_key, None)
self.db.volume_metadata_update(context, volume_id,
ccding_vol_metadata, False)
def _update_volume_types(self, context, volumetypes):
vol_types = self.db.volume_type_get_all(context, inactive=False)
LOG.debug(_("cascade info:, vol_types cascading :%s"), str(vol_types))
for volumetype in volumetypes:
try:
LOG.debug(_("cascade ino: vol types cascaded :%s"), str(volumetype))
volume_type_name = volumetype._info['name']
if volume_type_name not in vol_types.keys():
extraspec = volumetype._info['extra_specs']
self.db.volume_type_create(
context,
dict(name=volume_type_name, extra_specs=extraspec))
else:
casding_id = vol_types[volume_type_name].get('id')
casding_especs = vol_types[volume_type_name].get('extra_specs')
casded_especs = volumetype._info['extra_specs']
delete_keys = [k for k in set(casding_especs).difference(casded_especs)]
for key in delete_keys:
LOG.info('volume_type_extra_specs_delete: %s' %key)
self.db.volume_type_extra_specs_delete(context, casding_id, key)
new_especs = self.db.volume_type_extra_specs_get(context, casding_id)
                    if casded_especs != new_especs:
self.db.volume_type_extra_specs_update_or_create(context, casding_id, casded_especs)
            except Exception:
LOG.info('update volume types failed')
LOG.debug(_("cascade info: update volume types finished"))
def _update_volume_qos(self, context, qosSpecs):
qos_specs = self.db.qos_specs_get_all(context, inactive=False)
qosname_list_cascading = []
qosid_list_cascading = {}
qosspecs_list_cascading = {}
for qos_cascading in qos_specs:
qosname_list_cascading.append(qos_cascading['name'])
qosid_list_cascading[qos_cascading['name']] = qos_cascading['id']
qosspecs_list_cascading[qos_cascading['name']] = qos_cascading['specs']
for qos_cascaded in qosSpecs:
qos_name_cascaded = qos_cascaded._info['name']
"""update qos from cascaded cinder
"""
if qos_name_cascaded not in qosname_list_cascading:
qos_create_val = {}
qos_create_val['name'] = qos_name_cascaded
qos_spec_value = qos_cascaded._info['specs']
qos_spec_value['consumer'] = \
qos_cascaded._info['consumer']
qos_create_val['qos_specs'] = qos_spec_value
                LOG.info(_('cascade info: create qos_spec %s in db'),
qos_name_cascaded)
self.db.qos_specs_create(context, qos_create_val)
                LOG.info(_('cascade info: qos_spec %s created in db'),
qos_create_val)
else:
try:
cascaded_specs = qos_cascaded._info['specs']
LOG.debug("cascaded_specs: %s" %(str(cascaded_specs)))
cascading_specs = qosspecs_list_cascading[qos_name_cascaded]
cascading_qos_id = qosid_list_cascading[qos_name_cascaded]
delete_keys = [k for k in set(cascading_specs).difference(cascaded_specs)]
for key in delete_keys:
LOG.info('qos_specs_item_delete: %s' %key)
self.db.qos_specs_item_delete(context, cascading_qos_id, key)
                    new = self.db.qos_specs_get(context, cascading_qos_id)
                    LOG.info("new cascading_specs: %s" % (str(new)))
                    if (qos_cascaded._info['consumer'] != new['consumer'] or
                            cascaded_specs != new['specs']):
LOG.info("new consumer: %s" %(qos_cascaded._info['consumer']))
cascaded_specs.update({'consumer': qos_cascaded._info['consumer']})
self.db.qos_specs_update(context, cascading_qos_id, cascaded_specs)
except db_exc.DBError as e:
LOG.exception(_('DB error: %s') % e)
continue
"""update qos specs association with vol types from cascaded
"""
casding_qos_specs = self.db.qos_specs_get_all(context, inactive=False)
qosid_list_casding = {}
for qos_cascading in casding_qos_specs:
qosid_list_casding[qos_cascading['name']] = qos_cascading['id']
try:
for qos_cascaded in qosSpecs:
casded_qos_id = qos_cascaded._info['id']
qos_nm = qos_cascaded._info['name']
casding_qos_id = qosid_list_casding[qos_nm]
casding_assoc = self.db.volume_type_qos_associations_get(context, casding_qos_id)
casding_types = [t['name'] for t in casding_assoc]
association = self.adminCinderClient.qos_specs.get_associations(casded_qos_id)
casded_types = [t._info['name'] for t in association]
for ass in casding_assoc:
if ass['name'] not in casded_types:
self.db.qos_specs_disassociate(context, casding_qos_id, ass['id'])
LOG.debug("qos_specs_disassociate: %s %s" %(casding_qos_id, ass['id']))
LOG.debug('casding_qos_id: %s casding_types: %s' %(casding_qos_id,str(casding_types)))
for assoc in association:
assoc_name = assoc._info['name']
LOG.debug('my cascade ino: associate %s to %s' %(assoc_name, casding_qos_id))
if assoc_name not in casding_types:
LOG.debug('associate %s to %s' %(assoc_name, casding_qos_id))
voltype = self.db.volume_type_get_by_name(context, assoc_name)
self.db.qos_specs_associate(context, casding_qos_id, voltype['id'])
        except Exception:
LOG.debug('update qos specs failed')
LOG.debug(_("cascade ino: update qos from cascaded finished"))
@periodic_task.periodic_task(spacing=CONF.volume_sync_interval,
run_immediately=True)
def _heal_volume_status(self, context):
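        # Periodic task: fetch cascaded volumes changed since the last run
        # and mirror their state into the cascading db. _change_since_time is
        # only advanced after a successful sync.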
# TIME_SHIFT_TOLERANCE = 3
heal_interval = CONF.volume_sync_interval
if not heal_interval:
return
curr_time = time.time()
if self._last_info_volume_state_heal + heal_interval > curr_time:
return
self._last_info_volume_state_heal = curr_time
try:
LOG.debug(_('cascade ino: current change since time:'
'%s'), self._change_since_time)
volumes = \
self._query_vol_cascaded_pagination(self._change_since_time)
if volumes:
self._update_volumes(context, volumes)
self._change_since_time = timeutils.isotime()
except Exception:
with excutils.save_and_reraise_exception():
                LOG.error(_('Failed to sync volume status to db.'))
@periodic_task.periodic_task(spacing=CONF.voltype_sync_interval,
run_immediately=True)
def _heal_volumetypes_and_qos(self, context):
try:
volumetypes = self.adminCinderClient.volume_types.list()
if volumetypes:
self._update_volume_types(context, volumetypes)
qosSpecs = self.adminCinderClient.qos_specs.list()
if qosSpecs:
self._update_volume_qos(context, qosSpecs)
except cinder_exception.Unauthorized:
self.adminCinderClient = self._get_cinder_cascaded_admin_client()
except Exception:
with excutils.save_and_reraise_exception():
                LOG.error(_('Failed to sync volume types to db.'))
@locked_volume_operation
def delete_volume(self, context, volume_id, unmanage_only=False):
"""Deletes and unexports volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
LOG.info(_("volume %s: deleting"), volume_ref['id'])
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
self._notify_about_volume_usage(context, volume_ref, "delete.start")
self._reset_stats()
try:
if unmanage_only:
self._unmanage(context, volume_id)
else:
self._delete_cascaded_volume(context, volume_id)
except exception.VolumeIsBusy:
LOG.error(_("Cannot delete volume %s: volume is busy"),
volume_ref['id'])
self.db.volume_update(context, volume_ref['id'],
{'status': 'available'})
return True
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(context,
volume_ref['id'],
{'status': 'error_deleting'})
# If deleting the source volume in a migration, we want to skip quotas
# and other database updates.
if volume_ref['migration_status']:
return True
# Get reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deleting volume"))
# Delete glance metadata if it exists
try:
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
LOG.debug(_("volume %s: glance metadata deleted"),
volume_ref['id'])
except exception.GlanceMetadataNotFound:
LOG.debug(_("no glance metadata found for volume %s"),
volume_ref['id'])
self.db.volume_destroy(context, volume_id)
LOG.info(_("volume %s: deleted successfully"), volume_ref['id'])
self._notify_about_volume_usage(context, volume_ref, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.publish_service_capabilities(context)
return True
def _delete_cascaded_volume(self, context, volume_id):
try:
# vol_ref = self.db.volume_get(context, volume_id)
# caecaded_volume_id = vol_ref['mapping_uuid']
cascaded_volume_id = \
self.volumes_mapping_cache['volumes'].get(volume_id, None)
if cascaded_volume_id is None:
LOG.error(_("cascade info: physical volume for vol %s "
"not found !"), volume_id)
return
LOG.info(_('cascade ino: prepare to delete cascaded volume %s.'),
cascaded_volume_id)
cinderClient = self._get_cinder_cascaded_user_client(context)
cinderClient.volumes.get(cascaded_volume_id)
cinderClient.volumes.delete(volume=cascaded_volume_id)
self.volumes_mapping_cache['volumes'].pop(volume_id, '')
LOG.info(_('cascade ino: finished to delete cascade volume %s'),
cascaded_volume_id)
self.report_vol_resouce_toMonitoring(context, "remove",
volume_id,
cascaded_volume_id)
return
# self._heal_volume_mapping_cache(volume_id,casecade_volume_id,s'remove')
except cinder_exception.NotFound:
self.volumes_mapping_cache['volumes'].pop(volume_id, '')
LOG.info(_('cascade ino: finished to delete cascade volume %s'),
cascaded_volume_id)
return
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(context,
volume_id,
{'status': 'error_deleting'})
                LOG.error(_('cascade info: failed to delete cascaded '
                            'volume %s'), cascaded_volume_id)
def create_snapshot(self, context, volume_id, snapshot_id):
"""Creates and exports the snapshot."""
context = context.elevated()
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
snap_name = snapshot_ref['display_name']
display_name = self._gen_ccding_snapshot_name(snap_name, snapshot_id)
display_description = snapshot_ref['display_description']
LOG.info(_("snapshot %s: creating"), snapshot_ref['id'])
self._notify_about_snapshot_usage(
context, snapshot_ref, "create.start")
vol_ref = self.db.volume_get(context, volume_id)
try:
cascaded_volume_id = \
self.volumes_mapping_cache['volumes'].get(volume_id, '')
            LOG.debug(_('cascade info: create snapshot, cascaded volume '
                        'id is: %s'), cascaded_volume_id)
cinderClient = self._get_cinder_cascaded_user_client(context)
bodyResponse = cinderClient.volume_snapshots.create(
volume_id=cascaded_volume_id,
force=True,
name=display_name,
description=display_description)
LOG.info(_("cascade ino: create snapshot while response is:%s"),
bodyResponse._info)
if bodyResponse._info['status'] == 'creating':
self.volumes_mapping_cache['snapshots'][snapshot_id] = \
bodyResponse._info['id']
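                # Poll the cascaded snapshot until it leaves the 'creating'
                # state, then mirror its final status into the cascading db.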
while True:
time.sleep(CONF.volume_sync_interval)
queryResponse = \
cinderClient.volume_snapshots.get(bodyResponse._info['id'])
query_status = queryResponse._info['status']
if query_status != 'creating':
self.db.snapshot_update(context, snapshot_ref['id'],
{'status': query_status,
'progress': '100%'
})
break
else:
continue
# self.db.snapshot_update(
# context,
# snapshot_ref['id'],
# {'mapping_uuid': bodyResponse._info['id']})
except Exception:
with excutils.save_and_reraise_exception():
self.db.snapshot_update(context,
snapshot_ref['id'],
{'status': 'error'})
return
# vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_ref['id'], volume_id)
except exception.CinderException as ex:
LOG.exception(_("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata") %
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
raise exception.MetadataCopyFailure(reason=ex)
LOG.info(_("cascade ino: snapshot %s, created successfully"),
snapshot_ref['id'])
self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
return snapshot_id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot_id):
"""Deletes and unexports snapshot."""
caller_context = context
context = context.elevated()
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
project_id = snapshot_ref['project_id']
LOG.info(_("snapshot %s: deleting"), snapshot_ref['id'])
self._notify_about_snapshot_usage(
context, snapshot_ref, "delete.start")
try:
LOG.debug(_("snapshot %s: deleting"), snapshot_ref['id'])
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot_ref['context'] = caller_context
self._delete_snapshot_cascaded(context, snapshot_id)
except exception.SnapshotIsBusy:
LOG.error(_("Cannot delete snapshot %s: snapshot is busy"),
snapshot_ref['id'])
self.db.snapshot_update(context,
snapshot_ref['id'],
{'status': 'available'})
return True
except Exception:
with excutils.save_and_reraise_exception():
self.db.snapshot_update(context,
snapshot_ref['id'],
{'status': 'error_deleting'})
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot_ref['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot_ref['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
self.db.snapshot_destroy(context, snapshot_id)
LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['id'])
self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
return True
def _delete_snapshot_cascaded(self, context, snapshot_id):
try:
# snapshot_ref = self.db.snapshot_get(context, snapshot_id)
# cascaded_snapshot_id = snapshot_ref['mapping_uuid']
cascaded_snapshot_id = \
self.volumes_mapping_cache['snapshots'].get(snapshot_id, '')
LOG.info(_("cascade ino: delete cascaded snapshot:%s"),
cascaded_snapshot_id)
cinderClient = self._get_cinder_cascaded_user_client(context)
cinderClient.volume_snapshots.get(cascaded_snapshot_id)
resp = cinderClient.volume_snapshots.delete(cascaded_snapshot_id)
self.volumes_mapping_cache['snapshots'].pop(snapshot_id, '')
LOG.info(_("delete cascaded snapshot %s successfully. resp :%s"),
cascaded_snapshot_id, resp)
return
except cinder_exception.NotFound:
self.volumes_mapping_cache['snapshots'].pop(snapshot_id, '')
LOG.info(_("delete cascaded snapshot %s successfully."),
cascaded_snapshot_id)
return
except Exception:
with excutils.save_and_reraise_exception():
self.db.snapshot_update(context,
snapshot_id,
{'status': 'error_deleting'})
LOG.error(_("failed to delete cascaded snapshot %s"),
cascaded_snapshot_id)
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached
interface about attch_volume has been realized in nova-proxy
cinder-proxy just update cascading level data, other fields
about attaching is synced from timer (_heal_volume_status)
"""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
msg = _("being attached by different mode")
raise exception.InvalidVolume(reason=msg)
if (volume['status'] == 'in-use' and
not volume['shareable']):
msg = _("volume is already attached")
raise exception.InvalidVolume(reason=msg)
attachment = None
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachment = \
self.db.volume_attachment_get_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachment = \
self.db.volume_attachment_get_by_host(context, volume_id,
host_name_sanitized)
if attachment is not None:
return
# TODO(jdg): attach_time column is currently varchar
# we should update this to a date-time object
# also consider adding detach_time?
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
"instance_uuid": instance_uuid,
"attached_host": host_name,
"status": "attaching",
"attach_time": timeutils.strtime()}
attachment = self.db.volume_attach(context, values)
self.db.volume_admin_metadata_update(context.elevated(),
volume_id,
{"attached_mode": mode},
False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
self.db.volume_attachment_update(context, attachment_id,
{'mountpoint': mountpoint,
'attach_status': 'attached'})
if volume['migration_status']:
self.db.volume_update(context, volume_id,
{'migration_status': None})
self._notify_about_volume_usage(context, volume, "attach.end")
return do_attach()
@locked_volume_operation
def detach_volume(self, context, volume_id, attachment_id):
"""Updates db to show volume is detached
interface about detach_volume has been realized in nova-proxy
cinder-proxy just update cascading level data, other fields
about detaching is synced from timer (_heal_volume_status)
"""
# TODO(vish): refactor this into a more general "unreserve"
# TODO(sleepsonthefloor): Is this 'elevated' appropriate?
# self.db.volume_detached(context.elevated(), volume_id)
attachment = self.db.volume_attachment_get(context, attachment_id)
volume = self.db.volume_get(context, volume_id)
self._notify_about_volume_usage(context, volume, "detach.start")
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
self._notify_about_volume_usage(context, volume, "detach.end")
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
LOG.info(_("cascade ino: copy volume to image, image_meta is:%s"),
str(image_meta))
image_name = image_meta.get("name")
container_format = image_meta.get("container_format")
disk_format = image_meta.get("disk_format")
image_uuid = image_meta.get("id")
LOG.debug(_('HYBRID: image_meta %s'), str(image_meta))
# vol_ref = self.db.volume_get(context, volume_id)
# casecaded_volume_id = vol_ref['mapping_uuid']
cascaded_volume_id = \
self.volumes_mapping_cache['volumes'].get(volume_id, '')
        LOG.debug(_('cascade info: copy volume to image, cascaded volume '
                    'id is %s'), cascaded_volume_id)
if not cfg.CONF.glance_cascading_flag:
image_name = "image@" + image_meta.get("id")
cinderClient = self._get_cinder_cascaded_user_client(context)
resp = cinderClient.volumes.upload_to_image(
volume=cascaded_volume_id,
force=True,
image_name=image_uuid,
container_format=container_format,
disk_format=disk_format)
if cfg.CONF.glance_cascading_flag:
cascaded_image_id = resp[1]['os-volume_upload_image']['image_id']
LOG.debug(_('cascade ino:upload volume to image,get cascaded '
'image id is %s'), cascaded_image_id)
url = '%s/v2/images/%s' % (cfg.CONF.cascaded_glance_url,
cascaded_image_id)
locations = [{
'url': url,
'metadata': {'image_id': str(cascaded_image_id),
'image_from': 'volume'
}
}]
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
LOG.debug(_("cascade ino: image service:%s"), str(image_service))
netloc = cfg.CONF.cascading_glance_url
header = 'http://'
if header in cfg.CONF.cascading_glance_url:
netloc = netloc[len(header):]
glanceClient = glance.GlanceClientWrapper(
context,
netloc=netloc,
use_ssl=False,
version="2")
glanceClient.call(context, 'update', image_id,
remove_props=None, locations=locations)
            LOG.debug(_('cascade info: upload volume to image, finished '
                        'updating image %s locations %s.'),
                      image_id, locations)
volume = self.db.volume_get(context, volume_id)
if (volume['instance_uuid'] is None and
volume['attached_host'] is None):
self.db.volume_update(context, volume_id,
{'status': 'available'})
else:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
def _get_original_status(self, volume):
if not volume['volume_attachment']:
return 'available'
else:
return 'in-use'
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None):
def _retype_error(context, volume_id, old_reservations,
new_reservations, status_update):
try:
self.db.volume_update(context, volume_id, status_update)
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
volume_ref = self.db.volume_get(ctxt, volume_id)
status_update = {'status': self._get_original_status(volume_ref)}
if context.project_id != volume_ref['project_id']:
project_id = volume_ref['project_id']
else:
project_id = context.project_id
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
old_reservations = None
self.db.volume_update(context, volume_id, status_update)
LOG.exception(_("Failed to update usages while retyping volume."))
raise exception.CinderException(_("Failed to get old volume type"
" quota reservations"))
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
if not retyped:
try:
migration_policy = 'never'
cascaded_volume_id = self.volumes_mapping_cache['volumes'].get(volume_id, None)
new_type = volume_types.get_volume_type(context, new_type_id)
cinderClient = self._get_cinder_cascaded_user_client(context)
response = cinderClient.volumes.retype(cascaded_volume_id,
new_type['name'],
migration_policy)
LOG.info(_("cascade info: volume %s retype response :%s"), volume_id, response)
# Check if the driver retype provided a model update or
# just a retype indication
except Exception as ex:
retyped = False
LOG.error(_("Volume %s: when trying to retype, "
"falling back to generic mechanism."),
volume_ref['id'])
LOG.exception(ex)
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self.publish_service_capabilities(context)
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
volume in openstack cascading level is just a logical data,
initialize connection has losts its meaning, so the interface here
just return a None value
"""
return None
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
volume in openstack cascading level is just a logical data,
terminate connection has losts its meaning, so the interface here
just return a None value
"""
return None
@periodic_task.periodic_task
def _report_driver_status(self, context):
"""cinder cascading driver has losts its meaning.
so driver-report info here is just a copy of simulation message
"""
LOG.info(_("report simulation volume driver"))
simu_location_info = 'LVMVolumeDriver:Huawei:cinder-volumes:default:0'
storage_pool = []
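        # When enabled_volume_types is configured, report one simulated pool
        # per 'volume_backend_name' extra-spec so the scheduler can still
        # place volumes per backend.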
volume_stats = {
'volume_backend_name': 'LVM_ISCSI',
'QoS_support': True,
'free_capacity_gb': 1024000.0,
'location_info': simu_location_info,
'total_capacity_gb': 1024000.0,
'reserved_percentage': 0,
'driver_version': '2.0.0',
'vendor_name': 'Huawei',
'storage_protocol': 'iSCSI'}
if CONF.enabled_volume_types:
for voltype_name in CONF.enabled_volume_types:
vol_type =\
self.db.volume_type_get_by_name(context, voltype_name)
for key, value in vol_type['extra_specs'].iteritems():
LOG.debug("key %s, value %s", key, value)
if key == 'volume_backend_name':
pool = {'pool_name': value,
'volume_backend_name': value,
'total_capacity_gb': 1024000.0,
'free_capacity_gb': 1024000.0,
'allocated_capacity_gb': 0.0,
'QoS_support': 'True',
'reserved_percentage': 0
}
storage_pool.append(pool)
else:
continue
if storage_pool:
volume_stats.update({'pools': storage_pool})
LOG.info('volume_stats: %s' %(str(volume_stats)))
self.update_service_capabilities(volume_stats)
return
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _reset_stats(self):
LOG.info(_("Clear capabilities"))
self._last_volume_stats = []
def notification(self, context, event):
LOG.info(_("Notification {%s} received"), event)
self._reset_stats()
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations):
volume = self.db.volume_get(context, volume_id)
self._notify_about_volume_usage(context, volume, "resize.start")
try:
LOG.info(_("volume %s: extending"), volume['id'])
cinderClient = self._get_cinder_cascaded_user_client(context)
# vol_ref = self.db.volume_get(context, volume_id)
# cascaded_volume_id = vol_ref['mapping_uuid']
cascaded_volume_id = \
self.volumes_mapping_cache['volumes'].get(volume_id, '')
LOG.info(_("cascade ino: extend volume cascaded volume id is:%s"),
cascaded_volume_id)
cinderClient.volumes.extend(cascaded_volume_id, new_size)
LOG.info(_("cascade ino: volume %s: extended successfully"),
volume['id'])
except Exception:
LOG.exception(_("volume %s: Error trying to extend volume"),
volume_id)
try:
self.db.volume_update(context, volume['id'],
{'status': 'error_extending'})
finally:
QUOTAS.rollback(context, reservations)
return
QUOTAS.commit(context, reservations)
self.db.volume_update(context, volume['id'], {'size': int(new_size),
'status': 'extending'})
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False):
"""Migrate the volume to the specified host (called on source host).
the interface is being realized
"""
LOG.info("migrate_volume: begin [%s]" %(volume_id))
orig_metadata = None
try:
volume = self.db.volume_get(ctxt, volume_id)
orig_metadata = dict((item['key'], item['value']) for item in volume['volume_metadata'])
volInfoUrl = orig_metadata.get('volInfoUrl', None)
if not volInfoUrl:
LOG.error("%s do not support manage" %volume_id)
return
mapping_uuid = orig_metadata.get('mapping_uuid', None)
cinderClient = self._get_cinder_cascaded_user_client(ctxt)
mapping_volume = cinderClient.volumes.get(mapping_uuid)
dst_host = mapping_volume._info['os-vol-host-attr:host']
volume['host'] = host['host']
LOG.info("host_name : %s" %host['host'])
self.db.volume_update(ctxt, volume_id, {'status': 'creating', 'migration_status': 'starting'})
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.manage_existing(ctxt, volume, dst_host)
attempts = 0
backoff = CONF.volume_sync_interval
while True:
volume = self.db.volume_get(ctxt, volume_id)
if volume['status']=='available':
msg = (_('manage already!'))
LOG.info(msg)
break
elif volume['status']=='creating':
attempts = attempts+1
if attempts > CONF.volume_status_query_count:
                        msg = _('manage attempts exceeded the retry count!')
LOG.error(msg)
raise exception.CinderException(msg)
else:
msg = (_('query volume attempts %s') %attempts)
LOG.info(msg)
time.sleep(backoff)
continue
else:
msg = (_('No available service'))
LOG.error(msg)
raise exception.CinderException(msg)
except Exception:
with excutils.save_and_reraise_exception():
if orig_metadata:
#restore orig_metadata
self.db.volume_metadata_update(ctxt, volume_id, orig_metadata, False)
self.db.volume_update(ctxt, volume_id, {'status': 'available', 'migration_status': 'error'})
try:
self.delete_volume(ctxt, volume_id, unmanage_only=True)
except Exception:
                    LOG.info('exception while deleting volume during migrate')
self.db.volume_update(ctxt, volume_id, {'migration_status': None, 'host':host['host']})
return
def extract_backend(self, host):
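        # Hosts are expected in the 'host@backend#pool' form; return the
        # backend part, or None when no backend is present.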
lst = host.split('@')
if len(lst) == 2:
backend = lst[1].split('#')
return backend[0]
else:
return None
def get_cascade_service(self, ctxt, backend):
LOG.info("backend %s" %backend)
hosts=[]
try:
cinderClient = self._get_cinder_cascaded_user_client(ctxt)
rsp = cinderClient.services.list(binary='cinder-volume')
for s in rsp:
LOG.debug("manage_existing service %s" %str(s._info))
status = s._info['status']
state = s._info['state']
host = s._info['host']
dst_backend = self.extract_backend(host)
LOG.info("dst_backend %s" %dst_backend)
                if (status == 'enabled' and state == 'up' and
                        backend == dst_backend):
hosts.append(host)
return hosts
except Exception:
with excutils.save_and_reraise_exception():
LOG.info("get_cascade_service service failed")
return []
def manage_existing(self, ctxt, volume_id, ref=None):
"""manage methed"""
LOG.info("manage_existing: begin [%s]" %(volume_id))
volume = self.db.volume_get(ctxt, volume_id)
volume_name = volume.get('display_name')
display_name = self._gen_ccding_volume_name(volume_name, volume_id)
display_description = volume.get('display_description')
volume_type = volume.get('volume_type')
volume_type_name = None
if volume_type:
volume_type_name = volume_type.name
backend = self.extract_backend(ref)
hosts = self.get_cascade_service(ctxt, backend)
if not hosts:
LOG.error("manage_existing have no cascade host")
raise exception.ServiceUnavailable()
LOG.info("manage_existing: cascade hosts %s" %str(hosts))
dst_host = hosts[0] + '#' + backend
LOG.debug("manage_existing: cascade dst_host[%s]" %dst_host)
volume_metadata = {'logicalVolumeId': volume_id}
for meta in volume['volume_metadata']:
if meta.key == 'mapping_uuid':
continue
volume_metadata.update({meta.key:meta.value})
LOG.debug("volume_metadata: %s" %volume_metadata)
try:
cinderClient = self._get_cinder_cascaded_user_client(ctxt)
rsp = cinderClient.volumes.manage(dst_host,
ref,
name=display_name,
description=display_description,
volume_type=volume_type_name,
availability_zone=cfg.CONF.cascaded_available_zone,
metadata=volume_metadata,
bootable=volume['bootable'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_('failed to manage cascaded %s') %(str(volume)))
if rsp._info['status'] == 'creating':
self.volumes_mapping_cache['volumes'][volume_id] = rsp._info['id']
self.db.volume_update(ctxt, volume_id, {'status': 'creating'})
metadata = {'mapping_uuid': rsp._info['id']}
self.db.volume_metadata_update(ctxt, volume_id, metadata, False)
def _unmanage(self, ctxt, volume_id):
try:
cascaded_volume_id = self.volumes_mapping_cache['volumes'].get(volume_id, '')
LOG.info(_('cascade ino: prepare to _unmanage cascaded volume %s.'), cascaded_volume_id)
cinderClient = self._get_cinder_cascaded_user_client(ctxt)
cinderClient.volumes.get(cascaded_volume_id)
cinderClient.volumes.unmanage(volume=cascaded_volume_id)
self.volumes_mapping_cache['volumes'].pop(volume_id, '')
LOG.info(_('finished to _unmanage cascade volume %s'), cascaded_volume_id)
return
# self._heal_volume_mapping_cache(volume_id,casecade_volume_id,s'remove')
except cinder_exception.NotFound:
self.volumes_mapping_cache['volumes'].pop(volume_id, '')
            LOG.info(_('unmanage: cascaded volume %s not found'), cascaded_volume_id)
return
except Exception:
with excutils.save_and_reraise_exception():
                LOG.error(_('failed to _unmanage cascaded '
                            'volume %s'), cascaded_volume_id)
return
|
Hybrid-Cloud/badam
|
fs_patches_of_hybrid_cloud/cherry_for_B038/cinder_cascading_proxy_aws_snapshot_disaster_recovery/proxy/cinder_proxy.py
|
Python
|
apache-2.0
| 80,774
|
from __future__ import absolute_import
from __future__ import unicode_literals
import hashlib
import json
import logging
import requests
from pypuppetdb.api.base import BaseAPI, COMMAND_VERSION, ERROR_STRINGS
from pypuppetdb.errors import (APIError, EmptyResponseError)
log = logging.getLogger(__name__)
class CommandAPI(BaseAPI):
"""This class provides methods that interact with the `pdb/cmd/*`
PuppetDB API endpoints.
"""
def command(self, command, payload):
return self._cmd(command, payload)
def _cmd(self, command, payload):
"""This method posts commands to PuppetDB. Provided a command and payload
it will fire a request at PuppetDB. If PuppetDB can be reached and
answers within the timeout we'll decode the response and give it back
or raise for the HTTP Status Code PuppetDB gave back.
:param command: The PuppetDB Command we want to execute.
:type command: :obj:`string`
        :param payload: The payload, in wire format, specific to the command.
        :type payload: :obj:`dict`
:raises: :class:`~pypuppetdb.errors.EmptyResponseError`
:returns: The decoded response from PuppetDB
:rtype: :obj:`dict` or :obj:`list`
"""
log.debug('_cmd called with command: {0}, data: {1}'.format(
command, payload))
url = self._url('cmd')
if command not in COMMAND_VERSION:
log.error("Only {0} supported, {1} unsupported".format(
list(COMMAND_VERSION.keys()), command))
raise APIError
params = {
"command": command,
"version": COMMAND_VERSION[command],
"certname": payload['certname'],
"checksum": hashlib.sha1(str(payload) # nosec
.encode('utf-8')).hexdigest()
}
try:
r = self.session.post(url,
params=params,
data=json.dumps(payload, default=str),
verify=self.ssl_verify,
cert=(self.ssl_cert, self.ssl_key),
timeout=self.timeout,
)
r.raise_for_status()
json_body = r.json()
if json_body is not None:
return json_body
else:
del json_body
raise EmptyResponseError
except requests.exceptions.Timeout:
log.error("{0} {1}:{2} over {3}.".format(ERROR_STRINGS['timeout'],
self.host, self.port,
self.protocol.upper()))
raise
except requests.exceptions.ConnectionError:
log.error("{0} {1}:{2} over {3}.".format(ERROR_STRINGS['refused'],
self.host, self.port,
self.protocol.upper()))
raise
except requests.exceptions.HTTPError as err:
log.error("{0} {1}:{2} over {3}.".format(err.response.text,
self.host, self.port,
self.protocol.upper()))
raise
|
puppet-community/pypuppetdb
|
pypuppetdb/api/command.py
|
Python
|
apache-2.0
| 3,377
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import seq2seq python ops for backward compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
raise ImportError(
"This module is deprecated. Use tf.contrib.legacy_seq2seq instead.")
|
deepakgupta1313/models
|
tutorials/rnn/seq2seq.py
|
Python
|
apache-2.0
| 952
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib import constants
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.agent.linux.openvswitch_firewall import firewall as ovsfw
from neutron.agent.linux.openvswitch_firewall import rules
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \
as ovs_consts
from neutron.tests import base
TESTING_VLAN_TAG = 1
class TestIsValidPrefix(base.BaseTestCase):
def test_valid_prefix_ipv4(self):
is_valid = rules.is_valid_prefix('10.0.0.0/0')
self.assertTrue(is_valid)
def test_invalid_prefix_ipv4(self):
is_valid = rules.is_valid_prefix('0.0.0.0/0')
self.assertFalse(is_valid)
def test_valid_prefix_ipv6(self):
is_valid = rules.is_valid_prefix('ffff::0/0')
self.assertTrue(is_valid)
def test_invalid_prefix_ipv6(self):
is_valid = rules.is_valid_prefix('0000:0::0/0')
self.assertFalse(is_valid)
is_valid = rules.is_valid_prefix('::0/0')
self.assertFalse(is_valid)
is_valid = rules.is_valid_prefix('::/0')
self.assertFalse(is_valid)
class TestCreateFlowsFromRuleAndPort(base.BaseTestCase):
def setUp(self):
super(TestCreateFlowsFromRuleAndPort, self).setUp()
ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00')
ovs_port.ofport = 1
port_dict = {'device': 'port_id'}
self.port = ovsfw.OFPort(
port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG)
self.create_flows_mock = mock.patch.object(
rules, 'create_protocol_flows').start()
@property
def passed_flow_template(self):
return self.create_flows_mock.call_args[0][1]
def _test_create_flows_from_rule_and_port_helper(
self, rule, expected_template):
rules.create_flows_from_rule_and_port(rule, self.port)
self.assertEqual(expected_template, self.passed_flow_template)
def test_create_flows_from_rule_and_port_no_ip_ipv4(self):
rule = {
'ethertype': constants.IPv4,
'direction': constants.INGRESS_DIRECTION,
}
expected_template = {
'priority': 74,
'dl_type': constants.ETHERTYPE_IP,
'reg_port': self.port.ofport,
}
self._test_create_flows_from_rule_and_port_helper(rule,
expected_template)
def test_create_flows_from_rule_and_port_src_and_dst_ipv4(self):
rule = {
'ethertype': constants.IPv4,
'direction': constants.INGRESS_DIRECTION,
'source_ip_prefix': '192.168.0.0/24',
'dest_ip_prefix': '10.0.0.1/32',
}
expected_template = {
'priority': 74,
'dl_type': constants.ETHERTYPE_IP,
'reg_port': self.port.ofport,
'nw_src': '192.168.0.0/24',
'nw_dst': '10.0.0.1/32',
}
self._test_create_flows_from_rule_and_port_helper(rule,
expected_template)
def test_create_flows_from_rule_and_port_src_and_dst_with_zero_ipv4(self):
rule = {
'ethertype': constants.IPv4,
'direction': constants.INGRESS_DIRECTION,
'source_ip_prefix': '192.168.0.0/24',
'dest_ip_prefix': '0.0.0.0/0',
}
expected_template = {
'priority': 74,
'dl_type': constants.ETHERTYPE_IP,
'reg_port': self.port.ofport,
'nw_src': '192.168.0.0/24',
}
self._test_create_flows_from_rule_and_port_helper(rule,
expected_template)
def test_create_flows_from_rule_and_port_no_ip_ipv6(self):
rule = {
'ethertype': constants.IPv6,
'direction': constants.INGRESS_DIRECTION,
}
expected_template = {
'priority': 74,
'dl_type': constants.ETHERTYPE_IPV6,
'reg_port': self.port.ofport,
}
self._test_create_flows_from_rule_and_port_helper(rule,
expected_template)
def test_create_flows_from_rule_and_port_src_and_dst_ipv6(self):
rule = {
'ethertype': constants.IPv6,
'direction': constants.INGRESS_DIRECTION,
'source_ip_prefix': '2001:db8:bbbb::1/64',
'dest_ip_prefix': '2001:db8:aaaa::1/64',
}
expected_template = {
'priority': 74,
'dl_type': constants.ETHERTYPE_IPV6,
'reg_port': self.port.ofport,
'ipv6_src': '2001:db8:bbbb::1/64',
'ipv6_dst': '2001:db8:aaaa::1/64',
}
self._test_create_flows_from_rule_and_port_helper(rule,
expected_template)
def test_create_flows_from_rule_and_port_src_and_dst_with_zero_ipv6(self):
rule = {
'ethertype': constants.IPv6,
'direction': constants.INGRESS_DIRECTION,
'source_ip_prefix': '2001:db8:bbbb::1/64',
'dest_ip_prefix': '::/0',
}
expected_template = {
'priority': 74,
'dl_type': constants.ETHERTYPE_IPV6,
'reg_port': self.port.ofport,
'ipv6_src': '2001:db8:bbbb::1/64',
}
self._test_create_flows_from_rule_and_port_helper(rule,
expected_template)
class TestCreateProtocolFlows(base.BaseTestCase):
def setUp(self):
super(TestCreateProtocolFlows, self).setUp()
ovs_port = mock.Mock(vif_mac='00:00:00:00:00:00')
ovs_port.ofport = 1
port_dict = {'device': 'port_id'}
self.port = ovsfw.OFPort(
port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG)
def _test_create_protocol_flows_helper(self, direction, rule,
expected_flows):
flow_template = {'some_settings': 'foo'}
for flow in expected_flows:
flow.update(flow_template)
flows = rules.create_protocol_flows(
direction, flow_template, self.port, rule)
self.assertEqual(expected_flows, flows)
def test_create_protocol_flows_ingress(self):
rule = {'protocol': constants.PROTO_NUM_TCP}
expected_flows = [{
'table': ovs_consts.RULES_INGRESS_TABLE,
'actions': 'output:1',
'nw_proto': constants.PROTO_NUM_TCP,
}]
self._test_create_protocol_flows_helper(
constants.INGRESS_DIRECTION, rule, expected_flows)
def test_create_protocol_flows_egress(self):
rule = {'protocol': constants.PROTO_NUM_TCP}
expected_flows = [{
'table': ovs_consts.RULES_EGRESS_TABLE,
'actions': 'resubmit(,{:d})'.format(
ovs_consts.ACCEPT_OR_INGRESS_TABLE),
'nw_proto': constants.PROTO_NUM_TCP,
}]
self._test_create_protocol_flows_helper(
constants.EGRESS_DIRECTION, rule, expected_flows)
def test_create_protocol_flows_no_protocol(self):
rule = {}
expected_flows = [{
'table': ovs_consts.RULES_EGRESS_TABLE,
'actions': 'resubmit(,{:d})'.format(
ovs_consts.ACCEPT_OR_INGRESS_TABLE),
}]
self._test_create_protocol_flows_helper(
constants.EGRESS_DIRECTION, rule, expected_flows)
def test_create_protocol_flows_icmp6(self):
rule = {'ethertype': constants.IPv6,
'protocol': constants.PROTO_NUM_IPV6_ICMP}
expected_flows = [{
'table': ovs_consts.RULES_EGRESS_TABLE,
'actions': 'resubmit(,{:d})'.format(
ovs_consts.ACCEPT_OR_INGRESS_TABLE),
'nw_proto': constants.PROTO_NUM_IPV6_ICMP,
}]
self._test_create_protocol_flows_helper(
constants.EGRESS_DIRECTION, rule, expected_flows)
def test_create_protocol_flows_port_range(self):
rule = {'ethertype': constants.IPv4,
'protocol': constants.PROTO_NUM_TCP,
'port_range_min': 22,
'port_range_max': 23}
expected_flows = [{
'table': ovs_consts.RULES_EGRESS_TABLE,
'actions': 'resubmit(,{:d})'.format(
ovs_consts.ACCEPT_OR_INGRESS_TABLE),
'nw_proto': constants.PROTO_NUM_TCP,
'tcp_dst': '0x0016/0xfffe'
}]
self._test_create_protocol_flows_helper(
constants.EGRESS_DIRECTION, rule, expected_flows)
def test_create_protocol_flows_icmp(self):
rule = {'ethertype': constants.IPv4,
'protocol': constants.PROTO_NUM_ICMP,
'port_range_min': 0}
expected_flows = [{
'table': ovs_consts.RULES_EGRESS_TABLE,
'actions': 'resubmit(,{:d})'.format(
ovs_consts.ACCEPT_OR_INGRESS_TABLE),
'nw_proto': constants.PROTO_NUM_ICMP,
'icmp_type': 0
}]
self._test_create_protocol_flows_helper(
constants.EGRESS_DIRECTION, rule, expected_flows)
def test_create_protocol_flows_ipv6_icmp(self):
rule = {'ethertype': constants.IPv6,
'protocol': constants.PROTO_NUM_IPV6_ICMP,
'port_range_min': 5,
'port_range_max': 0}
expected_flows = [{
'table': ovs_consts.RULES_EGRESS_TABLE,
'actions': 'resubmit(,{:d})'.format(
ovs_consts.ACCEPT_OR_INGRESS_TABLE),
'nw_proto': constants.PROTO_NUM_IPV6_ICMP,
'icmp_type': 5,
'icmp_code': 0,
}]
self._test_create_protocol_flows_helper(
constants.EGRESS_DIRECTION, rule, expected_flows)
class TestCreatePortRangeFlows(base.BaseTestCase):
def _test_create_port_range_flows_helper(self, expected_flows, rule):
flow_template = {'some_settings': 'foo'}
for flow in expected_flows:
flow.update(flow_template)
port_range_flows = rules.create_port_range_flows(flow_template, rule)
self.assertEqual(expected_flows, port_range_flows)
def test_create_port_range_flows_with_source_and_destination(self):
rule = {
'protocol': constants.PROTO_NUM_TCP,
'source_port_range_min': 123,
'source_port_range_max': 124,
'port_range_min': 10,
'port_range_max': 11,
}
expected_flows = [
{'tcp_src': '0x007b', 'tcp_dst': '0x000a/0xfffe'},
{'tcp_src': '0x007c', 'tcp_dst': '0x000a/0xfffe'},
]
self._test_create_port_range_flows_helper(expected_flows, rule)
def test_create_port_range_flows_with_source(self):
rule = {
'protocol': constants.PROTO_NUM_TCP,
'source_port_range_min': 123,
'source_port_range_max': 124,
}
expected_flows = [
{'tcp_src': '0x007b'},
{'tcp_src': '0x007c'},
]
self._test_create_port_range_flows_helper(expected_flows, rule)
def test_create_port_range_flows_with_destination(self):
rule = {
'protocol': constants.PROTO_NUM_TCP,
'port_range_min': 10,
'port_range_max': 11,
}
expected_flows = [
{'tcp_dst': '0x000a/0xfffe'},
]
self._test_create_port_range_flows_helper(expected_flows, rule)
def test_create_port_range_flows_without_port_range(self):
rule = {
'protocol': constants.PROTO_NUM_TCP,
}
expected_flows = []
self._test_create_port_range_flows_helper(expected_flows, rule)
def test_create_port_range_with_icmp_protocol(self):
        # NOTE: such a call is prevented by create_protocol_flows
rule = {
'protocol': constants.PROTO_NUM_ICMP,
'port_range_min': 10,
'port_range_max': 11,
}
expected_flows = []
self._test_create_port_range_flows_helper(expected_flows, rule)
class TestCreateFlowsForIpAddress(base.BaseTestCase):
    def _generate_conjunction_actions(self, conj_ids, offset):
return ','.join(
["conjunction(%d,1/2)" % (c + offset)
for c in conj_ids])
def test_create_flows_for_ip_address_egress(self):
expected_template = {
'table': ovs_consts.RULES_EGRESS_TABLE,
'priority': 72,
'dl_type': constants.ETHERTYPE_IP,
'reg_net': 0x123,
'nw_dst': '192.168.0.1/32'
}
conj_ids = [12, 20]
flows = rules.create_flows_for_ip_address(
('192.168.0.1', 'fa:16:3e:aa:bb:cc'),
constants.EGRESS_DIRECTION, constants.IPv4,
0x123, conj_ids)
self.assertEqual(2, len(flows))
self.assertEqual(ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY,
flows[0]['ct_state'])
self.assertEqual(ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
flows[1]['ct_state'])
for i in range(2):
            self.assertEqual(self._generate_conjunction_actions(conj_ids, i),
flows[i]['actions'])
for f in flows:
del f['actions']
del f['ct_state']
self.assertEqual(expected_template, f)
class TestCreateConjFlows(base.BaseTestCase):
def test_create_conj_flows(self):
ovs_port = mock.Mock(ofport=1, vif_mac='00:00:00:00:00:00')
port_dict = {'device': 'port_id'}
port = ovsfw.OFPort(
port_dict, ovs_port, vlan_tag=TESTING_VLAN_TAG)
conj_id = 1234
expected_template = {
'table': ovs_consts.RULES_INGRESS_TABLE,
'dl_type': constants.ETHERTYPE_IPV6,
'priority': 71,
'conj_id': conj_id,
'reg_port': port.ofport
}
flows = rules.create_conj_flows(port, conj_id,
constants.INGRESS_DIRECTION,
constants.IPv6)
self.assertEqual(ovsfw_consts.OF_STATE_ESTABLISHED_NOT_REPLY,
flows[0]['ct_state'])
self.assertEqual(ovsfw_consts.OF_STATE_NEW_NOT_ESTABLISHED,
flows[1]['ct_state'])
self.assertEqual("output:{:d}".format(port.ofport),
flows[0]['actions'])
self.assertEqual("ct(commit,zone=NXM_NX_REG{:d}[0..15]),{:s},"
"resubmit(,{:d})".format(
ovsfw_consts.REG_NET, flows[0]['actions'],
ovs_consts.ACCEPTED_INGRESS_TRAFFIC_TABLE),
flows[1]['actions'])
for f in flows:
del f['actions']
del f['ct_state']
self.assertEqual(expected_template, f)
expected_template['conj_id'] += 1
class TestMergeRules(base.BaseTestCase):
def setUp(self):
super(TestMergeRules, self).setUp()
self.rule_tmpl = [('direction', 'ingress'), ('ethertype', 'IPv4'),
('protocol', 6)]
def _test_merge_port_ranges_helper(self, expected, result):
"""Take a list of (port_range_min, port_range_max, conj_ids)
and an output from rules.merge_port_ranges and check if they
are identical, ignoring the other rule fields.
"""
self.assertEqual(len(expected), len(result))
for (range_min, range_max, conj_ids), result1 in zip(
expected, result):
self.assertEqual(range_min, result1[0].get('port_range_min'))
self.assertEqual(range_max, result1[0].get('port_range_max'))
self.assertEqual(conj_ids, set(result1[1]))
def test__assert_mergeable_rules(self):
self.assertRaises(RuntimeError,
rules._assert_mergeable_rules,
[({'direction': 'ingress', 'ethertype': 'IPv4',
'protocol': 1}, 8),
({'direction': 'ingress', 'ethertype': 'IPv6'},
16)])
def test_merge_common_rules_single(self):
rule_conj_tuple = ({'direction': 'egress', 'ethertype': 'IPv4',
'protocol': 1}, 8)
result = rules.merge_common_rules([rule_conj_tuple])
self.assertEqual([(rule_conj_tuple[0], [rule_conj_tuple[1]])],
result)
def test_merge_common_rules(self):
rule_conj_list = [({'direction': 'ingress', 'ethertype': 'IPv4',
'protocol': 1}, 8),
({'direction': 'ingress', 'ethertype': 'IPv4',
'protocol': 1, 'port_range_min': 3}, 16),
({'direction': 'ingress', 'ethertype': 'IPv4',
'protocol': 1, 'port_range_min': 3,
'port_range_max': 0}, 40),
({'direction': 'ingress', 'ethertype': 'IPv4',
'protocol': 1}, 24)]
result = rules.merge_common_rules(rule_conj_list)
self.assertCountEqual(
[({'direction': 'ingress', 'ethertype': 'IPv4',
'protocol': 1}, [8, 24]),
({'direction': 'ingress', 'ethertype': 'IPv4',
'protocol': 1, 'port_range_min': 3}, [16]),
({'direction': 'ingress', 'ethertype': 'IPv4',
'protocol': 1, 'port_range_min': 3, 'port_range_max': 0},
[40])],
result)
def test_merge_port_ranges_overlapping(self):
result = rules.merge_port_ranges(
[(dict([('port_range_min', 20), ('port_range_max', 30)] +
self.rule_tmpl), 6),
(dict([('port_range_min', 30), ('port_range_max', 40)] +
self.rule_tmpl), 14),
(dict([('port_range_min', 35), ('port_range_max', 40)] +
self.rule_tmpl), 22),
(dict([('port_range_min', 20), ('port_range_max', 20)] +
self.rule_tmpl), 30)])
self._test_merge_port_ranges_helper([
# port_range_min, port_range_max, conj_ids
(20, 20, {6, 30}),
(21, 29, {6}),
(30, 30, {6, 14}),
(31, 34, {14}),
(35, 40, {14, 22})], result)
def test_merge_port_ranges_no_port_ranges(self):
result = rules.merge_port_ranges(
[(dict(self.rule_tmpl), 10),
(dict(self.rule_tmpl), 12),
(dict([('port_range_min', 30), ('port_range_max', 40)] +
self.rule_tmpl), 4)])
self._test_merge_port_ranges_helper([
(1, 29, {10, 12}),
(30, 40, {10, 12, 4}),
(41, 65535, {10, 12})], result)
def test_merge_port_ranges_no_port_ranges_same_conj_id(self):
result = rules.merge_port_ranges(
[(dict(self.rule_tmpl), 10),
(dict(self.rule_tmpl), 12),
(dict([('port_range_min', 30), ('port_range_max', 30)] +
self.rule_tmpl), 10)])
self._test_merge_port_ranges_helper([
(None, None, {10, 12})], result)
def test_merge_port_ranges_nonoverlapping(self):
result = rules.merge_port_ranges(
[(dict([('port_range_min', 30), ('port_range_max', 40)] +
self.rule_tmpl), 32),
(dict([('port_range_min', 100), ('port_range_max', 140)] +
self.rule_tmpl), 40)])
self._test_merge_port_ranges_helper(
[(30, 40, {32}), (100, 140, {40})], result)
class TestFlowPriority(base.BaseTestCase):
def test_flow_priority_offset(self):
self.assertEqual(0,
rules.flow_priority_offset(
{'foo': 'bar',
'remote_group_id': 'hoge'}))
self.assertEqual(4,
rules.flow_priority_offset({'foo': 'bar'}))
self.assertEqual(5,
rules.flow_priority_offset(
{'protocol': constants.PROTO_NUM_ICMP}))
self.assertEqual(7,
rules.flow_priority_offset(
{'protocol': constants.PROTO_NUM_TCP}))
self.assertEqual(6,
rules.flow_priority_offset(
{'protocol': constants.PROTO_NUM_ICMP,
'port_range_min': 0}))
self.assertEqual(7,
rules.flow_priority_offset(
{'protocol': constants.PROTO_NUM_IPV6_ICMP,
'port_range_min': 0, 'port_range_max': 0}))
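# Illustrative helper (hypothetical, not part of the original test module): the
# "value/mask" port strings asserted above (e.g. '0x0016/0xfffe' for the 22-23
# destination range, '0x007b' for the single source port 123) encode
# power-of-two aligned port ranges; the mask wildcards the low bits of the port.
def _example_port_mask_matches(port_mask, port):
    """Return True if ``port`` matches an OpenFlow ``value[/mask]`` string."""
    value, _, mask = port_mask.partition('/')
    mask_int = int(mask, 16) if mask else 0xffff
    return (port & mask_int) == (int(value, 16) & mask_int)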
|
mahak/neutron
|
neutron/tests/unit/agent/linux/openvswitch_firewall/test_rules.py
|
Python
|
apache-2.0
| 21,451
|
#!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
import argparse
from contextlib import closing
import logging
from io import StringIO
from lib.utility import misc
from scli.resources import CommandType
from scli.resources import CLISwitch
from scli.resources import CLISwitchMsg
from scli.resources import EBSCliAttr
from scli.constants import ServiceDefault
from scli.constants import ServiceRegionId
from scli.constants import ParameterName
from scli.constants import ParameterSource
from scli.parameter import Parameter
from scli.exception import ArgumentError
log = logging.getLogger('cli')
def _word_join(word_list, separator = ''):
x = separator.join(map(str, word_list))
return x
def command(string):
command = str(string)
for item in CommandType:
if item.lower() == command.lower().strip():
return item
raise AttributeError(str.format(EBSCliAttr.InvalidCommand, command))
def _init_parser(parser):
commands = ', '.join(map(str.lower, CommandType))
parser.add_argument(CLISwitch[ParameterName.Command],
type = command,
metavar = 'COMMAND', help = commands)
# AWS credential
parser.add_argument('-I', '--' + CLISwitch[ParameterName.AwsAccessKeyId],
dest = ParameterName.AwsAccessKeyId,
metavar = 'ACCESS_KEY_ID',
help = CLISwitchMsg[ParameterName.AwsAccessKeyId])
parser.add_argument('-S', '--' + CLISwitch[ParameterName.AwsSecretAccessKey],
dest = ParameterName.AwsSecretAccessKey,
metavar = 'SECRET_ACCESS_KEY',
help = CLISwitchMsg[ParameterName.AwsSecretAccessKey])
parser.add_argument('--' + CLISwitch[ParameterName.AwsCredentialFile],
dest = ParameterName.AwsCredentialFile,
metavar = 'FILE_PATH_NAME',
help = CLISwitchMsg[ParameterName.AwsCredentialFile])
# Application/environment
parser.add_argument('-s', '--' + CLISwitch[ParameterName.SolutionStack],
dest = ParameterName.SolutionStack, nargs = '+',
metavar = '',
help = CLISwitchMsg[ParameterName.SolutionStack])
parser.add_argument('-a', '--' + CLISwitch[ParameterName.ApplicationName],
dest = ParameterName.ApplicationName,
metavar = 'APPLICATION_NAME',
help = CLISwitchMsg[ParameterName.ApplicationName])
parser.add_argument('-l', '--' + CLISwitch[ParameterName.ApplicationVersionName],
dest = ParameterName.ApplicationVersionName,
metavar = 'VERSION_LABEL',
help = CLISwitchMsg[ParameterName.ApplicationVersionName])
parser.add_argument('-e', '--' + CLISwitch[ParameterName.EnvironmentName],
dest = ParameterName.EnvironmentName,
metavar = 'ENVIRONMENT_NAME',
help = CLISwitchMsg[ParameterName.EnvironmentName])
# Output
parser.add_argument('--' + CLISwitch[ParameterName.Verbose],
action = 'store_const', const = ServiceDefault.ENABLED,
dest = ParameterName.Verbose,
metavar = '',
help = CLISwitchMsg[ParameterName.Verbose])
parser.add_argument('-f', '--' + CLISwitch[ParameterName.Force],
action = 'store_const', const = ServiceDefault.ENABLED,
dest = ParameterName.Force,
metavar = '',
help = CLISwitchMsg[ParameterName.Force])
# Service
parser.add_argument('--' + CLISwitch[ParameterName.WaitForFinishTimeout], type = int,
dest = ParameterName.WaitForFinishTimeout,
metavar = 'TIMEOUT_IN_SEC',
help = str.format(CLISwitchMsg[ParameterName.WaitForFinishTimeout],
ServiceDefault.WAIT_TIMEOUT_IN_SEC))
parser.add_argument('--' + CLISwitch[ParameterName.Region],
dest = ParameterName.Region,
metavar = 'REGION',
help = CLISwitchMsg[ParameterName.Region])
parser.add_argument('--' + CLISwitch[ParameterName.ServiceEndpoint],
dest = ParameterName.ServiceEndpoint,
metavar = 'ENDPOINT',
help = CLISwitchMsg[ParameterName.ServiceEndpoint])
# SCli Helper switch
parser.add_argument('--version', action='version', version=EBSCliAttr.Version)
def parse(parameter_pool, line = None):
''' Parse command arguments'''
parser = ArgumentParser(description = EBSCliAttr.Name,
usage = EBSCliAttr.Usage)
_init_parser(parser)
if line is not None:
args = vars(parser.parse_args(line.split()))
else:
args = vars(parser.parse_args())
    # Post processing
if args[ParameterName.SolutionStack] is not None:
solution_stack = _word_join(args[ParameterName.SolutionStack], ' ')
args[ParameterName.SolutionStack] = solution_stack
if args[ParameterName.Region] is not None:
region_id = args[ParameterName.Region]
region = list(ServiceRegionId.keys())[list(ServiceRegionId.values()).index(region_id)]
args[ParameterName.Region] = region
# Store command line arguments into parameter pool
for arg, value in args.items():
if value is not None:
arg = misc.to_unicode(arg)
value = misc.to_unicode(value)
if arg == CLISwitch[ParameterName.Command]:
parameter_pool.put(Parameter(ParameterName.Command,
value,
ParameterSource.CliArgument))
else:
parameter_pool.put(Parameter(arg,
value,
ParameterSource.CliArgument))
log.info('Finished parsing command line arguments')
if log.isEnabledFor(logging.DEBUG):
log.debug('Received arguments: {0}'.\
format(misc.collection_to_string(parameter_pool.parameter_names)))
return args
class ArgumentParser(argparse.ArgumentParser):
'''Subclass of argparse.ArgumentParser to override behavior of error()'''
def error(self, error_message):
with closing(StringIO()) as usage:
self.print_usage(usage)
message = EBSCliAttr.ErrorMsg.format(error_message, usage.getvalue(), self.prog)
raise ArgumentError(message)
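# Illustrative sketch (hypothetical, not part of the original module): the same
# error-override pattern in isolation, using only the standard library. By
# overriding error(), argparse no longer exits the process on bad input and the
# usage text is surfaced through an ordinary exception instead.
if __name__ == '__main__':
    class _DemoParser(argparse.ArgumentParser):
        def error(self, error_message):
            raise ValueError('{0} ({1})'.format(error_message,
                                                self.format_usage().strip()))
    _demo = _DemoParser(prog='demo')
    _demo.add_argument('--count', type=int)
    try:
        _demo.parse_args(['--count', 'not-a-number'])
    except ValueError as e:
        print(e)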
|
JoaoVasques/aws-devtool
|
eb/macosx/python3/scli/cli_parse.py
|
Python
|
apache-2.0
| 7,634
|
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
import pyxb.binding.facets
import os.path
import sys
import six
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:simpleType name="tUC">
<xs:annotation><xs:documentation>Simple type to represent an ASCII upper-case letter</xs:documentation></xs:annotation>
<xs:restriction base="xs:string">
<xs:pattern value="[A-Z]"/>
</xs:restriction>
</xs:simpleType>
<xs:element name="UC" type="tUC"/>
</xs:schema>'''
#open('schema.xsd', 'w').write(xsd)
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#open('code.py', 'w').write(code)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
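# Evaluating the compiled code object in the module globals is what makes the
# generated bindings (UC, tUC, ...) available to the test cases below without
# an explicit import.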
from pyxb.exceptions_ import *
import unittest
class TestTrac_0190 (unittest.TestCase):
def testBasic (self):
i = UC('A')
self.assertEqual(i, 'A')
if sys.version_info[:2] < (2, 7):
self.assertRaises(pyxb.SimpleFacetValueError, UC, 'a')
else:
with self.assertRaises(pyxb.SimpleFacetValueError) as cm:
i = UC('a')
e = cm.exception
self.assertEqual(e.type, tUC)
self.assertEqual(e.value, 'a')
self.assertTrue(isinstance(e.facet, pyxb.binding.facets.CF_pattern))
self.assertEqual(e.details(), 'Type tUC pattern constraint violated by value a')
def testUnicode (self):
if sys.version_info[:2] < (2, 7):
self.assertRaises(pyxb.SimpleFacetValueError, UC, six.unichr(0xf6))
else:
with self.assertRaises(pyxb.SimpleFacetValueError) as cm:
i = UC(six.unichr(0xf6))
e = cm.exception
self.assertEqual(e.type, tUC)
self.assertEqual(e.value, six.unichr(0xf6))
self.assertTrue(isinstance(e.facet, pyxb.binding.facets.CF_pattern))
if six.PY2:
self.assertRaises(UnicodeEncodeError, str, e.details())
self.assertEqual(e.details(), six.u('Type tUC pattern constraint violated by value \xf6'))
if __name__ == '__main__':
unittest.main()
|
pabigot/pyxb
|
tests/trac/test-trac-0190.py
|
Python
|
apache-2.0
| 2,294
|
#!/usr/bin/python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import thread
import time
import dfconvert
exitFlag = 0
class dfconvertThread (threading.Thread):
def __init__(self, threadID, name, input, abstract):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.input = input
self.abstract = abstract
def run(self):
print "Starting " + self.name
if exitFlag:
thread.exit()
        status = dfconvert.get(self.input, self.abstract)
print "%s: %s status:%s" % (self.name, time.ctime(time.time()), status)
print "Exiting " + self.name
# Create new threads
thread1 = dfconvertThread(1, "Thread-1", "input.docx", "output1.html")
thread2 = dfconvertThread(2, "Thread-2", "input.docx", "output2.html")
thread3 = dfconvertThread(3, "Thread-3", "input.docx", "output3.html")
thread4 = dfconvertThread(4, "Thread-4", "input.docx", "output4.html")
thread5 = dfconvertThread(5, "Thread-5", "input.docx", "output5.html")
# Start new Threads
thread1.start()
thread2.start()
thread3.start()
thread4.start()
thread5.start()
print "Exiting Main Thread"
|
apache/incubator-corinthia
|
experiments/dfwebserver/python/testSubprocess.py
|
Python
|
apache-2.0
| 1,667
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class AccountsView(object):
def __init__(self, accounts_summary):
self.accounts_summary = accounts_summary
def data(self):
return {'accounts': self.accounts_summary.accounts}
class AccountView(object):
def __init__(self, account):
self.account = account
def data(self):
instance_list = [InstanceView(instance).data()
for instance in self.account.instances]
return {
'account': {
'id': self.account.id,
'instances': instance_list,
}
}
class InstanceView(object):
def __init__(self, instance):
self.instance = instance
def data(self):
server_host = None
if self.instance.server is not None:
server_host = self.instance.server.host
return {'id': self.instance.id,
'status': self.instance.status,
'name': self.instance.name,
'host': server_host,
}
|
citrix-openstack-build/trove
|
trove/extensions/account/views.py
|
Python
|
apache-2.0
| 1,652
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Benchmarks for low-level eager execution primitives.
To run CPU benchmarks:
bazel run -c opt benchmarks_test -- --benchmarks=.
To run GPU benchmarks:
bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \
--benchmarks=.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import keras
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
def c_tfe_py_fastpath_execute(a,
b,
transpose_a=False,
transpose_b=False,
name=None):
ctx = context.context()
assert ctx.executing_eagerly(
), "The prototype doesn't contain C code for graph construction"
try:
return pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", name,
ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a,
"transpose_b", transpose_b)
except core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
six.raise_from(core._status_to_exception(e.code, message), None)
class SubclassedKerasModel(keras.Model):
def __init__(self):
super(SubclassedKerasModel, self).__init__()
self.layer = keras.layers.Dense(
10, kernel_initializer="ones", bias_initializer="zeros")
def call(self, x):
return self.layer(x)
def make_keras_model():
x = keras.Input(shape=(10,))
y = keras.layers.Dense(
10, kernel_initializer="ones", bias_initializer="zeros")(
x)
return keras.Model(inputs=x, outputs=y)
class MicroBenchmarks(test.Benchmark):
def __init__(self):
# used for multiply benchmarks
self._m_2 = random_ops.random_uniform([2])
# used for matmul benchmarks
self._m_2_by_2 = random_ops.random_uniform((2, 2))
self._m_100_by_784 = random_ops.random_uniform((100, 784))
self._num_iters_2_by_2 = 30000
self._num_iters_100_by_784 = 1000
def _run(self, func, num_iters, execution_mode=None):
# call func to maybe warm up the GPU
ctx = context.context()
with ctx.execution_mode(execution_mode):
func()
if execution_mode == context.ASYNC:
ctx.async_wait()
start = time.time()
for _ in xrange(num_iters):
func()
if execution_mode == context.ASYNC:
ctx.async_wait()
end = time.time()
mean_us = (end - start) * 1e6 / num_iters
self.report_benchmark(
iters=num_iters,
wall_time=mean_us,
extras={"examples_per_sec": num_iters / (end - start)})
def benchmark_create_np_array(self):
func = lambda: np.array([3.0])
self._run(func, 30000)
def _benchmark_create_tensor(self, value, dtype, device):
"""Benchmark overheads of creating a Tensor object."""
ctx = context.context()
handle = ctx._handle
if device == GPU:
# Warmup the GPU
ops.EagerTensor(value, context=handle, device=device)
def func():
ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
self._run(func, 30000)
def benchmark_create_float_tensor_from_list_CPU(self):
self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, CPU)
def benchmark_create_float_tensor_from_np_array_CPU(self):
self._benchmark_create_tensor(
np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
CPU)
def benchmark_create_int32_tensor_from_list_CPU(self):
self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, CPU)
def benchmark_create_int32_tensor_from_np_array_CPU(self):
self._benchmark_create_tensor(
np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, CPU)
def benchmark_create_float_tensor_from_list_GPU(self):
if not context.num_gpus():
return
self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU)
def benchmark_create_float_tensor_from_np_array_GPU(self):
if not context.num_gpus():
return
self._benchmark_create_tensor(
np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum,
GPU)
def benchmark_create_int32_tensor_from_list_GPU(self):
# int32's are kept on host memory even when executing on GPU.
if not context.num_gpus():
return
self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU)
def benchmark_create_int32_tensor_from_np_array_GPU(self):
# int32's are kept on host memory even when executing on GPU.
if not context.num_gpus():
return
self._benchmark_create_tensor(
np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU)
def _benchmark_np_multiply(self, m, num_iters):
a = m.cpu().numpy()
func = lambda: a * a
self._run(func, num_iters)
def _benchmark_tf_multiply(self, m, num_iters):
func = lambda: m * m
self._run(func, num_iters)
def _benchmark_tf_multiply_op(self, m, num_iters):
func = lambda: math_ops.multiply(m, m)
self._run(func, num_iters)
def benchmark_np_multiply(self):
self._benchmark_np_multiply(self._m_2, 30000)
def benchmark_tf_multiply_CPU(self):
with context.device(CPU):
m = self._m_2.cpu()
self._benchmark_tf_multiply(m, 30000)
def benchmark_tf_multiply_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2.gpu()
self._benchmark_tf_multiply(m, 30000)
def benchmark_tf_multiply_op_CPU(self):
with context.device(CPU):
m = self._m_2.cpu()
self._benchmark_tf_multiply_op(m, 30000)
def benchmark_tf_multiply_op_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2.gpu()
self._benchmark_tf_multiply_op(m, 30000)
def benchmark_tf_identity(self):
m = self._m_2
self._run(lambda: gen_array_ops.identity(m), 30000)
def benchmark_slowpath_tf_identity(self):
self._run(lambda: gen_array_ops.identity(1), 30000)
def benchmark_tfe_py_execute_identity(self):
m = self._m_2
ctx_handle = context.context()._handle
attrs = ("T", self._m_2.dtype.as_datatype_enum)
inputs = [m]
def f():
pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "Identity", inputs,
attrs, 1)
self._run(f, 30000)
def benchmark_tf_gradient_function_identity(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
self._run(
lambda: backprop.gradients_function(gen_array_ops.identity, [0])(m),
30000)
def benchmark_tf_gradient_forward_identity(self):
with backprop.GradientTape() as tape:
m = self._m_2
tape.watch(m)
self._run(lambda: gen_array_ops.identity(m), 30000)
def benchmark_tf_gradient_tape_push_pop(self):
def f():
with backprop.GradientTape():
pass
self._run(f, 30000)
def benchmark_tf_gradient_function_no_op(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
self._run(lambda: backprop.gradients_function(lambda x: x, [0])(m), 30000)
def _benchmark_np_matmul(self, m, transpose_b, num_iters):
a = m.cpu().numpy()
b = a.T if transpose_b else a
func = lambda: np.dot(a, b)
self._run(func, num_iters)
def _benchmark_tf_matmul(self, m, transpose_b, num_iters,
execution_mode=None):
func = lambda: math_ops.matmul(m, m, transpose_b=transpose_b)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters):
def func():
gen_math_ops.mat_mul(m, m, transpose_b=transpose_b)
self._run(func, num_iters)
def _benchmark_tfe_py_fastpath_execute_matmul(self, m, transpose_b,
num_iters):
def func():
c_tfe_py_fastpath_execute(m, m, transpose_b=transpose_b)
self._run(func, num_iters)
def _benchmark_tfe_py_execute_matmul(self, m, transpose_b, num_iters):
inputs = [m, m]
# pylint: disable=protected-access
ctx_handle = context.context()._handle
# pylint: enable=protected-access
device = context.context().device_name
attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
m.dtype.as_datatype_enum)
def func():
pywrap_tensorflow.TFE_Py_Execute(ctx_handle, device, "MatMul", inputs,
attrs, 1)
self._run(func, num_iters)
def _benchmark_defun_matmul(self,
m,
transpose_b,
num_iters,
execution_mode=None):
f = function.defun(math_ops.matmul)
func = lambda: f(m, m, transpose_b)
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_read_variable(self, m, num_iters):
self._run(m.value, num_iters)
def _benchmark_matmul_read_variable(self, m, num_iters):
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=num_iters)
def _benchmark_matmul_read_variable_with_tape(self, m, num_iters):
with backprop.GradientTape() as tape:
tape.watch(m)
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=num_iters)
def _benchmark_read_variable_with_tape(self, m, num_iters):
with backprop.GradientTape() as tape:
tape.watch(m)
self._run(m.value, num_iters)
# Benchmarks for A^2, A of dimension 2 by 2.
def benchmark_np_matmul_2_by_2(self):
self._benchmark_np_matmul(
self._m_2_by_2, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tf_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_fastpath_execute_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tfe_py_fastpath_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_execute_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_CPU(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_CPU_async(self):
with context.device(CPU):
m = self._m_2_by_2.cpu()
self._benchmark_defun_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_tf_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tf_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tf_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tf_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul(
m, transpose_b=False, num_iters=self._num_iters_2_by_2)
def benchmark_defun_matmul_2_by_2_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_2_by_2.gpu()
self._benchmark_defun_matmul(
m,
transpose_b=False,
num_iters=self._num_iters_2_by_2,
execution_mode=context.ASYNC)
# Benchmarks for AA.T, A of dimension 100 by 784.
def benchmark_np_matmul_100_by_784(self):
self._benchmark_np_matmul(
self._m_100_by_784,
transpose_b=True,
num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_CPU_async(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tf_matmul(
m,
transpose_b=True,
num_iters=self._num_iters_100_by_784,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_fastpath_execute_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tfe_py_fastpath_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_defun_matmul_100_by_784_CPU(self):
with context.device(CPU):
m = self._m_100_by_784.cpu()
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tf_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tf_matmul_100_by_784_GPU_async(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tf_matmul(
m,
transpose_b=True,
num_iters=self._num_iters_100_by_784,
execution_mode=context.ASYNC)
def benchmark_gen_math_ops_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_gen_math_ops_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_tfe_py_execute_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_defun_matmul_100_by_784_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = self._m_100_by_784.gpu()
self._benchmark_defun_matmul(
m, transpose_b=True, num_iters=self._num_iters_100_by_784)
def benchmark_matmul_read_variable_op_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_matmul_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_matmul_read_variable_op_with_tape_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_matmul_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_with_tape_2_by_2_CPU(self):
with context.device(CPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2)
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_read_variable_op_with_tape_2_by_2_GPU(self):
if not context.num_gpus():
return
with context.device(GPU):
m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu())
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
def benchmark_keras_model_subclassed(self):
model = SubclassedKerasModel()
data = random_ops.random_uniform((10, 10))
func = lambda: model(data)
# First call is more expensive (creates variables etc.), discount that.
func()
# The whole point of this test is to contrast subclassing with
# the functional style of keras model building, so validate that
# the models are equivalent.
assert np.equal(func(), make_keras_model()(data)).all()
self._run(func, 30000)
def benchmark_keras_model_functional(self):
model = make_keras_model()
data = random_ops.random_uniform((10, 10))
func = lambda: model(data)
# Symmetry with benchmark_keras_model_subclassed
func()
assert np.equal(func(), SubclassedKerasModel()(data)).all()
self._run(func, 30000)
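# Illustrative sketch (hypothetical helper, not part of the original benchmark
# file): the timing recipe used by MicroBenchmarks._run above, reduced to plain
# Python -- one warm-up call, then the mean per-iteration wall time in
# microseconds over num_iters calls.
def _example_mean_wall_time_us(func, num_iters):
  func()  # warm-up call, excluded from the timed loop
  start = time.time()
  for _ in xrange(num_iters):
    func()
  end = time.time()
  return (end - start) * 1e6 / num_iters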
if __name__ == "__main__":
test.main()
|
aselle/tensorflow
|
tensorflow/python/eager/benchmarks_test.py
|
Python
|
apache-2.0
| 20,015
|
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from neutron.common import utils as n_utils
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config
from neutron.plugins.ml2.drivers.mech_sriov.agent \
import sriov_nic_agent as agent
from neutron.tests import base
class TestSriovAgentConfig(base.BaseTestCase):
EXCLUDE_DEVICES_LIST = ['p7p1:0000:07:00.1;0000:07:00.2',
'p3p1:0000:04:00.3']
EXCLUDE_DEVICES_LIST_INVALID = ['p7p2:0000:07:00.1;0000:07:00.2']
EXCLUDE_DEVICES_WITH_SPACES_LIST = ['p7p1: 0000:07:00.1 ; 0000:07:00.2',
'p3p1:0000:04:00.3 ']
EXCLUDE_DEVICES_WITH_SPACES_ERROR = ['p7p1',
'p3p1:0000:04:00.3 ']
EXCLUDE_DEVICES = {'p7p1': set(['0000:07:00.1', '0000:07:00.2']),
'p3p1': set(['0000:04:00.3'])}
DEVICE_MAPPING_LIST = ['physnet7:p7p1',
'physnet3:p3p1']
DEVICE_MAPPING_WITH_ERROR_LIST = ['physnet7',
'physnet3:p3p1']
DEVICE_MAPPING_WITH_SPACES_LIST = ['physnet7 : p7p1',
'physnet3 : p3p1 ']
DEVICE_MAPPING = {'physnet7': 'p7p1',
'physnet3': 'p3p1'}
def test_defaults(self):
self.assertEqual(config.DEFAULT_DEVICE_MAPPINGS,
cfg.CONF.SRIOV_NIC.physical_device_mappings)
self.assertEqual(config.DEFAULT_EXCLUDE_DEVICES,
cfg.CONF.SRIOV_NIC.exclude_devices)
self.assertEqual(2,
cfg.CONF.AGENT.polling_interval)
def test_device_mappings(self):
cfg.CONF.set_override('physical_device_mappings',
self.DEVICE_MAPPING_LIST,
'SRIOV_NIC')
device_mappings = n_utils.parse_mappings(
cfg.CONF.SRIOV_NIC.physical_device_mappings)
self.assertEqual(self.DEVICE_MAPPING, device_mappings)
def test_device_mappings_with_error(self):
cfg.CONF.set_override('physical_device_mappings',
self.DEVICE_MAPPING_WITH_ERROR_LIST,
'SRIOV_NIC')
self.assertRaises(ValueError, n_utils.parse_mappings,
cfg.CONF.SRIOV_NIC.physical_device_mappings)
def test_device_mappings_with_spaces(self):
cfg.CONF.set_override('physical_device_mappings',
self.DEVICE_MAPPING_WITH_SPACES_LIST,
'SRIOV_NIC')
device_mappings = n_utils.parse_mappings(
cfg.CONF.SRIOV_NIC.physical_device_mappings)
self.assertEqual(self.DEVICE_MAPPING, device_mappings)
def test_exclude_devices(self):
cfg.CONF.set_override('exclude_devices',
self.EXCLUDE_DEVICES_LIST,
'SRIOV_NIC')
exclude_devices = config.parse_exclude_devices(
cfg.CONF.SRIOV_NIC.exclude_devices)
self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices)
def test_exclude_devices_with_spaces(self):
cfg.CONF.set_override('exclude_devices',
self.EXCLUDE_DEVICES_WITH_SPACES_LIST,
'SRIOV_NIC')
exclude_devices = config.parse_exclude_devices(
cfg.CONF.SRIOV_NIC.exclude_devices)
self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices)
def test_exclude_devices_with_error(self):
cfg.CONF.set_override('exclude_devices',
self.EXCLUDE_DEVICES_WITH_SPACES_ERROR,
'SRIOV_NIC')
self.assertRaises(ValueError, config.parse_exclude_devices,
cfg.CONF.SRIOV_NIC.exclude_devices)
def test_validate_config_ok(self):
cfg.CONF.set_override('physical_device_mappings',
self.DEVICE_MAPPING_LIST,
'SRIOV_NIC')
cfg.CONF.set_override('exclude_devices',
self.EXCLUDE_DEVICES_LIST,
'SRIOV_NIC')
config_parser = agent.SriovNicAgentConfigParser()
config_parser.parse()
device_mappings = config_parser.device_mappings
exclude_devices = config_parser.exclude_devices
self.assertEqual(self.EXCLUDE_DEVICES, exclude_devices)
self.assertEqual(self.DEVICE_MAPPING, device_mappings)
def test_validate_config_fail(self):
cfg.CONF.set_override('physical_device_mappings',
self.DEVICE_MAPPING_LIST,
'SRIOV_NIC')
cfg.CONF.set_override('exclude_devices',
self.EXCLUDE_DEVICES_LIST_INVALID,
'SRIOV_NIC')
config_parser = agent.SriovNicAgentConfigParser()
self.assertRaises(ValueError, config_parser.parse)
|
wolverineav/neutron
|
neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py
|
Python
|
apache-2.0
| 5,542
|
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://github.com/CSTR-Edinburgh/merlin
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import sys
if sys.version_info.major >= 3:
import configparser
else:
import ConfigParser as configparser
import logging
import os
class configuration(object):
def __init__(self):
        pass
def configure(self, configFile=None):
# get a logger
logger = logging.getLogger("configuration")
# this (and only this) logger needs to be configured immediately, otherwise it won't work
# we can't use the full user-supplied configuration mechanism in this particular case,
# because we haven't loaded it yet!
#
# so, just use simple console-only logging
logger.setLevel(logging.DEBUG) # this level is hardwired here - should change it to INFO
# add a handler & its formatter - will write only to console
ch = logging.StreamHandler()
logger.addHandler(ch)
formatter = logging.Formatter('%(asctime)s %(levelname)8s%(name)15s: %(message)s')
ch.setFormatter(formatter)
# first, set up some default configuration values
self.initial_configuration()
# next, load in any user-supplied configuration values
# that might over-ride the default values
self.user_configuration(configFile)
# finally, set up all remaining configuration values
# that depend upon either default or user-supplied values
self.complete_configuration()
logger.debug('configuration completed')
def initial_configuration(self):
# to be called before loading any user specific values
# things to put here are
# 1. variables that the user cannot change
# 2. variables that need to be set before loading the user's config file
UTTID_REGEX = '(.*)\..*'
def user_configuration(self,configFile=None):
# get a logger
logger = logging.getLogger("configuration")
# load and parse the provided configFile, if provided
if not configFile:
logger.warn('no user configuration file provided; using only built-in default settings')
return
# load the config file
try:
cfgparser = configparser.ConfigParser()
cfgparser.readfp(open(configFile))
logger.debug('successfully read and parsed user configuration file %s' % configFile)
except:
logger.fatal('error reading user configuration file %s' % configFile)
raise
#work_dir must be provided before initialising other directories
self.work_dir = None
        if self.work_dir is None:
            try:
                self.work_dir = cfgparser.get('Paths', 'work')
            except (configparser.NoSectionError, configparser.NoOptionError):
                if self.work_dir is None:
                    logger.critical('Paths:work has no value!')
                    raise Exception('Paths:work has no value!')
# default place for some data
self.data_dir = os.path.join(self.work_dir, 'data')
self.keras_dir = os.path.join(self.work_dir, 'keras')
self.gen_dir = os.path.join(self.keras_dir, 'gen')
self.model_dir = os.path.join(self.keras_dir, 'models')
self.stats_dir = os.path.join(self.keras_dir, 'stats')
self.inter_data_dir = os.path.join(self.work_dir, 'inter_module')
self.def_inp_dir = os.path.join(self.inter_data_dir, 'nn_no_silence_lab_norm_425')
self.def_out_dir = os.path.join(self.inter_data_dir, 'nn_norm_mgc_lf0_vuv_bap_187')
impossible_int=int(-99999)
impossible_float=float(-99999.0)
user_options = [
# Paths
('work_dir', self.work_dir, 'Paths','work'),
('data_dir', self.data_dir, 'Paths','data'),
('inp_feat_dir', self.def_inp_dir, 'Paths', 'inp_feat'),
('out_feat_dir', self.def_out_dir, 'Paths', 'out_feat'),
('model_dir', self.model_dir, 'Paths', 'models'),
('stats_dir', self.stats_dir, 'Paths', 'stats'),
('gen_dir' , self.gen_dir, 'Paths', 'gen'),
('file_id_scp', os.path.join(self.data_dir, 'file_id_list.scp'), 'Paths', 'file_id_list'),
('test_id_scp', os.path.join(self.data_dir, 'test_id_list.scp'), 'Paths', 'test_id_list'),
# Input-Output
('inp_dim', 425, 'Input-Output', 'inp_dim'),
('out_dim', 187, 'Input-Output', 'out_dim'),
('inp_file_ext', '.lab', 'Input-Output', 'inp_file_ext'),
('out_file_ext', '.cmp', 'Input-Output', 'out_file_ext'),
('inp_norm', 'MINMAX', 'Input-Output', 'inp_norm'),
('out_norm', 'MINMAX', 'Input-Output', 'out_norm'),
# Architecture
('hidden_layer_type', ['TANH', 'TANH', 'TANH', 'TANH', 'TANH', 'TANH'], 'Architecture', 'hidden_layer_type'),
('hidden_layer_size', [ 1024 , 1024 , 1024 , 1024 , 1024 , 1024], 'Architecture', 'hidden_layer_size'),
('batch_size' , 256, 'Architecture', 'batch_size'),
('num_of_epochs', 1, 'Architecture', 'training_epochs'),
('dropout_rate' , 0.0, 'Architecture', 'dropout_rate'),
('output_layer_type', 'linear', 'Architecture', 'output_layer_type'),
('optimizer' , 'adam', 'Architecture', 'optimizer'),
('loss_function' , 'mse', 'Architecture', 'loss_function'),
# RNN
('sequential_training', False, 'Architecture', 'sequential_training'),
('stateful' , False, 'Architecture', 'stateful'),
('use_high_batch_size', False, 'Architecture', 'use_high_batch_size'),
('training_algo', 1, 'Architecture', 'training_algo'),
('merge_size' , 1, 'Architecture', 'merge_size'),
('seq_length' , 200, 'Architecture', 'seq_length'),
('bucket_range' , 100, 'Architecture', 'bucket_range'),
# Data
('shuffle_data', True, 'Data', 'shuffle_data'),
('train_file_number', impossible_int, 'Data','train_file_number'),
('valid_file_number', impossible_int, 'Data','valid_file_number'),
('test_file_number' , impossible_int, 'Data','test_file_number'),
# Processes
('GenTestList', False, 'Processes', 'GenTestList'),
('NORMDATA' , False, 'Processes', 'NORMDATA'),
('TRAINMODEL' , False, 'Processes', 'TRAINMODEL'),
('TESTMODEL' , False, 'Processes', 'TESTMODEL')
]
# this uses exec(...) which is potentially dangerous since arbitrary code could be executed
for (variable,default,section,option) in user_options:
# default value
value=None
try:
# first, look for a user-set value for this variable in the config file
value = cfgparser.get(section,option)
user_or_default='user'
except (configparser.NoSectionError, configparser.NoOptionError):
# use default value, if there is one
if (default == None) or \
(default == '') or \
((type(default) == int) and (default == impossible_int)) or \
((type(default) == float) and (default == impossible_float)) :
logger.critical('%20s has no value!' % (section+":"+option) )
raise Exception
else:
value = default
user_or_default='default'
if type(default) == str:
exec('self.%s = "%s"' % (variable,value))
elif type(default) == int:
exec('self.%s = int(%s)' % (variable,value))
elif type(default) == float:
exec('self.%s = float(%s)' % (variable,value))
elif type(default) == bool:
exec('self.%s = bool(%s)' % (variable,value))
elif type(default) == list:
exec('self.%s = list(%s)' % (variable,value))
elif type(default) == dict:
exec('self.%s = dict(%s)' % (variable,value))
else:
logger.critical('Variable %s has default value of unsupported type %s',variable,type(default))
raise Exception('Internal error in configuration settings: unsupported default type')
logger.info('%20s has %7s value %s' % (section+":"+option,user_or_default,value) )
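        # Illustrative note (not part of the original code): a safer alternative
        # to the exec() calls above is setattr() with explicit casting, which
        # avoids executing text taken from the config file, roughly:
        #     if type(default) == bool:
        #         value = str(value).lower() in ('true', '1', 'yes')
        #     elif type(default) in (list, dict):
        #         value = ast.literal_eval(str(value))
        #     else:
        #         value = type(default)(value)
        #     setattr(self, variable, value)
        # (sketch only; ast would need to be imported at the top of the module.)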
def complete_configuration(self):
# to be called after reading any user-specific settings
# because the values set here depend on those user-specific settings
# get a logger
logger = logging.getLogger("configuration")
## create directories if not exists
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
if not os.path.exists(self.stats_dir):
os.makedirs(self.stats_dir)
if not os.path.exists(self.gen_dir):
os.makedirs(self.gen_dir)
# input-output normalization stat files
self.inp_stats_file = os.path.join(self.stats_dir, "input_%d_%s_%d.norm" %(int(self.train_file_number), self.inp_norm, self.inp_dim))
self.out_stats_file = os.path.join(self.stats_dir, "output_%d_%s_%d.norm" %(int(self.train_file_number), self.out_norm, self.out_dim))
# define model file name
if self.sequential_training:
self.combined_model_arch = 'RNN'+str(self.training_algo)
else:
self.combined_model_arch = 'DNN'
self.combined_model_arch += '_'+str(len(self.hidden_layer_size))
self.combined_model_arch += '_'+'_'.join(map(str, self.hidden_layer_size))
self.combined_model_arch += '_'+'_'.join(map(str, self.hidden_layer_type))
self.nnets_file_name = '%s_%d_train_%d_%d_%d_%d_%d_model' \
%(self.combined_model_arch, int(self.shuffle_data),
self.inp_dim, self.out_dim, self.train_file_number, self.batch_size, self.num_of_epochs)
logger.info('model file: %s' % (self.nnets_file_name))
# model files
self.json_model_file = os.path.join(self.model_dir, self.nnets_file_name+'.json')
self.h5_model_file = os.path.join(self.model_dir, self.nnets_file_name+'.h5')
# predicted features directory
self.pred_feat_dir = os.path.join(self.gen_dir, self.nnets_file_name)
if not os.path.exists(self.pred_feat_dir):
os.makedirs(self.pred_feat_dir)
# string.lower for some architecture values
self.output_layer_type = self.output_layer_type.lower()
self.optimizer = self.optimizer.lower()
self.loss_function = self.loss_function.lower()
for i in range(len(self.hidden_layer_type)):
self.hidden_layer_type[i] = self.hidden_layer_type[i].lower()
# set sequential training True if using LSTMs
if 'lstm' in self.hidden_layer_type:
self.sequential_training = True
# set/limit batch size to 25
if self.sequential_training and self.batch_size>50:
if not self.use_high_batch_size:
logger.info('reducing the batch size from %s to 25' % (self.batch_size))
self.batch_size = 25 ## num. of sentences in this case
# rnn params
self.rnn_params = {}
self.rnn_params['merge_size'] = self.merge_size
self.rnn_params['seq_length'] = self.seq_length
self.rnn_params['bucket_range'] = self.bucket_range
self.rnn_params['stateful'] = self.stateful
|
bajibabu/merlin
|
src/keras_lib/configuration.py
|
Python
|
apache-2.0
| 13,717
|
'''/* UVa problem:
*
* Topic:
*
* Level:
*
* Brief problem description:
*
*
*
* Solution Summary:
*
*
*
* Used Resources:
*
*
*
* I hereby certify that I have produced the following solution myself
* using the resources listed above in accordance with the CMPUT 403
* collaboration policy.
*
* --- Dennis Truong
*/'''
while True:
try:
a = int(input().strip())
b = int(input().strip())
print(a*b)
except EOFError:
break
|
DT9/programming-problems
|
2017/uva/403/temp/UVa11462.py
|
Python
|
apache-2.0
| 499
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from typing import Optional, Sequence, Set, Tuple
from flask import current_app, g
from flask_appbuilder.security.sqla import models as sqla_models
from flask_appbuilder.security.sqla.manager import SecurityManager
from flask_appbuilder.security.sqla.models import PermissionView, Role, User
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from airflow import models
from airflow.exceptions import AirflowException
from airflow.models import DagModel
from airflow.security import permissions
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.www.utils import CustomSQLAInterface
EXISTING_ROLES = {
'Admin',
'Viewer',
'User',
'Op',
'Public',
}
class AirflowSecurityManager(SecurityManager, LoggingMixin):
"""Custom security manager, which introduces an permission model adapted to Airflow"""
###########################################################################
# PERMISSIONS
###########################################################################
# [START security_viewer_perms]
VIEWER_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_LINK),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_THIS_FORM_GET, permissions.RESOURCE_RESET_MY_PASSWORD_VIEW),
(permissions.ACTION_CAN_THIS_FORM_POST, permissions.RESOURCE_RESET_MY_PASSWORD_VIEW),
(permissions.ACTION_RESETMYPASSWORD, permissions.RESOURCE_USER_DB_MODELVIEW),
(permissions.ACTION_CAN_THIS_FORM_GET, permissions.RESOURCE_USERINFO_EDIT_VIEW),
(permissions.ACTION_CAN_THIS_FORM_POST, permissions.RESOURCE_USERINFO_EDIT_VIEW),
(permissions.ACTION_USERINFOEDIT, permissions.RESOURCE_USER_DB_MODELVIEW),
(permissions.ACTION_CAN_USERINFO, permissions.RESOURCE_USER_DB_MODELVIEW),
(permissions.ACTION_CAN_USERINFO, permissions.RESOURCE_USER_OID_MODELVIEW),
(permissions.ACTION_CAN_USERINFO, permissions.RESOURCE_USER_LDAP_MODELVIEW),
(permissions.ACTION_CAN_USERINFO, permissions.RESOURCE_USER_OAUTH_MODELVIEW),
(permissions.ACTION_CAN_USERINFO, permissions.RESOURCE_USER_REMOTEUSER_MODELVIEW),
]
# [END security_viewer_perms]
# [START security_user_perms]
USER_PERMISSIONS = [
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
]
# [END security_user_perms]
# [START security_op_perms]
OP_PERMISSIONS = [
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),
]
# [END security_op_perms]
ADMIN_PERMISSIONS = [
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),
]
# global view-menu for dag-level access
DAG_VMS = {permissions.RESOURCE_DAG}
READ_DAG_PERMS = {permissions.ACTION_CAN_READ}
DAG_PERMS = {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}
###########################################################################
# DEFAULT ROLE CONFIGURATIONS
###########################################################################
ROLE_CONFIGS = [
{'role': 'Viewer', 'perms': VIEWER_PERMISSIONS},
{
'role': 'User',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS,
},
{
'role': 'Op',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,
},
{
'role': 'Admin',
'perms': VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,
},
]
def __init__(self, appbuilder):
super().__init__(appbuilder)
# Go and fix up the SQLAInterface used from the stock one to our subclass.
# This is needed to support the "hack" where we had to edit
# FieldConverter.conversion_table in place in airflow.www.utils
for attr in dir(self):
if not attr.endswith('view'):
continue
view = getattr(self, attr, None)
if not view or not getattr(view, 'datamodel', None):
continue
view.datamodel = CustomSQLAInterface(view.datamodel.obj)
self.perms = None
def init_role(self, role_name, perms):
"""
Initialize the role with the given permissions and their related view-menus.
:param role_name: name of the role to create or look up
:param perms: iterable of (permission name, view-menu name) tuples
:return: None
"""
role = self.find_role(role_name)
if not role:
role = self.add_role(role_name)
self.add_permissions(role, set(perms))
def add_permissions(self, role, perms):
"""Adds resource permissions to a given role."""
for perm_name, view_name in perms:
perm_view = self.add_permission_view_menu(perm_name, view_name)
self.add_permission_role(role, perm_view)
def delete_role(self, role_name):
"""
Delete the given Role
:param role_name: the name of a role in the ab_role table
"""
session = self.get_session
role = session.query(sqla_models.Role).filter(sqla_models.Role.name == role_name).first()
if role:
self.log.info("Deleting role '%s'", role_name)
session.delete(role)
session.commit()
else:
raise AirflowException(f"Role named '{role_name}' does not exist")
@staticmethod
def get_user_roles(user=None):
"""
Get all the roles associated with the user.
:param user: the ab_user in FAB model.
:return: a list of roles associated with the user.
"""
if user is None:
user = g.user
if user.is_anonymous:
public_role = current_app.appbuilder.config.get('AUTH_ROLE_PUBLIC')
return [current_app.appbuilder.security_manager.find_role(public_role)] if public_role else []
return user.roles
def get_all_permissions_views(self):
"""Returns a set of tuples with the perm name and view menu name"""
perms_views = set()
for role in self.get_user_roles():
perms_views.update(
{(perm_view.permission.name, perm_view.view_menu.name) for perm_view in role.permissions}
)
return perms_views
def get_readable_dags(self, user):
"""Gets the DAGs readable by authenticated user."""
return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)
def get_editable_dags(self, user):
"""Gets the DAGs editable by authenticated user."""
return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)
def get_readable_dag_ids(self, user) -> Set[str]:
"""Gets the DAG IDs readable by authenticated user."""
return {dag.dag_id for dag in self.get_readable_dags(user)}
def get_editable_dag_ids(self, user) -> Set[str]:
"""Gets the DAG IDs editable by authenticated user."""
return {dag.dag_id for dag in self.get_editable_dags(user)}
def get_accessible_dag_ids(self, user) -> Set[str]:
"""Gets the DAG IDs editable or readable by authenticated user."""
accessible_dags = self.get_accessible_dags(
[permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ], user
)
return {dag.dag_id for dag in accessible_dags}
@provide_session
def get_accessible_dags(self, user_actions, user, session=None):
"""Generic function to get readable or writable DAGs for authenticated user."""
if user.is_anonymous:
return set()
user_query = (
session.query(User)
.options(
joinedload(User.roles)
.subqueryload(Role.permissions)
.options(joinedload(PermissionView.permission), joinedload(PermissionView.view_menu))
)
.filter(User.id == user.id)
.first()
)
resources = set()
for role in user_query.roles:
for permission in role.permissions:
action = permission.permission.name
if action not in user_actions:
continue
resource = permission.view_menu.name
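# A permission on the global DAG resource grants access to every DAG,
# so short-circuit with the unfiltered DagModel query.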
if resource == permissions.RESOURCE_DAG:
return session.query(DagModel)
if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])
else:
resources.add(resource)
return session.query(DagModel).filter(DagModel.dag_id.in_(resources))
def can_access_some_dags(self, action: str, dag_id: Optional[str] = None) -> bool:
"""Checks if user has read or write access to some dags."""
if dag_id and dag_id != '~':
return self.has_access(action, self.prefixed_dag_id(dag_id))
user = g.user
if action == permissions.ACTION_CAN_READ:
return any(self.get_readable_dags(user))
return any(self.get_editable_dags(user))
def can_read_dag(self, dag_id, user=None) -> bool:
"""Determines whether a user has DAG read access."""
if not user:
user = g.user
prefixed_dag_id = self.prefixed_dag_id(dag_id)
return self._has_view_access(
user, permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG
) or self._has_view_access(user, permissions.ACTION_CAN_READ, prefixed_dag_id)
def can_edit_dag(self, dag_id, user=None) -> bool:
"""Determines whether a user has DAG edit access."""
if not user:
user = g.user
prefixed_dag_id = self.prefixed_dag_id(dag_id)
return self._has_view_access(
user, permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG
) or self._has_view_access(user, permissions.ACTION_CAN_EDIT, prefixed_dag_id)
def prefixed_dag_id(self, dag_id):
"""Returns the permission name for a DAG id."""
if dag_id == permissions.RESOURCE_DAG:
return dag_id
if dag_id.startswith(permissions.RESOURCE_DAG_PREFIX):
return dag_id
return f"{permissions.RESOURCE_DAG_PREFIX}{dag_id}"
def is_dag_resource(self, resource_name):
"""Determines if a permission belongs to a DAG or all DAGs."""
if resource_name == permissions.RESOURCE_DAG:
return True
return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
def has_access(self, permission, resource, user=None) -> bool:
"""
Verify whether the given user can perform the given action
(e.g. can_read, can_edit) on the given resource.
:param permission: action to check on the resource (e.g. can_read, can_edit).
:type permission: str
:param resource: name of the view-menu or resource.
:type resource: str
:param user: user to check; defaults to the user of the current request.
:type user: User
:return: whether the user can perform the action on the resource.
:rtype: bool
"""
if not user:
user = g.user
if user.is_anonymous:
return self.is_item_public(permission, resource)
has_access = self._has_view_access(user, permission, resource)
# _has_view_access is FAB's built-in view access check; it does not account
# for the all-DAG permission, so fall back to the DAG-level checks below.
if self.is_dag_resource(resource):
if permission == permissions.ACTION_CAN_READ:
has_access |= self.can_read_dag(resource, user)
elif permission == permissions.ACTION_CAN_EDIT:
has_access |= self.can_edit_dag(resource, user)
return has_access
def _get_and_cache_perms(self):
"""Cache permissions-views"""
self.perms = self.get_all_permissions_views()
def _has_role(self, role_name_or_list):
"""Whether the user has this role name"""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any(r.name in role_name_or_list for r in self.get_user_roles())
def _has_perm(self, permission_name, view_menu_name):
"""Whether the user has this perm"""
if hasattr(self, 'perms') and self.perms is not None:
if (permission_name, view_menu_name) in self.perms:
return True
# rebuild the permissions set
self._get_and_cache_perms()
return (permission_name, view_menu_name) in self.perms
def has_all_dags_access(self):
"""
Has access to all DAGs if any of the following three cases holds:
1. The user has one of the roles Admin, Viewer, User or Op.
2. The user has the can_read permission on the all-DAGs view.
3. The user has the can_edit permission on the all-DAGs view.
"""
return (
self._has_role(['Admin', 'Viewer', 'Op', 'User'])
or self._has_perm(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)
or self._has_perm(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG)
)
def clean_perms(self):
"""FAB leaves faulty permissions that need to be cleaned up"""
self.log.debug('Cleaning faulty perms')
sesh = self.get_session
pvms = sesh.query(sqla_models.PermissionView).filter(
or_(
sqla_models.PermissionView.permission == None, # noqa pylint: disable=singleton-comparison
sqla_models.PermissionView.view_menu == None, # noqa pylint: disable=singleton-comparison
)
)
# Since FAB doesn't define ON DELETE CASCADE on these tables, we need
# to delete the _object_ so that SQLA knows to delete the many-to-many
# relationship object too. :(
deleted_count = 0
for pvm in pvms:
sesh.delete(pvm)
deleted_count += 1
sesh.commit()
if deleted_count:
self.log.info('Deleted %s faulty permissions', deleted_count)
def _merge_perm(self, permission_name, view_menu_name):
"""
Add the (permission, view_menu) pair to assoc_permissionview_role if it doesn't exist.
The related entries are also added to the ab_permission and ab_view_menu
meta tables as needed.
:param permission_name: Name of the permission.
:type permission_name: str
:param view_menu_name: Name of the view-menu
:type view_menu_name: str
:return:
"""
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
permission_view = None
if permission and view_menu:
permission_view = (
self.get_session.query(self.permissionview_model)
.filter_by(permission=permission, view_menu=view_menu)
.first()
)
if not permission_view and permission_name and view_menu_name:
self.add_permission_view_menu(permission_name, view_menu_name)
@provide_session
def create_custom_dag_permission_view(self, session=None):
"""
Workflow:
1. Fetch all the existing (permission, view-menu) pairs from the Airflow DB.
2. Fetch all the existing DAG models that are either active or paused.
3. Create both read and write permission view-menu relations for every DAG from step 2.
4. Find all the DAG-specific roles (excluding Public, Admin, Viewer, Op, User).
5. Get all the permission view-menus owned by the User role.
6. Grant the DAG roles every permission view-menu of the User role except the all-DAG view-menus.
7. Commit the updated permission-view-menu/role associations to the DB.
:return: None.
"""
self.log.debug('Fetching a set of all permission, view_menu from FAB meta-table')
def merge_pv(perm, view_menu):
"""Create permission view menu only if it doesn't exist"""
if view_menu and perm and (view_menu, perm) not in all_permission_views:
self._merge_perm(perm, view_menu)
all_permission_views = set()
for permission_view in self.get_session.query(self.permissionview_model).all():
if permission_view.permission and permission_view.view_menu:
all_permission_views.add((permission_view.permission.name, permission_view.view_menu.name))
# Get all the active / paused dags and insert them into a set
all_dags_models = (
session.query(models.DagModel)
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.all()
)
# create can_edit and can_read permissions for every dag(vm)
for dag in all_dags_models:
for perm in self.DAG_PERMS:
merge_pv(perm, self.prefixed_dag_id(dag.dag_id))
# For every DAG-level role, grant the User role's permission view-menus
# (except the all-DAG view-menu) via ab_permission_view_role.
all_roles = self.get_all_roles()
user_role = self.find_role('User')
dag_role = [role for role in all_roles if role.name not in EXISTING_ROLES]
update_perm_views = []
# need to remove all_dag vm from all the existing view-menus
dag_vm = self.find_view_menu(permissions.RESOURCE_DAG)
ab_perm_view_role = sqla_models.assoc_permissionview_role
perm_view = self.permissionview_model
view_menu = self.viewmenu_model
all_perm_view_by_user = (
session.query(ab_perm_view_role)
.join(
perm_view,
perm_view.id == ab_perm_view_role.columns.permission_view_id, # pylint: disable=no-member
)
.filter(ab_perm_view_role.columns.role_id == user_role.id) # pylint: disable=no-member
.join(view_menu)
.filter(perm_view.view_menu_id != dag_vm.id)
)
all_perm_views = {role.permission_view_id for role in all_perm_view_by_user}
for role in dag_role:
# pylint: disable=no-member
# Get all the perm-view of the role
existing_perm_view_by_user = self.get_session.query(ab_perm_view_role).filter(
ab_perm_view_role.columns.role_id == role.id
)
existing_perms_views = {pv.permission_view_id for pv in existing_perm_view_by_user}
missing_perm_views = all_perm_views - existing_perms_views
for perm_view_id in missing_perm_views:
update_perm_views.append({'permission_view_id': perm_view_id, 'role_id': role.id})
if update_perm_views:
self.get_session.execute(
ab_perm_view_role.insert(), update_perm_views # pylint: disable=no-value-for-parameter
)
self.get_session.commit()
def update_admin_perm_view(self):
"""
Admin should have all permission-views except the per-DAG views,
because Admin already holds the all-DAG permissions.
Add any missing ones to the Admin role.
:return: None.
"""
dag_pvs = (
self.get_session.query(sqla_models.ViewMenu)
.filter(sqla_models.ViewMenu.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
.all()
)
pv_ids = [pv.id for pv in dag_pvs]
pvms = (
self.get_session.query(sqla_models.PermissionView)
.filter(~sqla_models.PermissionView.view_menu_id.in_(pv_ids))
.all()
)
pvms = [p for p in pvms if p.permission and p.view_menu]
admin = self.find_role('Admin')
admin.permissions = list(set(admin.permissions) | set(pvms))
self.get_session.commit()
def sync_roles(self):
"""
1. Init the default roles (Admin, Viewer, User, Op, Public)
with their related permissions.
2. Init the custom DAG-level roles with their related permissions.
:return: None.
"""
# Create global all-dag VM
self.create_perm_vm_for_all_dag()
# Create default user role.
for config in self.ROLE_CONFIGS:
role = config['role']
perms = config['perms']
self.init_role(role, perms)
self.create_custom_dag_permission_view()
# init existing roles; other roles can be created through the UI.
self.update_admin_perm_view()
self.clean_perms()
def sync_resource_permissions(self, perms=None):
"""Populates resource-based permissions."""
if not perms:
return
for action, resource in perms:
self.add_view_menu(resource)
self.add_permission_view_menu(action, resource)
def sync_perm_for_dag(self, dag_id, access_control=None):
"""
Sync permissions for the given dag id. The dag id is assumed to exist in our dag bag,
since only the UI refresh button or cli.sync_perm calls this function.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: str
:param access_control: a dict where each key is a role name and
each value is a set() of permission names (e.g. {'can_read'})
:type access_control: dict
:return:
"""
prefixed_dag_id = self.prefixed_dag_id(dag_id)
for dag_perm in self.DAG_PERMS:
perm_on_dag = self.find_permission_view_menu(dag_perm, prefixed_dag_id)
if perm_on_dag is None:
self.add_permission_view_menu(dag_perm, prefixed_dag_id)
if access_control:
self._sync_dag_view_permissions(prefixed_dag_id, access_control)
def _sync_dag_view_permissions(self, dag_id, access_control):
"""Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: str
:param access_control: a dict where each key is a role name and
each value is a set() of permission names (e.g. {'can_read'})
:type access_control: dict
"""
prefixed_dag_id = self.prefixed_dag_id(dag_id)
def _get_or_create_dag_permission(perm_name):
dag_perm = self.find_permission_view_menu(perm_name, prefixed_dag_id)
if not dag_perm:
self.log.info("Creating new permission '%s' on view '%s'", perm_name, prefixed_dag_id)
dag_perm = self.add_permission_view_menu(perm_name, prefixed_dag_id)
return dag_perm
def _revoke_stale_permissions(dag_view):
existing_dag_perms = self.find_permissions_view_menu(dag_view)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role if role.name != 'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, {})
if perm.permission.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.permission,
prefixed_dag_id,
role.name,
)
self.del_permission_role(role, perm)
dag_view = self.find_view_menu(prefixed_dag_id)
if dag_view:
_revoke_stale_permissions(dag_view)
for rolename, perms in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
"The access_control mapping for DAG '{}' includes a role "
"named '{}', but that role does not exist".format(dag_id, rolename)
)
perms = set(perms)
invalid_perms = perms - self.DAG_PERMS
if invalid_perms:
raise AirflowException(
"The access_control map for DAG '{}' includes the following "
"invalid permissions: {}; The set of valid permissions "
"is: {}".format(prefixed_dag_id, invalid_perms, self.DAG_PERMS)
)
for perm_name in perms:
dag_perm = _get_or_create_dag_permission(perm_name)
self.add_permission_role(role, dag_perm)
def create_perm_vm_for_all_dag(self):
"""Create perm-vm if not exist and insert into FAB security model for all-dags."""
# create perm for global logical dag
for dag_vm in self.DAG_VMS:
for perm in self.DAG_PERMS:
self._merge_perm(permission_name=perm, view_menu_name=dag_vm)
def check_authorization(
self, perms: Optional[Sequence[Tuple[str, str]]] = None, dag_id: Optional[str] = None
) -> bool:
"""Checks that the logged in user has the specified permissions."""
if not perms:
return True
for perm in perms:
if perm in (
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
):
can_access_all_dags = self.has_access(*perm)
if can_access_all_dags:
continue
action = perm[0]
if self.can_access_some_dags(action, dag_id):
continue
return False
elif not self.has_access(*perm):
return False
return True
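# Typical call site (hypothetical, not part of this module): a view decorator
# collects the required (action, resource) pairs and calls, for example,
#   security_manager.check_authorization(
#       [(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG)], dag_id='example_dag')
# returning HTTP 403 to the client when this evaluates to False.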
|
airbnb/airflow
|
airflow/www/security.py
|
Python
|
apache-2.0
| 29,547
|
#!/usr/bin/env python
############################################################################
#
# Copyright 2019 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
############################################################################
from __future__ import print_function
import os
import sys
import struct
import string
# Temporary file to estimate the static RAM size.
STATIC_RAM_ESTIMATION = 'temp_static_ram_estimation_file'
def roundup_power_two(size):
size = size - 1
size |= size >> 1
size |= size >> 2
size |= size >> 4
size |= size >> 8
size |= size >> 16
size = size + 1
return size
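# Quick self-check of the rounding helper above (hypothetical values; these
# assertions follow directly from the bit-twiddling and are cheap at runtime).
assert roundup_power_two(1) == 1
assert roundup_power_two(5) == 8
assert roundup_power_two(64) == 64
assert roundup_power_two(65) == 128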
def check_optimize_config(file_name):
with open(file_name, 'r+') as f:
lines = f.readlines()
return any('CONFIG_OPTIMIZE_APP_RELOAD_TIME=y' in line and not line.startswith('#') for line in lines)
def get_static_ram_size(bin_type):
textsize = 0
rosize = 0
datasize = 0
bsssize = 0
ram_fp = open(STATIC_RAM_ESTIMATION, 'rb')
if bin_type == BIN :
# The first line is not used for calculating RAM size, so discard it.
ram_fp.readline()
ram_array = ram_fp.readline()
size_array = ram_array.split('\t')
static_ram_size = size_array[SIZE_CMD_SUMMATION_INDEX]
elif bin_type == ELF :
line = ram_fp.readline()
while line:
words = line.split('.')
if len(words) > 1:
words = words[1].split()
if len(words) > 1:
section = words[0]
size = int(words[4], 16)
if section == 'text':
textsize = size
elif section == 'rodata':
rosize = size
elif section == 'data':
datasize = size
elif section == 'bss':
bsssize = size
break
line = ram_fp.readline()
# If CONFIG_OPTIMIZE_APP_RELOAD_TIME is enabled, a copy of the data section
# is kept inside the ro section and used at reload time, so we add datasize
# to rosize to make room for the data section.
cfg_path = os.getenv('TOPDIR') + '/.config'
if check_optimize_config(cfg_path) == True:
rosize = rosize + datasize
rosize = roundup_power_two(rosize)
textsize = roundup_power_two(textsize)
static_ram_size = textsize + rosize + datasize + bsssize
else : #Not supported.
print("Error : Not supported Binary Type")
sys.exit(1)
ram_fp.close()
os.remove(STATIC_RAM_ESTIMATION)
return static_ram_size
############################################################################
#
# header information :
#
# total header size is 61 bytes.
# +--------------------------------------------------------------------------
# | Header size | Binary type | Compression | Binary priority | Binary size|
# | (2bytes) | (1byte) | (1byte) | (1byte) | (4bytes) |
# +--------------------------------------------------------------------------
# ----------------------------------------------------------------------
# | Binary name | Binary version | Binary ram size | Binary stack size |
# | (16bytes) | (16bytes) | (4bytes) | (4bytes) |
# ----------------------------------------------------------------------
# ------------------------------ +
# | Kernel Version |Jump address |
# | (8bytes) | (4bytes) |
# -------------------------------+
#
# parameter information :
#
# argv[1] is file path of binary file.
# argv[2] is file type.
# argv[3] is kernel version.
# argv[4] is binary name.
# argv[5] is binary version.
# argv[6] is a dynamic ram size required to run this binary.
# argv[7] is main task stack size.
# argv[8] is main task priority.
# argv[9] is compression type
# argv[10] is block size for compression
#
############################################################################
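# Reader-side sketch (illustrative only, not used by this tool): because the
# header fields below are packed one at a time, the 61-byte header can be read
# back with standard sizes and no alignment padding, e.g.
#   struct.unpack('=HBBBI16s16sII8sI', header_bytes)
# assuming the reader uses the same native byte order as the build host.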
file_path = sys.argv[1]
binary_type = sys.argv[2]
kernel_ver = sys.argv[3]
binary_name = sys.argv[4]
binary_ver = sys.argv[5]
dynamic_ram_size = sys.argv[6]
main_stack_size = sys.argv[7]
main_priority = sys.argv[8]
comp_enabled = sys.argv[9]
comp_blk_size = sys.argv[10]
# This path is only for dbuild.
elf_path_for_bin_type = 'root/tizenrt/build/output/bin/tinyara'
# Path to directory of this file
mkbinheader_path = os.path.dirname(__file__)
SIZE_OF_HEADERSIZE = 2
SIZE_OF_BINTYPE = 1
SIZE_OF_COMFLAG = 1
SIZE_OF_MAINPRIORITY = 1
SIZE_OF_BINSIZE = 4
SIZE_OF_BINNAME = 16
SIZE_OF_BINVER = 16
SIZE_OF_BINRAMSIZE = 4
SIZE_OF_MAINSTACKSIZE = 4
SIZE_OF_KERNELVER = 8
SIZE_OF_JUMPADDR = 4
header_size = SIZE_OF_HEADERSIZE + SIZE_OF_BINTYPE + SIZE_OF_COMFLAG + SIZE_OF_MAINPRIORITY + SIZE_OF_BINSIZE + SIZE_OF_BINNAME + SIZE_OF_BINVER + SIZE_OF_BINRAMSIZE + SIZE_OF_MAINSTACKSIZE + SIZE_OF_KERNELVER + SIZE_OF_JUMPADDR
ELF = 1
BIN = 2
COMP_NONE = 0
COMP_LZMA = 1
COMP_MINIZ = 2
COMP_MAX = COMP_MINIZ
# In the output of the Linux 'size' command, the 4th value is the sum of text, data and bss.
# We use this value (taken from the kernel ELF) when the binary type is BIN.
SIZE_CMD_SUMMATION_INDEX = 3
if int(main_stack_size) >= int(dynamic_ram_size) :
print("Error : Dynamic ram size should be bigger than Main stack size.")
print("Dynamic ram size : %d, Main stack size : %d" %(int(dynamic_ram_size), int(main_stack_size)))
sys.exit(1)
with open(file_path, 'rb') as fp:
# copy the binary data into 'data'
data = fp.read()
file_size = fp.tell()
fp.close()
if binary_type == 'bin' or binary_type == 'BIN' :
bin_type = BIN
elif binary_type == 'elf' or binary_type == 'ELF' :
bin_type = ELF
else : # Not supported.
bin_type = 0
print("Error : Not supported Binary Type")
sys.exit(1)
# Calculate RAM size
# Dynamic RAM size : user input from argv[6]
# Static RAM size : extracted from the Linux 'size'/'readelf' command output
if bin_type == BIN :
os.system('size ' + elf_path_for_bin_type + ' > ' + STATIC_RAM_ESTIMATION)
elif bin_type == ELF :
os.system('readelf -S ' + file_path + ' > ' + STATIC_RAM_ESTIMATION)
else : #Not supported.
print("Error : Not supported Binary Type")
sys.exit(1)
if 0 < int(main_priority) <= 255 :
main_priority = int(main_priority)
else :
print("Error : This binary priority is not valid")
sys.exit(1)
static_ram_size = get_static_ram_size(bin_type)
cfg_path = os.getenv('TOPDIR') + '/.config'
if check_optimize_config(cfg_path) == True:
binary_ram_size = int(dynamic_ram_size)
else:
binary_ram_size = int(static_ram_size) + int(dynamic_ram_size)
binary_ram_size = roundup_power_two(binary_ram_size)
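# e.g. (hypothetical sizes) static 96 KiB + dynamic 64 KiB = 160 KiB,
# which rounds up to a 256 KiB binary_ram_size.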
# Based on comp_enabled, check whether the binary needs to be compressed.
# If so, assign the compression algorithm to use to bin_comp;
# otherwise assign 0 to bin_comp to represent no compression.
if 0 < int(comp_enabled) <= COMP_MAX :
bin_comp = int(comp_enabled)
else :
bin_comp = 0
# Compress data according to Compression Algorithm represented by bin_comp
# Run mkcompressimg tool with provided options. Read output compressed file into data.
if bin_comp > COMP_NONE :
fp_tmp = open("tmp", 'wb+')
fp_tmp.write(data)
fp_tmp.close()
os.system(mkbinheader_path + '/compression/mkcompressimg ' + comp_blk_size + ' ' + comp_enabled + ' tmp' + ' tmp_comp')
fp_tmp = open("tmp_comp", 'rb')
data = fp_tmp.read()
file_size = fp_tmp.tell()
fp_tmp.close()
os.system('rm tmp tmp_comp')
fp = open(file_path, 'wb')
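# Write the 61-byte header in the exact field order documented above,
# followed by the (possibly compressed) binary payload.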
fp.write(struct.pack('H', header_size))
fp.write(struct.pack('B', bin_type))
fp.write(struct.pack('B', bin_comp))
fp.write(struct.pack('B', main_priority))
fp.write(struct.pack('I', file_size))
fp.write('{:{}{}.{}}'.format(binary_name, '<', SIZE_OF_BINNAME, SIZE_OF_BINNAME - 1).replace(' ','\0'))
fp.write('{:{}{}.{}}'.format(binary_ver, '<', SIZE_OF_BINVER, SIZE_OF_BINVER - 1).replace(' ','\0'))
fp.write(struct.pack('I', binary_ram_size))
fp.write(struct.pack('I', int(main_stack_size)))
fp.write('{:{}{}.{}}'.format(kernel_ver, '<', SIZE_OF_KERNELVER, SIZE_OF_KERNELVER - 1).replace(' ','\0'))
# Parse the _vector_start address from the ELF symbol information.
# _vector_start exists only on the ARM architecture, so this step
# applies only to ARM builds.
if bin_type == BIN :
os.system('readelf -s ' + elf_path_for_bin_type + ' | grep _vector_start > addr_file')
addr_fp = open("addr_file", 'rb')
jump_addr = addr_fp.read()
addr_fp.close()
os.remove('addr_file')
addr_data = jump_addr.split(' ')
addr_s = '0x%s' %addr_data[3]
addr = int(addr_s, 0)
else :
addr = 0
fp.write(struct.pack('I', addr))
fp.write(data)
fp.close()
|
chanijjani/TizenRT
|
os/tools/mkbinheader.py
|
Python
|
apache-2.0
| 9,562
|
from modularodm import Q
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import ValidationError, NotFound, PermissionDenied
from framework.auth.oauth_scopes import CoreScopes
from api.base import generic_bulk_views as bulk_views
from api.base import permissions as base_permissions
from api.base.filters import ODMFilterMixin
from api.base.views import JSONAPIBaseView
from api.base.views import BaseLinkedList
from api.base.views import LinkedNodesRelationship
from api.base.views import LinkedRegistrationsRelationship
from api.base.utils import get_object_or_error, is_bulk_request, get_user_auth
from api.collections.serializers import (
CollectionSerializer,
CollectionDetailSerializer,
CollectionNodeLinkSerializer,
)
from api.nodes.serializers import NodeSerializer
from api.registrations.serializers import RegistrationSerializer
from api.nodes.permissions import (
ContributorOrPublic,
ReadOnlyIfRegistration,
ContributorOrPublicForPointers,
)
from website.exceptions import NodeStateError
from osf.models import Collection, NodeRelation
from website.util.permissions import ADMIN
class CollectionMixin(object):
"""Mixin with convenience methods for retrieving the current collection based on the
current URL. By default, fetches the current node based on the collection_id kwarg.
"""
serializer_class = CollectionSerializer
node_lookup_url_kwarg = 'collection_id'
def get_node(self, check_object_permissions=True):
node = get_object_or_error(
Collection,
self.kwargs[self.node_lookup_url_kwarg],
display_name='collection'
)
# Nodes that are folders/collections are treated as a separate resource, so if the client
# requests a non-collection through a collection endpoint, we return a 404
if not node.is_collection:
raise NotFound
# May raise a permission denied
if check_object_permissions:
self.check_object_permissions(self.request, node)
return node
class CollectionList(JSONAPIBaseView, bulk_views.BulkUpdateJSONAPIView, bulk_views.BulkDestroyJSONAPIView, bulk_views.ListBulkCreateJSONAPIView, ODMFilterMixin):
"""Organizer Collections organize projects and components. *Writeable*.
Paginated list of Project Organizer Collections ordered by their `date_modified`.
Each resource contains the full representation of the project organizer collection, meaning additional
requests to an individual Organizer Collection's detail view are not necessary.
The Project Organizer is a tool to allow the user to make Collections of projects, components, and registrations
for whatever purpose the user might want to organize them. They make node_links to any Node that a user has
read access to. Collections through this API do not nest. Currently, Collections are private to the individual user,
though that could change one day.
##Collection Attributes
OSF Organizer Collection entities have the "nodes" `type`.
name type description
=================================================================================
title string title of Organizer Collection
date_created iso8601 timestamp timestamp that the collection was created
date_modified iso8601 timestamp timestamp when the collection was last updated
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Creating New Organizer Collections
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "collections", # required
"attributes": {
"title": {title}, # required
}
}
}
Success: 201 CREATED + collection representation
New Organizer Collections are created by issuing a POST request to this endpoint. The `title` field is
mandatory. All other fields not listed above will be ignored. If the Organizer Collection creation is successful
the API will return a 201 response with the representation of the new node in the body.
For the new Collection's canonical URL, see the `/links/self` field of the response.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Organizer Collections may be filtered by their `title`, which is a string field and will be filtered using simple
substring matching.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.ORGANIZER_COLLECTIONS_BASE_READ]
required_write_scopes = [CoreScopes.ORGANIZER_COLLECTIONS_BASE_WRITE]
serializer_class = CollectionSerializer
view_category = 'collections'
view_name = 'collection-list'
model_class = Collection
ordering = ('-date_modified', ) # default ordering
# overrides ODMFilterMixin
def get_default_odm_query(self):
base_query = (
Q('is_deleted', 'ne', True)
)
user = self.request.user
if not user.is_anonymous:
permission_query = Q('creator', 'eq', user)
else:
permission_query = Q('is_public', 'eq', True)
query = base_query & permission_query
return query
# overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView
def get_queryset(self):
# For bulk requests, queryset is formed from request body.
if is_bulk_request(self.request):
query = Q('_id', 'in', [coll['id'] for coll in self.request.data])
auth = get_user_auth(self.request)
collections = Collection.find(query)
for collection in collections:
if not collection.can_edit(auth):
raise PermissionDenied
return collections
else:
query = self.get_query_from_request()
return Collection.find(query)
# overrides ListBulkCreateJSONAPIView, BulkUpdateJSONAPIView, BulkDestroyJSONAPIView
def get_serializer_class(self):
"""
Use CollectionDetailSerializer which requires 'id'
"""
if self.request.method in ('PUT', 'PATCH', 'DELETE'):
return CollectionDetailSerializer
else:
return CollectionSerializer
# overrides ListBulkCreateJSONAPIView
def perform_create(self, serializer):
"""Create a node.
:param serializer:
"""
# On creation, make sure that current user is the creator
user = self.request.user
serializer.save(creator=user)
# overrides BulkDestroyJSONAPIView
def allow_bulk_destroy_resources(self, user, resource_list):
"""User must have admin permissions to delete nodes."""
for node in resource_list:
if not node.has_permission(user, ADMIN):
return False
return True
# Overrides BulkDestroyJSONAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
try:
instance.remove_node(auth=auth)
except NodeStateError as err:
raise ValidationError(err.message)
instance.save()
class CollectionDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, CollectionMixin):
"""Details about Organizer Collections. *Writeable*.
The Project Organizer is a tool to allow the user to make Collections of projects, components, and registrations
for whatever purpose the user might want to organize them. They make node_links to any Node that a user has
read access to. Collections through this API do not nest. Currently, Collections are private to the individual user,
though that could change one day.
##Collection Attributes
OSF Organizer Collection entities have the "nodes" `type`.
name type description
=================================================================================
title string title of Organizer Collection
date_created iso8601 timestamp timestamp that the collection was created
date_modified iso8601 timestamp timestamp when the collection was last updated
##Relationships
###Node links
Node links are pointers or aliases to nodes. This relationship lists all of the nodes that the Organizer Collection
is pointing to. New node links can be created with this collection.
##Links
self: the canonical api endpoint of this node
html: this node's page on the OSF website
##Actions
###Update
Method: PUT / PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "nodes", # required
"id": {node_id}, # required
"attributes": {
"title": {title}, # mandatory
}
}
}
Success: 200 OK + node representation
To update an Organizer Collection, issue either a PUT or a PATCH request against the `/links/self` URL.
The `title` field is mandatory if you PUT and optional if you PATCH, though there's no reason to PATCH if you aren't
changing the name. Non-string values will be accepted and stringified, but we make no promises about the
stringification output. So don't do that.
###Delete
Method: DELETE
URL: /links/self
Params: <none>
Success: 204 No Content
To delete a node, issue a DELETE request against `/links/self`. A successful delete will return a 204 No Content
response. Attempting to delete a node you do not own will result in a 403 Forbidden.
##Query Params
*None*.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.ORGANIZER_COLLECTIONS_BASE_READ]
required_write_scopes = [CoreScopes.ORGANIZER_COLLECTIONS_BASE_WRITE]
serializer_class = CollectionDetailSerializer
view_category = 'collections'
view_name = 'collection-detail'
# overrides RetrieveUpdateDestroyAPIView
def get_object(self):
return self.get_node()
# overrides RetrieveUpdateDestroyAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
node = self.get_object()
try:
node.remove_node(auth=auth)
except NodeStateError as err:
raise ValidationError(err.message)
node.save()
class LinkedNodesList(BaseLinkedList, CollectionMixin):
"""List of nodes linked to this node. *Read-only*.
Linked nodes are the project/component nodes pointed to by node links. This view will probably replace node_links in the near future.
<!--- Copied Spiel from NodeDetail -->
On the front end, nodes are considered 'projects' or 'components'. The difference between a project and a component
is that a project is the top-level node, and components are children of the project. There is also a [category
field](/v2/#osf-node-categories) that includes 'project' as an option. The categorization essentially determines
which icon is displayed by the node in the front-end UI and helps with search organization. Top-level nodes may have
a category other than project, and children nodes may have a category of project.
##Linked Node Attributes
<!--- Copied Attributes from NodeDetail -->
OSF Node entities have the "nodes" `type`.
name type description
=================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
registration boolean is this a registration?
collection boolean is this node a collection of other nodes?
public boolean has this node been made publicly-visible?
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
`description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
`registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
serializer_class = NodeSerializer
view_category = 'collections'
view_name = 'linked-nodes'
ordering = ('-date_modified',)
def get_queryset(self):
return super(LinkedNodesList, self).get_queryset().exclude(type='osf.registration')
# overrides APIView
def get_parser_context(self, http_request):
"""
Tells parser that we are creating a relationship
"""
res = super(LinkedNodesList, self).get_parser_context(http_request)
res['is_relationship'] = True
return res
class LinkedRegistrationsList(BaseLinkedList, CollectionMixin):
"""List of registrations linked to this node. *Read-only*.
Linked registrations are the registration nodes pointed to by node links.
<!--- Copied Spiel from RegistrationDetail -->
Registrations are read-only snapshots of a project. This view shows details about the given registration.
Each resource contains the full representation of the registration, meaning additional requests to an individual
registration's detail view are not necessary. A withdrawn registration will display a limited subset of information,
namely, title, description, date_created, registration, withdrawn, date_registered, withdrawal_justification, and
registration supplement. All other fields will be displayed as null. Additionally, the only relationships permitted
to be accessed for a withdrawn registration are the contributors - other relationships will return a 403.
##Linked Registration Attributes
<!--- Copied Attributes from RegistrationDetail -->
Registrations have the "registrations" `type`.
name type description
=======================================================================================================
title string title of the registered project or component
description string description of the registered node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the registered node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
fork boolean is this project a fork?
registration boolean has this project been registered? (always true - may be deprecated in future versions)
collection boolean is this registered node a collection? (always false - may be deprecated in future versions)
node_license object details of the license applied to the node
year string date range of the license
copyright_holders array of strings holders of the applied license
public boolean has this registration been made publicly-visible?
withdrawn boolean has this registration been withdrawn?
date_registered iso8601 timestamp timestamp that the registration was created
embargo_end_date iso8601 timestamp when the embargo on this registration will be lifted (if applicable)
withdrawal_justification string reasons for withdrawing the registration
pending_withdrawal boolean is this registration pending withdrawal?
pending_withdrawal_approval boolean is this registration pending approval?
pending_embargo_approval boolean is the associated Embargo awaiting approval by project admins?
registered_meta dictionary registration supplementary information
registration_supplement string registration template
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
`description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
`registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
serializer_class = RegistrationSerializer
view_category = 'collections'
view_name = 'linked-registrations'
ordering = ('-date_modified',)
def get_queryset(self):
return super(LinkedRegistrationsList, self).get_queryset().filter(type='osf.registration')
# overrides APIView
def get_parser_context(self, http_request):
"""
Tells parser that we are creating a relationship
"""
res = super(LinkedRegistrationsList, self).get_parser_context(http_request)
res['is_relationship'] = True
return res
class NodeLinksList(JSONAPIBaseView, bulk_views.BulkDestroyJSONAPIView, bulk_views.ListBulkCreateJSONAPIView, CollectionMixin):
"""Node Links to other nodes. *Writeable*.
# Deprecated
The use of /collections/ID/node_links/ is deprecated in favor of linked_nodes, linked_registrations or a combination of both.
## Known Issue
Linked nodes of type 'registrations' will be returned with an error 'Not found.' in the {embeds: target_node: {}} object.
Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
Node Links are a direct reference to the node that they point to.
##Node Link Attributes
*None*
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "node_links", # required
},
'relationships': {
'target_node': {
'data': {
'type': 'nodes',
'id': '<node_id>'
}
}
}
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
serializer_class = CollectionNodeLinkSerializer
view_category = 'collections'
view_name = 'node-pointers'
model_class = NodeRelation
ordering = ('-date_modified',)
def get_queryset(self):
return self.get_node().node_relations.select_related('child').filter(child__is_deleted=False).exclude(child__type='osf.collection')
# Overrides BulkDestroyJSONAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
node = self.get_node()
try:
node.rm_pointer(instance, auth=auth)
except ValueError as err: # pointer doesn't belong to node
raise ValidationError(err.message)
node.save()
# overrides ListCreateAPIView
def get_parser_context(self, http_request):
"""
Tells parser that we are creating a relationship
"""
res = super(NodeLinksList, self).get_parser_context(http_request)
res['is_relationship'] = True
return res
class NodeLinksDetail(JSONAPIBaseView, generics.RetrieveDestroyAPIView, CollectionMixin):
"""Node Link details. *Writeable*.
Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
Node Links are a direct reference to the node that they point to.
##Attributes
*None*
##Relationships
##Links
self: the canonical api endpoint of this node
##Actions
###Delete
Method: DELETE
URL: /links/self
Params: <none>
Success: 204 No Content
To delete a node_link, issue a DELETE request against `/links/self`. A successful delete will return a 204 No Content
response. Attempting to delete a node you do not own will result in a 403 Forbidden.
##Query Params
*None*.
#This Request/Response
"""
permission_classes = (
ContributorOrPublicForPointers,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReadOnlyIfRegistration,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
serializer_class = CollectionNodeLinkSerializer
view_category = 'nodes'
view_name = 'node-pointer-detail'
# overrides RetrieveAPIView
def get_object(self):
node_link_lookup_url_kwarg = 'node_link_id'
node_link = get_object_or_error(
NodeRelation,
self.kwargs[node_link_lookup_url_kwarg],
'node link'
)
# May raise a permission denied
self.kwargs['node_id'] = self.kwargs['collection_id']
self.check_object_permissions(self.request, node_link)
return node_link
# overrides DestroyAPIView
def perform_destroy(self, instance):
auth = get_user_auth(self.request)
node = self.get_node()
pointer = self.get_object()
try:
node.rm_pointer(pointer, auth=auth)
except ValueError as err: # pointer doesn't belong to node
raise ValidationError(err.message)
node.save()
class CollectionLinkedNodesRelationship(LinkedNodesRelationship, CollectionMixin):
""" Relationship Endpoint for Collection -> Linked Node relationships
Used to set, remove, update and retrieve the ids of the linked nodes attached to this collection. For each id, there
exists a node link that contains that node.
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 201
This requires both edit permission on the collection, and for the user that is
making the request to be able to read the nodes requested. Data can contain any number of
node identifiers. This will create a node_link for all node_ids in the request that
do not currently have a corresponding node_link in this collection.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 200
This requires both edit permission on the collection, and for the user that is
making the request to be able to read the nodes requested. Data can contain any number of
node identifiers. This will replace the contents of the node_links for this collection with
the contents of the request. It will delete all node links that don't have a node_id in the data
array, create node links for the node_ids that don't currently have one, and do nothing
for node_ids that already have a corresponding node_link. This means an update request with
{"data": []} will remove all node_links in this collection
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 204
This requires edit permission on the node. This will delete any node_links that have a
corresponding node_id in the request.
"""
view_category = 'collections'
view_name = 'collection-node-pointer-relationship'
class CollectionLinkedRegistrationsRelationship(LinkedRegistrationsRelationship, CollectionMixin):
""" Relationship Endpoint for Collection -> Linked Registration relationships
Used to set, remove, update and retrieve the ids of the linked registrations attached to this collection. For each id, there
exists a node link that contains that node.
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 201
This requires both edit permission on the collection, and for the user that is
making the request to be able to read the registrations requested. Data can contain any number of
node identifiers. This will create a node_link for all node_ids in the request that
do not currently have a corresponding node_link in this collection.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_regisrations", # required
"id": <node_id> # required
}]
}
Success: 200
This requires both edit permission on the collection, and for the user that is
making the request to be able to read the registrations requested. Data can contain any number of
node identifiers. This will replace the contents of the node_links for this collection with
the contents of the request. It will delete all node links that don't have a node_id in the data
array, create node links for the node_ids that don't currently have one, and do nothing
for node_ids that already have a corresponding node_link. This means an update request with
{"data": []} will remove all node_links in this collection
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 204
This requires edit permission on the node. This will delete any node_links that have a
corresponding node_id in the request.
"""
view_category = 'collections'
view_name = 'collection-registration-pointer-relationship'
|
caneruguz/osf.io
|
api/collections/views.py
|
Python
|
apache-2.0
| 30,089
|
# -*- coding: utf-8 -*-
import pytest
import threading
import requests
from tests.testserver.server import Server, consume_socket_content
from .utils import override_environ
def echo_response_handler(sock):
"""Simple handler that will take request and echo it back to requester."""
request_content = consume_socket_content(sock, timeout=0.5)
text_200 = (
b'HTTP/1.1 200 OK\r\n'
b'Content-Length: %d\r\n\r\n'
b'%s'
) % (len(request_content), request_content)
sock.send(text_200)
def test_chunked_upload():
"""can safely send generators"""
close_server = threading.Event()
server = Server.basic_response_server(wait_to_close_event=close_server)
data = iter([b'a', b'b', b'c'])
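# The body is an iterator with no known length, so requests falls back to
# chunked transfer encoding (asserted on the request headers below).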
with server as (host, port):
url = 'http://{}:{}/'.format(host, port)
r = requests.post(url, data=data, stream=True)
close_server.set() # release server block
assert r.status_code == 200
assert r.request.headers['Transfer-Encoding'] == 'chunked'
def test_chunked_encoding_error():
"""get a ChunkedEncodingError if the server returns a bad response"""
def incomplete_chunked_response_handler(sock):
request_content = consume_socket_content(sock, timeout=0.5)
# The server never ends the request and doesn't provide any valid chunks
sock.send(b"HTTP/1.1 200 OK\r\n" +
b"Transfer-Encoding: chunked\r\n")
return request_content
close_server = threading.Event()
server = Server(incomplete_chunked_response_handler)
with server as (host, port):
url = 'http://{}:{}/'.format(host, port)
with pytest.raises(requests.exceptions.ChunkedEncodingError):
r = requests.get(url)
close_server.set() # release server block
def test_chunked_upload_uses_only_specified_host_header():
"""Ensure we use only the specified Host header for chunked requests."""
close_server = threading.Event()
server = Server(echo_response_handler, wait_to_close_event=close_server)
data = iter([b'a', b'b', b'c'])
custom_host = 'sample-host'
with server as (host, port):
url = 'http://{}:{}/'.format(host, port)
r = requests.post(url, data=data, headers={'Host': custom_host}, stream=True)
close_server.set() # release server block
expected_header = b'Host: %s\r\n' % custom_host.encode('utf-8')
assert expected_header in r.content
assert r.content.count(b'Host: ') == 1
def test_chunked_upload_doesnt_skip_host_header():
"""Ensure we don't omit all Host headers with chunked requests."""
close_server = threading.Event()
server = Server(echo_response_handler, wait_to_close_event=close_server)
data = iter([b'a', b'b', b'c'])
with server as (host, port):
expected_host = '{}:{}'.format(host, port)
url = 'http://{}:{}/'.format(host, port)
r = requests.post(url, data=data, stream=True)
close_server.set() # release server block
expected_header = b'Host: %s\r\n' % expected_host.encode('utf-8')
assert expected_header in r.content
assert r.content.count(b'Host: ') == 1
def test_conflicting_content_lengths():
"""Ensure we correctly throw an InvalidHeader error if multiple
conflicting Content-Length headers are returned.
"""
def multiple_content_length_response_handler(sock):
request_content = consume_socket_content(sock, timeout=0.5)
sock.send(b"HTTP/1.1 200 OK\r\n" +
b"Content-Type: text/plain\r\n" +
b"Content-Length: 16\r\n" +
b"Content-Length: 32\r\n\r\n" +
b"-- Bad Actor -- Original Content\r\n")
return request_content
close_server = threading.Event()
server = Server(multiple_content_length_response_handler)
with server as (host, port):
url = 'http://{}:{}/'.format(host, port)
with pytest.raises(requests.exceptions.InvalidHeader):
r = requests.get(url)
close_server.set()
def test_digestauth_401_count_reset_on_redirect():
"""Ensure we correctly reset num_401_calls after a successful digest auth,
followed by a 302 redirect to another digest auth prompt.
See https://github.com/psf/requests/issues/1979.
"""
text_401 = (b'HTTP/1.1 401 UNAUTHORIZED\r\n'
b'Content-Length: 0\r\n'
b'WWW-Authenticate: Digest nonce="6bf5d6e4da1ce66918800195d6b9130d"'
b', opaque="372825293d1c26955496c80ed6426e9e", '
b'realm="me@kennethreitz.com", qop=auth\r\n\r\n')
text_302 = (b'HTTP/1.1 302 FOUND\r\n'
b'Content-Length: 0\r\n'
b'Location: /\r\n\r\n')
text_200 = (b'HTTP/1.1 200 OK\r\n'
b'Content-Length: 0\r\n\r\n')
expected_digest = (b'Authorization: Digest username="user", '
b'realm="me@kennethreitz.com", '
b'nonce="6bf5d6e4da1ce66918800195d6b9130d", uri="/"')
auth = requests.auth.HTTPDigestAuth('user', 'pass')
def digest_response_handler(sock):
# Respond to initial GET with a challenge.
request_content = consume_socket_content(sock, timeout=0.5)
assert request_content.startswith(b"GET / HTTP/1.1")
sock.send(text_401)
# Verify we receive an Authorization header in response, then redirect.
request_content = consume_socket_content(sock, timeout=0.5)
assert expected_digest in request_content
sock.send(text_302)
# Verify Authorization isn't sent to the redirected host,
# then send another challenge.
request_content = consume_socket_content(sock, timeout=0.5)
assert b'Authorization:' not in request_content
sock.send(text_401)
# Verify Authorization is sent correctly again, and return 200 OK.
request_content = consume_socket_content(sock, timeout=0.5)
assert expected_digest in request_content
sock.send(text_200)
return request_content
close_server = threading.Event()
server = Server(digest_response_handler, wait_to_close_event=close_server)
with server as (host, port):
url = 'http://{}:{}/'.format(host, port)
r = requests.get(url, auth=auth)
# Verify server succeeded in authenticating.
assert r.status_code == 200
# Verify Authorization was sent in final request.
assert 'Authorization' in r.request.headers
assert r.request.headers['Authorization'].startswith('Digest ')
# Verify redirect happened as we expected.
assert r.history[0].status_code == 302
close_server.set()
def test_digestauth_401_only_sent_once():
"""Ensure we correctly respond to a 401 challenge once, and then
stop responding if challenged again.
"""
text_401 = (b'HTTP/1.1 401 UNAUTHORIZED\r\n'
b'Content-Length: 0\r\n'
b'WWW-Authenticate: Digest nonce="6bf5d6e4da1ce66918800195d6b9130d"'
b', opaque="372825293d1c26955496c80ed6426e9e", '
b'realm="me@kennethreitz.com", qop=auth\r\n\r\n')
expected_digest = (b'Authorization: Digest username="user", '
b'realm="me@kennethreitz.com", '
b'nonce="6bf5d6e4da1ce66918800195d6b9130d", uri="/"')
auth = requests.auth.HTTPDigestAuth('user', 'pass')
def digest_failed_response_handler(sock):
# Respond to initial GET with a challenge.
request_content = consume_socket_content(sock, timeout=0.5)
assert request_content.startswith(b"GET / HTTP/1.1")
sock.send(text_401)
# Verify we receive an Authorization header in response, then
# challenge again.
request_content = consume_socket_content(sock, timeout=0.5)
assert expected_digest in request_content
sock.send(text_401)
# Verify the client didn't respond to second challenge.
request_content = consume_socket_content(sock, timeout=0.5)
assert request_content == b''
return request_content
close_server = threading.Event()
server = Server(digest_failed_response_handler, wait_to_close_event=close_server)
with server as (host, port):
url = 'http://{}:{}/'.format(host, port)
r = requests.get(url, auth=auth)
# Verify server didn't authenticate us.
assert r.status_code == 401
assert r.history[0].status_code == 401
close_server.set()
def test_digestauth_only_on_4xx():
"""Ensure we only send digestauth on 4xx challenges.
See https://github.com/psf/requests/issues/3772.
"""
text_200_chal = (b'HTTP/1.1 200 OK\r\n'
b'Content-Length: 0\r\n'
b'WWW-Authenticate: Digest nonce="6bf5d6e4da1ce66918800195d6b9130d"'
b', opaque="372825293d1c26955496c80ed6426e9e", '
b'realm="me@kennethreitz.com", qop=auth\r\n\r\n')
auth = requests.auth.HTTPDigestAuth('user', 'pass')
def digest_response_handler(sock):
# Respond to GET with a 200 containing www-authenticate header.
request_content = consume_socket_content(sock, timeout=0.5)
assert request_content.startswith(b"GET / HTTP/1.1")
sock.send(text_200_chal)
# Verify the client didn't respond with auth.
request_content = consume_socket_content(sock, timeout=0.5)
assert request_content == b''
return request_content
close_server = threading.Event()
server = Server(digest_response_handler, wait_to_close_event=close_server)
with server as (host, port):
url = 'http://{}:{}/'.format(host, port)
r = requests.get(url, auth=auth)
# Verify server didn't receive auth from us.
assert r.status_code == 200
assert len(r.history) == 0
close_server.set()
_schemes_by_var_prefix = [
('http', ['http']),
('https', ['https']),
('all', ['http', 'https']),
]
_proxy_combos = []
for prefix, schemes in _schemes_by_var_prefix:
for scheme in schemes:
_proxy_combos.append(("{}_proxy".format(prefix), scheme))
_proxy_combos += [(var.upper(), scheme) for var, scheme in _proxy_combos]
@pytest.mark.parametrize("var,scheme", _proxy_combos)
def test_use_proxy_from_environment(httpbin, var, scheme):
url = "{}://httpbin.org".format(scheme)
fake_proxy = Server() # do nothing with the requests; just close the socket
with fake_proxy as (host, port):
proxy_url = "socks5://{}:{}".format(host, port)
kwargs = {var: proxy_url}
with override_environ(**kwargs):
# fake proxy's lack of response will cause a ConnectionError
with pytest.raises(requests.exceptions.ConnectionError):
requests.get(url)
# the fake proxy received a request
assert len(fake_proxy.handler_results) == 1
# it had actual content (not checking for SOCKS protocol for now)
assert len(fake_proxy.handler_results[0]) > 0
def test_redirect_rfc1808_to_non_ascii_location():
path = u'š'
expected_path = b'%C5%A1'
redirect_request = [] # stores the second request to the server
def redirect_resp_handler(sock):
consume_socket_content(sock, timeout=0.5)
location = u'//{}:{}/{}'.format(host, port, path)
sock.send(
b'HTTP/1.1 301 Moved Permanently\r\n'
b'Content-Length: 0\r\n'
b'Location: ' + location.encode('utf8') + b'\r\n'
b'\r\n'
)
redirect_request.append(consume_socket_content(sock, timeout=0.5))
sock.send(b'HTTP/1.1 200 OK\r\n\r\n')
close_server = threading.Event()
server = Server(redirect_resp_handler, wait_to_close_event=close_server)
with server as (host, port):
url = u'http://{}:{}'.format(host, port)
r = requests.get(url=url, allow_redirects=True)
assert r.status_code == 200
assert len(r.history) == 1
assert r.history[0].status_code == 301
assert redirect_request[0].startswith(b'GET /' + expected_path + b' HTTP/1.1')
assert r.url == u'{}/{}'.format(url, expected_path.decode('ascii'))
close_server.set()
def test_fragment_not_sent_with_request():
"""Verify that the fragment portion of a URI isn't sent to the server."""
def response_handler(sock):
req = consume_socket_content(sock, timeout=0.5)
sock.send(
b'HTTP/1.1 200 OK\r\n'
b'Content-Length: ' + str(len(req)).encode('ascii') + b'\r\n'
b'\r\n'+req
)
close_server = threading.Event()
server = Server(response_handler, wait_to_close_event=close_server)
with server as (host, port):
url = 'http://{}:{}/path/to/thing/#view=edit&token=hunter2'.format(host, port)
r = requests.get(url)
raw_request = r.content
assert r.status_code == 200
headers, body = raw_request.split(b'\r\n\r\n', 1)
status_line, headers = headers.split(b'\r\n', 1)
assert status_line == b'GET /path/to/thing/ HTTP/1.1'
for frag in (b'view', b'edit', b'token', b'hunter2'):
assert frag not in headers
assert frag not in body
close_server.set()
def test_fragment_update_on_redirect():
"""Verify we only append previous fragment if one doesn't exist on new
location. If a new fragment is encountered in a Location header, it should
be added to all subsequent requests.
"""
def response_handler(sock):
consume_socket_content(sock, timeout=0.5)
sock.send(
b'HTTP/1.1 302 FOUND\r\n'
b'Content-Length: 0\r\n'
b'Location: /get#relevant-section\r\n\r\n'
)
consume_socket_content(sock, timeout=0.5)
sock.send(
b'HTTP/1.1 302 FOUND\r\n'
b'Content-Length: 0\r\n'
b'Location: /final-url/\r\n\r\n'
)
consume_socket_content(sock, timeout=0.5)
sock.send(
b'HTTP/1.1 200 OK\r\n\r\n'
)
close_server = threading.Event()
server = Server(response_handler, wait_to_close_event=close_server)
with server as (host, port):
url = 'http://{}:{}/path/to/thing/#view=edit&token=hunter2'.format(host, port)
r = requests.get(url)
raw_request = r.content
assert r.status_code == 200
assert len(r.history) == 2
assert r.history[0].request.url == url
# Verify we haven't overwritten the location with our previous fragment.
assert r.history[1].request.url == 'http://{}:{}/get#relevant-section'.format(host, port)
# Verify previous fragment is used and not the original.
assert r.url == 'http://{}:{}/final-url/#relevant-section'.format(host, port)
close_server.set()
|
psf/requests
|
tests/test_lowlevel.py
|
Python
|
apache-2.0
| 14,885
|
# coding: utf-8
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition.
Contact: kubeflow-pipelines@google.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kfp_server_api.configuration import Configuration
class ReportRunMetricsResponseReportRunMetricResult(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'metric_name': 'str',
'metric_node_id': 'str',
'status': 'ReportRunMetricsResponseReportRunMetricResultStatus',
'message': 'str'
}
attribute_map = {
'metric_name': 'metric_name',
'metric_node_id': 'metric_node_id',
'status': 'status',
'message': 'message'
}
def __init__(self, metric_name=None, metric_node_id=None, status=None, message=None, local_vars_configuration=None): # noqa: E501
"""ReportRunMetricsResponseReportRunMetricResult - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._metric_name = None
self._metric_node_id = None
self._status = None
self._message = None
self.discriminator = None
if metric_name is not None:
self.metric_name = metric_name
if metric_node_id is not None:
self.metric_node_id = metric_node_id
if status is not None:
self.status = status
if message is not None:
self.message = message
@property
def metric_name(self):
"""Gets the metric_name of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
Output. The name of the metric. # noqa: E501
:return: The metric_name of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
:rtype: str
"""
return self._metric_name
@metric_name.setter
def metric_name(self, metric_name):
"""Sets the metric_name of this ReportRunMetricsResponseReportRunMetricResult.
Output. The name of the metric. # noqa: E501
:param metric_name: The metric_name of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
:type metric_name: str
"""
self._metric_name = metric_name
@property
def metric_node_id(self):
"""Gets the metric_node_id of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
Output. The ID of the node which reports the metric. # noqa: E501
:return: The metric_node_id of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
:rtype: str
"""
return self._metric_node_id
@metric_node_id.setter
def metric_node_id(self, metric_node_id):
"""Sets the metric_node_id of this ReportRunMetricsResponseReportRunMetricResult.
Output. The ID of the node which reports the metric. # noqa: E501
:param metric_node_id: The metric_node_id of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
:type metric_node_id: str
"""
self._metric_node_id = metric_node_id
@property
def status(self):
"""Gets the status of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
:return: The status of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
:rtype: ReportRunMetricsResponseReportRunMetricResultStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ReportRunMetricsResponseReportRunMetricResult.
:param status: The status of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
:type status: ReportRunMetricsResponseReportRunMetricResultStatus
"""
self._status = status
@property
def message(self):
"""Gets the message of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
Output. The detailed message of the error of the reporting. # noqa: E501
:return: The message of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this ReportRunMetricsResponseReportRunMetricResult.
Output. The detailed message of the error of the reporting. # noqa: E501
:param message: The message of this ReportRunMetricsResponseReportRunMetricResult. # noqa: E501
:type message: str
"""
self._message = message
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReportRunMetricsResponseReportRunMetricResult):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ReportRunMetricsResponseReportRunMetricResult):
return True
return self.to_dict() != other.to_dict()
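# A minimal usage sketch of the generated model: construct a metric result and
# serialize it with to_dict(). The field values are illustrative only.
def _example_report_run_metric_result():
    result = ReportRunMetricsResponseReportRunMetricResult(
        metric_name='accuracy',
        metric_node_id='node-123',
        message='metric reported successfully',
    )
    # to_dict() walks openapi_types and returns plain Python values.
    return result.to_dict()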
|
kubeflow/pipelines
|
backend/api/python_http_client/kfp_server_api/models/report_run_metrics_response_report_run_metric_result.py
|
Python
|
apache-2.0
| 6,705
|
# -*- coding: utf-8 -*-
"""SQLite database plugin related functions and classes for testing."""
import os
from plaso.parsers import sqlite
from plaso.storage.fake import writer as fake_writer
from tests.parsers import test_lib
class SQLitePluginTestCase(test_lib.ParserTestCase):
"""SQLite database plugin test case."""
def _OpenDatabaseFile(self, path_segments, wal_path_segments=None):
"""Opens a SQLite database file.
Args:
path_segments (list[str]): path segments inside the test data directory.
wal_path_segments (list[str]): path segments inside the test data
directory of the SQLite WAL file.
Returns:
tuple: containing:
file_entry (dfvfs.FileEntry): file entry of the SQLite database file.
SQLiteDatabase: SQLite database file.
Raises:
SkipTest: if the path inside the test data directory does not exist and
the test should be skipped.
"""
file_entry = self._GetTestFileEntry(path_segments)
wal_file_entry = None
if wal_path_segments:
wal_file_entry = self._GetTestFileEntry(wal_path_segments)
database = sqlite.SQLiteDatabase(file_entry.name)
file_object = file_entry.GetFileObject()
if not wal_file_entry:
database.Open(file_object)
else:
wal_file_object = wal_file_entry.GetFileObject()
# Seek file_object to 0 so we can re-open the database with WAL file.
file_object.seek(0, os.SEEK_SET)
database.Open(file_object, wal_file_object=wal_file_object)
return file_entry, database
def _ParseDatabaseFileWithPlugin(
self, path_segments, plugin, knowledge_base_values=None,
wal_path_segments=None):
"""Parses a file as a SQLite database with a specific plugin.
This method will first test if a SQLite database contains the required
tables and columns using plugin.CheckRequiredTablesAndColumns() and then
extracts events using plugin.Process().
Args:
path_segments (list[str]): path segments inside the test data directory.
plugin (SQLitePlugin): SQLite database plugin.
knowledge_base_values (Optional[dict[str, object]]): knowledge base
values.
wal_path_segments (list[str]): path segments inside the test data
directory of the SQLite WAL file.
Returns:
FakeStorageWriter: storage writer.
Raises:
SkipTest: if the path inside the test data directory does not exist and
the test should be skipped.
"""
storage_writer = fake_writer.FakeStorageWriter()
storage_writer.Open()
file_entry, database = self._OpenDatabaseFile(
path_segments, wal_path_segments=wal_path_segments)
required_tables_and_column_exist = plugin.CheckRequiredTablesAndColumns(
database)
self.assertTrue(required_tables_and_column_exist)
parser_mediator = self._CreateParserMediator(
storage_writer, file_entry=file_entry,
knowledge_base_values=knowledge_base_values)
parser_mediator.SetFileEntry(file_entry)
# AppendToParserChain needs to be run after SetFileEntry.
parser_mediator.AppendToParserChain(plugin)
try:
cache = sqlite.SQLiteCache()
plugin.Process(parser_mediator, cache=cache, database=database)
finally:
database.Close()
return storage_writer
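# A minimal sketch of how a concrete plugin test is expected to build on this
# test case; the plugin module, plugin class and database file name below are
# hypothetical placeholders.
#
#   class MyDatabasePluginTest(SQLitePluginTestCase):
#       """Tests for a hypothetical SQLite database plugin."""
#
#       def testProcess(self):
#           plugin = my_plugin.MyDatabasePlugin()
#           storage_writer = self._ParseDatabaseFileWithPlugin(
#               ['my_database.sqlite'], plugin)
#           # Assertions against the extracted events go here.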
|
joachimmetz/plaso
|
tests/parsers/sqlite_plugins/test_lib.py
|
Python
|
apache-2.0
| 3,324
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for parse_layer_parameters module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib import slim
from tensorflow.contrib.receptive_field.python.util import graph_compute_order
from tensorflow.contrib.receptive_field.python.util import parse_layer_parameters
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def create_test_network(placeholder_resolution, convert_variables_to_constants):
"""Convolutional neural network for test.
Args:
placeholder_resolution: Resolution to use for input placeholder. Used for
height and width dimensions.
convert_variables_to_constants: Whether to convert variables to constants.
Returns:
name_to_node: Dict keyed by node name, each entry containing the node's
NodeDef.
"""
g = ops.Graph()
sess = session.Session(graph=g)
with g.as_default():
# An input test image with unknown spatial resolution.
x = array_ops.placeholder(
dtypes.float32, (1, placeholder_resolution, placeholder_resolution, 1),
name='input_image')
# Left branch before first addition.
l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
# Right branch before first addition.
l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
# First addition.
l4 = nn.relu(l1 + l3, name='L4_relu')
# Left branch after first addition.
l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
# Right branch after first addition.
l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
# Final addition.
gen_math_ops.add(l5, l6, name='L7_add')
if convert_variables_to_constants:
sess.run(variables.global_variables_initializer())
graph_def = graph_util.convert_variables_to_constants(
sess, g.as_graph_def(), ['L7_add'])
else:
graph_def = g.as_graph_def()
name_to_node = graph_compute_order.parse_graph_nodes(graph_def)
return name_to_node
class ParseLayerParametersTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('NonePlaceholder', None, False),
('224Placeholder', 224, False),
('NonePlaceholderVarAsConst', None, True),
('224PlaceholderVarAsConst', 224, True))
def testParametersAreParsedCorrectly(self, placeholder_resolution,
convert_variables_to_constants):
"""Checks parameters from create_test_network() are parsed correctly."""
name_to_node = create_test_network(placeholder_resolution,
convert_variables_to_constants)
# L1.
l1_node_name = 'L1/Conv2D'
l1_params = parse_layer_parameters.get_layer_params(
name_to_node[l1_node_name], name_to_node)
expected_l1_params = (1, 1, 4, 4, 0, 0, 0, 0)
self.assertEqual(l1_params, expected_l1_params)
# L2 padding.
l2_pad_name = 'L2_pad'
l2_pad_params = parse_layer_parameters.get_layer_params(
name_to_node[l2_pad_name], name_to_node)
expected_l2_pad_params = (1, 1, 1, 1, 1, 1, 1, 1)
self.assertEqual(l2_pad_params, expected_l2_pad_params)
# L2.
l2_node_name = 'L2/Conv2D'
l2_params = parse_layer_parameters.get_layer_params(
name_to_node[l2_node_name], name_to_node)
expected_l2_params = (3, 3, 2, 2, 0, 0, 0, 0)
self.assertEqual(l2_params, expected_l2_params)
# L3.
l3_node_name = 'L3/MaxPool'
# - Without knowing input size.
l3_params = parse_layer_parameters.get_layer_params(
name_to_node[l3_node_name], name_to_node)
expected_l3_params = (3, 3, 2, 2, None, None, None, None)
self.assertEqual(l3_params, expected_l3_params)
# - Input size is even.
l3_even_params = parse_layer_parameters.get_layer_params(
name_to_node[l3_node_name], name_to_node, input_resolution=[4, 4])
expected_l3_even_params = (3, 3, 2, 2, 0, 0, 1, 1)
self.assertEqual(l3_even_params, expected_l3_even_params)
# - Input size is odd.
l3_odd_params = parse_layer_parameters.get_layer_params(
name_to_node[l3_node_name], name_to_node, input_resolution=[5, 5])
expected_l3_odd_params = (3, 3, 2, 2, 1, 1, 2, 2)
self.assertEqual(l3_odd_params, expected_l3_odd_params)
# L4.
l4_node_name = 'L4_relu'
l4_params = parse_layer_parameters.get_layer_params(
name_to_node[l4_node_name], name_to_node)
expected_l4_params = (1, 1, 1, 1, 0, 0, 0, 0)
self.assertEqual(l4_params, expected_l4_params)
# L5.
l5_node_name = 'L5/Conv2D'
l5_params = parse_layer_parameters.get_layer_params(
name_to_node[l5_node_name], name_to_node)
expected_l5_params = (1, 1, 2, 2, 0, 0, 0, 0)
self.assertEqual(l5_params, expected_l5_params)
# L6.
l6_node_name = 'L6/Conv2D'
# - Without knowing input size.
l6_params = parse_layer_parameters.get_layer_params(
name_to_node[l6_node_name], name_to_node)
expected_l6_params = (3, 3, 2, 2, None, None, None, None)
self.assertEqual(l6_params, expected_l6_params)
# - Input size is even.
l6_even_params = parse_layer_parameters.get_layer_params(
name_to_node[l6_node_name], name_to_node, input_resolution=[4, 4])
expected_l6_even_params = (3, 3, 2, 2, 0, 0, 1, 1)
self.assertEqual(l6_even_params, expected_l6_even_params)
# - Input size is odd.
l6_odd_params = parse_layer_parameters.get_layer_params(
name_to_node[l6_node_name], name_to_node, input_resolution=[5, 5])
expected_l6_odd_params = (3, 3, 2, 2, 1, 1, 2, 2)
self.assertEqual(l6_odd_params, expected_l6_odd_params)
# L7.
l7_node_name = 'L7_add'
l7_params = parse_layer_parameters.get_layer_params(
name_to_node[l7_node_name], name_to_node)
expected_l7_params = (1, 1, 1, 1, 0, 0, 0, 0)
self.assertEqual(l7_params, expected_l7_params)
if __name__ == '__main__':
test.main()
|
chemelnucfin/tensorflow
|
tensorflow/contrib/receptive_field/python/util/parse_layer_parameters_test.py
|
Python
|
apache-2.0
| 7,276
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from contextlib import contextmanager
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.testutil.file_test_util import exact_files
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
from pants.util.dirutil import safe_open
_NAMESPACE = dedent(
"""
namespace go thrifttest.duck
"""
)
_DUCK_STRUCT = dedent(
"""
struct Duck {
1: optional string quack,
}
"""
)
_FEEDER_STRUCT_TEMPLATE = dedent(
"""
service Feeder {{
void feed(1:{include}Duck duck),
}}
"""
)
class GoThriftGenIntegrationTest(PantsRunIntegrationTest):
@contextmanager
def _create_thrift_project(self, thrift_files):
with self.temporary_sourcedir() as srcdir:
for path, content in thrift_files.items():
with safe_open(os.path.join(srcdir, path), "w") as fp:
fp.write(content)
with safe_open(os.path.join(srcdir, "src/thrift/thrifttest/BUILD"), "w") as fp:
fp.write(
dedent(
"""
go_thrift_library(
name='fleem',
sources=['*.thrift'],
)
"""
).strip()
)
with safe_open(os.path.join(srcdir, "src/go/usethrift/example.go"), "w") as fp:
fp.write(
dedent(
"""
package usethrift
import "thrifttest/duck"
func whatevs(f duck.Feeder) string {
d := duck.NewDuck()
f.Feed(d)
return d.GetQuack()
}
"""
).strip()
)
with safe_open(os.path.join(srcdir, "src/go/usethrift/BUILD"), "w") as fp:
fp.write(
dedent(
"""
go_library(
dependencies=[
'{srcdir}/src/thrift/thrifttest:fleem'
]
)
""".format(
srcdir=os.path.relpath(srcdir, get_buildroot())
)
).strip()
)
with safe_open(
os.path.join(srcdir, "3rdparty/go/github.com/apache/thrift/BUILD"), "w"
) as fp:
fp.write("go_remote_library(rev='0.10.0', pkg='lib/go/thrift')")
config = {
"gen.go-thrift": {
"thrift_import_target": os.path.join(
os.path.relpath(srcdir, get_buildroot()),
"3rdparty/go/github.com/apache/thrift:lib/go/thrift",
),
"thrift_import": "github.com/apache/thrift/lib/go/thrift",
}
}
yield srcdir, config
def test_go_thrift_gen_single(self):
# Compile with one thrift file.
thrift_files = {
"src/thrift/thrifttest/duck.thrift": _NAMESPACE
+ _DUCK_STRUCT
+ _FEEDER_STRUCT_TEMPLATE.format(include=""),
}
with self.temporary_workdir() as workdir:
with self._create_thrift_project(thrift_files) as (srcdir, config):
args = ["compile", os.path.join(srcdir, "src/go/usethrift")]
pants_run = self.run_pants_with_workdir(args, workdir, config=config)
self.assert_success(pants_run)
# Fetch the hash for task impl version.
go_thrift_contents = [
p
for p in os.listdir(os.path.join(workdir, "gen", "go-thrift"))
if p != "current"
] # Ignore the 'current' symlink.
self.assertEqual(len(go_thrift_contents), 1)
hash_dir = go_thrift_contents[0]
target_dir = os.path.relpath(
os.path.join(srcdir, "src/thrift/thrifttest/fleem"), get_buildroot()
)
root = os.path.join(
workdir,
"gen",
"go-thrift",
hash_dir,
target_dir.replace(os.path.sep, "."),
"current",
)
self.assertEqual(
sorted(
[
"src/go/thrifttest/duck/duck-consts.go",
"src/go/thrifttest/duck/duck.go",
"src/go/thrifttest/duck/GoUnusedProtection__.go",
"src/go/thrifttest/duck/feeder-remote/feeder-remote.go",
]
),
sorted(exact_files(root)),
)
def test_go_thrift_gen_multi(self):
# Compile with a namespace split across thrift files.
duck_include = dedent(
"""
include "thrifttest/duck.thrift"
"""
)
thrift_files = {
"src/thrift/thrifttest/duck.thrift": _NAMESPACE + _DUCK_STRUCT,
"src/thrift/thrifttest/feeder.thrift": _NAMESPACE
+ duck_include
+ _FEEDER_STRUCT_TEMPLATE.format(include="duck."),
}
with self.temporary_workdir() as workdir:
with self._create_thrift_project(thrift_files) as (srcdir, config):
args = ["compile", os.path.join(srcdir, "src/go/usethrift")]
pants_run = self.run_pants_with_workdir(args, workdir, config=config)
self.assert_success(pants_run)
|
tdyas/pants
|
contrib/go/tests/python/pants_test/contrib/go/tasks/test_go_thrift_gen_integration.py
|
Python
|
apache-2.0
| 6,005
|
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various utilities.
Current contents:
- Text colorization using ANSI color codes
- A class to construct and manage paths under a root path.
- A helper to manage running subprocesses, wrapping the subprocess module.
"""
__authors__ = [
# alphabetical order by last name, please
'"David Anderson" <dave@natulte.net>',
]
import os.path
import re
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import subprocess
import threading
import error
import log
class Error(error.Error):
pass
class SubprocessFailed(Error):
"""A subprocess returned a non-zero error code."""
pass
# The magic escape sequence understood by modern terminal emulators to
# configure fore/background colors and other basic text display
# settings.
_ANSI_ESCAPE = '\x1b[%dm'
_ANSI_ESCAPE_RE = re.compile(r'\x1b\[\d+m')
# Some internal non-color settings that we use.
_RESET = 0 # Reset to terminal defaults.
_BOLD = 1 # Brighter colors.
# ANSI color codes.
RED = 31
GREEN = 32
WHITE = 37
def _ansi_escape(code):
return _ANSI_ESCAPE % code
def colorize(text, color, bold=False):
"""Colorize some text using ANSI color codes.
Note that while ANSI color codes look good in a terminal they look
like noise in log files unless viewed in an ANSI color capable
viewer (such as 'less -R').
Args:
text: The text to colorize.
color: One of the color symbols from this module.
bold: If True, make the color brighter.
Returns:
The input text string, appropriately sprinkled with color
codes. Colors are reset to terminal defaults after the input
text.
"""
bold = _ansi_escape(_BOLD) if bold else ''
return '%s%s%s%s' % (bold, _ansi_escape(color),
text, _ansi_escape(_RESET))
def decolorize(text):
"""Remove ANSI color codes from text."""
return _ANSI_ESCAPE_RE.sub('', text)
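# A minimal usage sketch: colorize() wraps text in ANSI escape codes and
# decolorize() strips them again; the colors only render in an ANSI-capable
# viewer such as a terminal or 'less -R'.
def _example_colorize_roundtrip():
    message = colorize('build OK', GREEN, bold=True)
    assert decolorize(message) == 'build OK'
    return message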
class Paths(object):
"""A helper to construct and check paths under a given root."""
def __init__(self, root):
"""Initializer.
Args:
root: The root of all paths this instance will consider.
"""
self._root = os.path.abspath(
os.path.expandvars(os.path.expanduser(root)))
def path(self, path=''):
"""Construct and return a path under the path root.
Args:
path: The desired path string relative to the root.
Returns:
The absolute path corresponding to the relative input path.
"""
assert not os.path.isabs(path)
return os.path.abspath(os.path.join(self._root, path))
def exists(self, path=''):
"""Check for the existence of a path under the path root.
Does not discriminate on the path type (ie. it could be a
directory, a file, a symbolic link...), just checks for the
existence of the path.
Args:
path: The path string relative to the root.
Returns:
True if the path exists, False otherwise.
"""
return os.path.exists(self.path(path))
class _PipeAdapter(threading.Thread):
"""A thread that connects one file-like object to another"""
def __init__(self, pipe, logfile):
threading.Thread.__init__(self)
self.pipe, self.logfile = pipe, logfile
self.setDaemon(True)
self.start()
def run(self):
try:
while True:
data = self.pipe.read(512) # Small to retain interactivity
if not data:
return
self.logfile.write(data)
except (EOFError, OSError):
pass
def run(argv, cwd=None, capture=False, split_capture=True, stdin=None):
"""Run the given command and optionally return its output.
Note that if you set capture=True, the command's output is
buffered in memory. Output capture should only be used with
commands that output small amounts of data. O(kB) is fine, O(MB)
is starting to push it a little.
Args:
argv: A list containing the name of the program to run, followed
by its argument vector.
cwd: Run the program from this directory.
capture: If True, capture the program's stdout stream. If False,
stdout will output to sys.stdout.
split_capture: If True, return the captured output as a list of
lines. Else, return as a single unaltered string.
stdin: The string to feed to the program's stdin stream.
Returns:
If capture is True, a string containing the combined
stdout/stderr output of the program. If capture is False,
nothing is returned.
Raises:
SubprocessFailed: The subprocess exited with a non-zero exit code.
"""
log.debug(colorize('# ' + ' '.join(argv), WHITE, bold=True))
process = subprocess.Popen(argv,
shell=False,
cwd=cwd,
stdin=(subprocess.PIPE if stdin else None),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Spin up threads to capture stdout and stderr. Depending on the
# value of the capture parameter, stdout is pushed either into the
# log or into a string for processing. stderr always goes
# into the log.
#
# Threads are necessary because all of writing to stdin, reading
# from stdout and reading from stderr must all happen
# simultaneously. Otherwise, there is a risk that one of the pipes
# will fill up, causing the subprocess and us to deadlock. So,
# threads are used to keep the pipes safely flowing.
stdout = StringIO.StringIO() if capture else log.FileLikeLogger()
out_adapter = _PipeAdapter(process.stdout, stdout)
err_adapter = _PipeAdapter(process.stderr, log.FileLikeLogger())
if stdin:
process.stdin.write(stdin)
out_adapter.join()
err_adapter.join()
process.wait()
if process.returncode != 0:
if capture:
raise SubprocessFailed('Process %s failed with output: %s' %
(argv[0], stdout.getvalue()))
else:
raise SubprocessFailed('Process %s failed' % argv[0])
if capture:
out = stdout.getvalue()
stdout.close()
if split_capture:
out = out.strip().split('\n')
return out
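# A minimal usage sketch of the capture behaviour documented in run()'s
# docstring; the command assumes a POSIX 'echo' binary is on PATH.
def _example_run_capture():
    # With capture=True and the default split_capture=True, the captured
    # stdout comes back as a list of lines.
    lines = run(['echo', 'hello'], capture=True)
    assert lines == ['hello']
    return lines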
|
MatthewWilkes/mw4068-packaging
|
scripts/release/util.py
|
Python
|
apache-2.0
| 6,624
|
import sys
import time
import random
import commands
import userinterface.Client as Client
from taskbuffer.JobSpec import JobSpec
from taskbuffer.FileSpec import FileSpec
if len(sys.argv)>1:
site = sys.argv[1]
cloud = None
else:
site = None
cloud = 'US'
#cloud = 'TW'
#Recent changes (BNL migration to LFC?) force the cloud to be specified
cloud = 'US'
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen')
destName = 'BNL_ATLAS_2'
files = {
'EVNT.023986._00001.pool.root.1':None,
#'EVNT.023989._00001.pool.root.1':None,
}
jobList = []
index = 0
for lfn in files.keys():
index += 1
job = JobSpec()
job.jobDefinitionID = (time.time()) % 10000
job.jobName = "%s_%d" % (commands.getoutput('uuidgen'),index)
job.AtlasRelease = 'Atlas-14.2.20'
job.homepackage = 'AtlasProduction/14.2.20.1'
job.transformation = 'csc_simul_reco_trf.py'
job.destinationDBlock = datasetName
job.destinationSE = destName
job.computingSite = site
job.prodDBlock = 'mc08.105031.Jimmy_jetsJ2.evgen.EVNT.e347_tid023986'
#job.prodDBlock = 'mc08.105034.Jimmy_jetsJ5.evgen.EVNT.e347_tid023989'
job.prodSourceLabel = 'test'
job.processingType = 'test'
job.currentPriority = 10000
job.cloud = cloud
fileI = FileSpec()
fileI.dataset = job.prodDBlock
fileI.prodDBlock = job.prodDBlock
fileI.lfn = lfn
fileI.type = 'input'
job.addFile(fileI)
fileD = FileSpec()
fileD.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v050601'
fileD.prodDBlock = 'ddo.000001.Atlas.Ideal.DBRelease.v050601'
fileD.lfn = 'DBRelease-5.6.1.tar.gz'
fileD.type = 'input'
job.addFile(fileD)
fileOA = FileSpec()
fileOA.lfn = "%s.AOD.pool.root" % job.jobName
fileOA.destinationDBlock = job.destinationDBlock
fileOA.destinationSE = job.destinationSE
fileOA.dataset = job.destinationDBlock
fileOA.destinationDBlockToken = 'ATLASDATADISK'
fileOA.type = 'output'
job.addFile(fileOA)
fileOE = FileSpec()
fileOE.lfn = "%s.ESD.pool.root" % job.jobName
fileOE.destinationDBlock = job.destinationDBlock
fileOE.destinationSE = job.destinationSE
fileOE.dataset = job.destinationDBlock
fileOE.destinationDBlockToken = 'ATLASDATADISK'
fileOE.type = 'output'
job.addFile(fileOE)
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.destinationDBlockToken = 'ATLASDATADISK'
fileOL.type = 'log'
job.addFile(fileOL)
job.jobParameters="%s %s 30 500 3 ATLAS-GEO-02-01-00 3 3 QGSP_BERT jobConfig.VertexPosFastIDKiller.py FastSimulationJobTransforms/FastCaloSimAddCellsRecConfig.py,NoTrackSlimming.py %s OFF NONE NONE %s NONE" % (fileI.lfn, fileOA.lfn, fileD.lfn, fileOE.lfn)
jobList.append(job)
s,o = Client.submitJobs(jobList)
print "---------------------"
print s
for x in o:
print "PandaID=%s" % x[0]
|
RRCKI/panda-server
|
pandaserver/test/testSimulReco14.py
|
Python
|
apache-2.0
| 3,166
|
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_config import cfg
import requests
import six
from six.moves.urllib import parse as urlparse
from ceilometer.i18n import _
from ceilometer.openstack.common import log
CONF = cfg.CONF
CONF.import_opt('http_timeout', 'ceilometer.service')
LOG = log.getLogger(__name__)
class OpencontrailAPIFailed(Exception):
pass
class AnalyticsAPIBaseClient(object):
"""Opencontrail Base Statistics REST API Client."""
def __init__(self, endpoint, data):
self.endpoint = endpoint
self.data = data or {}
def request(self, path, fqdn_uuid, data=None):
req_data = copy.copy(self.data)
if data:
req_data.update(data)
req_params = self._get_req_params(data=req_data)
url = urlparse.urljoin(self.endpoint, path + fqdn_uuid)
self._log_req(url, req_params)
resp = requests.get(url, **req_params)
self._log_res(resp)
if resp.status_code != 200:
raise OpencontrailAPIFailed(
_('Opencontrail API returned %(status)s %(reason)s') %
{'status': resp.status_code, 'reason': resp.reason})
return resp
def _get_req_params(self, data=None):
req_params = {
'headers': {
'Accept': 'application/json'
},
'data': data,
'allow_redirects': False,
'timeout': CONF.http_timeout,
}
return req_params
@staticmethod
def _log_req(url, req_params):
if not CONF.debug:
return
curl_command = ['REQ: curl -i -X GET ']
params = []
for name, value in six.iteritems(req_params['data']):
params.append("%s=%s" % (name, value))
curl_command.append('"%s?%s" ' % (url, '&'.join(params)))
for name, value in six.iteritems(req_params['headers']):
curl_command.append('-H "%s: %s" ' % (name, value))
LOG.debug(''.join(curl_command))
@staticmethod
def _log_res(resp):
if not CONF.debug:
return
dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version,
resp.status_code,
resp.reason)]
dump.extend('%s: %s\n' % (k, v)
for k, v in six.iteritems(resp.headers))
dump.append('\n')
if resp.content:
dump.extend([resp.content, '\n'])
LOG.debug(''.join(dump))
class NetworksAPIClient(AnalyticsAPIBaseClient):
"""Opencontrail Statistics REST API Client."""
def get_vm_statistics(self, fqdn_uuid, data=None):
"""Get statistics of a virtual-machines.
URL:
{endpoint}/analytics/uves/virtual-machine/{fqdn_uuid}
"""
path = '/analytics/uves/virtual-machine/'
resp = self.request(path, fqdn_uuid, data)
return resp.json()
class Client(object):
def __init__(self, endpoint, data=None):
self.networks = NetworksAPIClient(endpoint, data)
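# A minimal usage sketch of the client wrapper; the analytics endpoint and
# UUID below are hypothetical placeholders.
def _example_get_vm_statistics():
    client = Client('http://analytics.example.org:8081')
    return client.networks.get_vm_statistics('<fqdn_uuid>')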
|
Juniper/ceilometer
|
ceilometer/network/statistics/opencontrail/client.py
|
Python
|
apache-2.0
| 3,667
|
#!/usr/bin/env python
__author__ = 'greg'
from cassandra.cluster import Cluster
import numpy
import matplotlib.pyplot as plt
import datetime
import csv
import bisect
import random
import json
def index(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect.bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
# load subject data from CSV
subjects_index = {}
with open('/home/greg/Documents/subject_species_all.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
subjects_index[row[1]] = row[2]
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
def unix_time_millis(dt):
return long(unix_time(dt) * 1000.0)
cluster = Cluster()
cassandra_session = cluster.connect('serengeti')
ips = []
for ii,row in enumerate(cassandra_session.execute("select * from classifications where id =1")):
try:
index(ips,row.user_ip)
except ValueError:
bisect.insort(ips,row.user_ip)
# ips.add(row.user_ip)
if ii == 100000:
break
animal_accuracy = []
for ip in random.sample(ips,500):
true_blank = 0.
false_blank = 0.
true_animal = 0.
false_animal = 0.
for classification in cassandra_session.execute("select * from ip_classifications where id =1 and user_ip='"+str(ip)+"'"):
zooniverse_id = classification.zooniverse_id
annotations = json.loads(classification.annotations)
nothing = "nothing" in annotations[-1]
if subjects_index[zooniverse_id]=="blank":
if nothing:
true_blank += 1
else:
false_animal += 1
else:
if nothing:
false_blank += 1
else:
true_animal += 1
if (true_animal+false_blank) == 0:
continue
animal_accuracy.append(true_animal/(true_animal+false_blank))
plt.hist(animal_accuracy,50,cumulative=True,normed=1)
plt.show()
|
zooniverse/aggregation
|
experimental/algorithms/blanks/user_analysis.py
|
Python
|
apache-2.0
| 2,084
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from salaryzenaggr.constants import * # noqa
from salaryzenaggr import fetchers
from salaryzenaggr.fetchers import rest_api
def _get_currency_name(currency_id):
currency_ids = {
'840': CURRENCY_USD,
'978': CURRENCY_EURO,
}
return currency_ids[currency_id] if currency_id in currency_ids else None
class AlfaCurrencyXmlFetcher(rest_api.XmlRestApiFetcher):
def get_supported_banks(self):
return [BANK_ALFA, BANK_CBR]
def get_supported_currencies(self):
return [CURRENCY_USD, CURRENCY_EURO]
def fetch_data(self, data, currencies=None, from_date=None):
alfa_bank_url = 'http://alfabank.ru/_/_currency.xml'
response = self._fetch_url(alfa_bank_url)
data_type = DATA_TYPE_CURRENT
for rate_tag in response.getElementsByTagName('rates'):
rate_type = rate_tag.getAttribute('type')
if rate_type not in ['non-cash', 'cb']:
continue
items = rate_tag.getElementsByTagName('item')
for item in items:
currency = _get_currency_name(item.getAttribute('currency-id'))
if currency and currency in currencies:
bank = BANK_CBR if rate_type == 'cb' else BANK_ALFA
write_data = functools.partial(fetchers.write_data_set,
result=data,
bank=bank,
currency=currency,
data_type=data_type)
if item.getAttribute('value'):
write_data(exchange_type=EXCHANGE_RATE, value=item.getAttribute('value'))
else:
write_data(exchange_type=EXCHANGE_SELL, value=item.getAttribute('value-selling'))
write_data(exchange_type=EXCHANGE_BUY, value=item.getAttribute('value-buying'))
|
Frostman/SalaryZenAggregator_Old
|
salaryzenaggr/fetchers/alfa_currency_xml.py
|
Python
|
apache-2.0
| 2,577
|
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_utils import versionutils
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
from nova.virt import hardware
NULLABLE_STRING_FIELDS = ['name', 'checksum', 'owner',
'container_format', 'disk_format']
NULLABLE_INTEGER_FIELDS = ['size', 'virtual_size']
@base.NovaObjectRegistry.register
class ImageMeta(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: updated ImageMetaProps
# Version 1.2: ImageMetaProps version 1.2
# Version 1.3: ImageMetaProps version 1.3
# Version 1.4: ImageMetaProps version 1.4
# Version 1.5: ImageMetaProps version 1.5
# Version 1.6: ImageMetaProps version 1.6
# Version 1.7: ImageMetaProps version 1.7
# Version 1.8: ImageMetaProps version 1.8
VERSION = '1.8'
# These are driven by what the image client API returns
# to Nova from Glance. This is defined in the glance
# code glance/api/v2/images.py get_base_properties()
# method. A few things are currently left out:
# self, file, schema - Nova does not appear to ever use
# these fields; locations - modelling the arbitrary
# data in the 'metadata' subfield is non-trivial as
# there's no clear spec.
#
# TODO(ft): In version 2.0, these fields should be nullable:
# name, checksum, owner, size, virtual_size, container_format, disk_format
#
fields = {
'id': fields.UUIDField(),
'name': fields.StringField(),
'status': fields.StringField(),
'visibility': fields.StringField(),
'protected': fields.FlexibleBooleanField(),
'checksum': fields.StringField(),
'owner': fields.StringField(),
'size': fields.IntegerField(),
'virtual_size': fields.IntegerField(),
'container_format': fields.StringField(),
'disk_format': fields.StringField(),
'created_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
'tags': fields.ListOfStringsField(),
'direct_url': fields.StringField(),
'min_ram': fields.IntegerField(),
'min_disk': fields.IntegerField(),
'properties': fields.ObjectField('ImageMetaProps'),
}
@classmethod
def from_dict(cls, image_meta):
"""Create instance from image metadata dict
:param image_meta: image metadata dictionary
Creates a new object instance, initializing from the
properties associated with the image metadata instance
:returns: an ImageMeta instance
"""
if image_meta is None:
image_meta = {}
# We must turn 'properties' key dict into an object
# so copy image_meta to avoid changing original
image_meta = copy.deepcopy(image_meta)
image_meta["properties"] = \
objects.ImageMetaProps.from_dict(
image_meta.get("properties", {}))
# Some fields are nullable in the Glance DB schema, but were not marked
# as nullable in ImageMeta initially by mistake. To keep compatibility
# with compute nodes running previous versions, these fields are still
# not nullable in ImageMeta, but the code below converts None to
# appropriate empty values.
for fld in NULLABLE_STRING_FIELDS:
if fld in image_meta and image_meta[fld] is None:
image_meta[fld] = ''
for fld in NULLABLE_INTEGER_FIELDS:
if fld in image_meta and image_meta[fld] is None:
image_meta[fld] = 0
return cls(**image_meta)
@classmethod
def from_instance(cls, instance):
"""Create instance from instance system metadata
:param instance: Instance object
Creates a new object instance, initializing from the
system metadata "image_*" properties associated with
instance
:returns: an ImageMeta instance
"""
sysmeta = utils.instance_sys_meta(instance)
image_meta = utils.get_image_from_system_metadata(sysmeta)
return cls.from_dict(image_meta)
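# A minimal usage sketch of from_dict(): nullable Glance fields are normalized
# to empty values as described above. The field values are illustrative only.
def _example_image_meta_from_dict():
    image_meta = ImageMeta.from_dict({
        'id': 'c8f6ae05-5ab8-44f6-9d4d-69cbb2e95be5',
        'name': None,   # converted to '' ('name' is in NULLABLE_STRING_FIELDS)
        'size': None,   # converted to 0 ('size' is in NULLABLE_INTEGER_FIELDS)
        'properties': {'hw_architecture': 'x86_64'},
    })
    return image_meta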
@base.NovaObjectRegistry.register
class ImageMetaProps(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: added os_require_quiesce field
# Version 1.2: added img_hv_type and img_hv_requested_version fields
# Version 1.3: HVSpec version 1.1
# Version 1.4: added hw_vif_multiqueue_enabled field
# Version 1.5: added os_admin_user field
# Version 1.6: Added 'lxc' and 'uml' enum types to DiskBusField
# Version 1.7: added img_config_drive field
# Version 1.8: Added 'lxd' to hypervisor types
VERSION = '1.8'
def obj_make_compatible(self, primitive, target_version):
super(ImageMetaProps, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 7):
primitive.pop('img_config_drive', None)
if target_version < (1, 5):
primitive.pop('os_admin_user', None)
if target_version < (1, 4):
primitive.pop('hw_vif_multiqueue_enabled', None)
if target_version < (1, 2):
primitive.pop('img_hv_type', None)
primitive.pop('img_hv_requested_version', None)
if target_version < (1, 1):
primitive.pop('os_require_quiesce', None)
if target_version < (1, 6):
bus = primitive.get('hw_disk_bus', None)
if bus in ('lxc', 'uml'):
raise exception.ObjectActionError(
action='obj_make_compatible',
reason='hw_disk_bus=%s not supported in version %s' % (
bus, target_version))
# Maximum number of NUMA nodes permitted for the guest topology
NUMA_NODES_MAX = 128
# 'hw_' - settings affecting the guest virtual machine hardware
# 'img_' - settings affecting the use of images by the compute node
# 'os_' - settings affecting the guest operating system setup
fields = {
# name of guest hardware architecture eg i686, x86_64, ppc64
'hw_architecture': fields.ArchitectureField(),
# used to decide to expand root disk partition and fs to full size of
# root disk
'hw_auto_disk_config': fields.StringField(),
# whether to display BIOS boot device menu
'hw_boot_menu': fields.FlexibleBooleanField(),
# name of the CDROM bus to use eg virtio, scsi, ide
'hw_cdrom_bus': fields.DiskBusField(),
# preferred number of CPU cores per socket
'hw_cpu_cores': fields.IntegerField(),
# preferred number of CPU sockets
'hw_cpu_sockets': fields.IntegerField(),
# maximum number of CPU cores per socket
'hw_cpu_max_cores': fields.IntegerField(),
# maximum number of CPU sockets
'hw_cpu_max_sockets': fields.IntegerField(),
# maximum number of CPU threads per core
'hw_cpu_max_threads': fields.IntegerField(),
# CPU thread allocation policy
'hw_cpu_policy': fields.CPUAllocationPolicyField(),
# preferred number of CPU threads per core
'hw_cpu_threads': fields.IntegerField(),
# guest ABI version for guest xentools either 1 or 2 (or 3 - depends on
# Citrix PV tools version installed in image)
'hw_device_id': fields.IntegerField(),
# name of the hard disk bus to use eg virtio, scsi, ide
'hw_disk_bus': fields.DiskBusField(),
# allocation mode eg 'preallocated'
'hw_disk_type': fields.StringField(),
# name of the floppy disk bus to use eg fd, scsi, ide
'hw_floppy_bus': fields.DiskBusField(),
# boolean - used to trigger code to inject networking when booting a CD
# image with a network boot image
'hw_ipxe_boot': fields.FlexibleBooleanField(),
# There are sooooooooooo many possible machine types in
# QEMU - several new ones with each new release - that it
# is not practical to enumerate them all. So we use a free
# form string
'hw_machine_type': fields.StringField(),
# One of the magic strings 'small', 'any', 'large'
# or an explicit page size in KB (eg 4, 2048, ...)
'hw_mem_page_size': fields.StringField(),
# Number of guest NUMA nodes
'hw_numa_nodes': fields.IntegerField(),
# Each list entry corresponds to a guest NUMA node and the
# set members indicate CPUs for that node
'hw_numa_cpus': fields.ListOfSetsOfIntegersField(),
# Each list entry corresponds to a guest NUMA node and the
# list value indicates the memory size of that node.
'hw_numa_mem': fields.ListOfIntegersField(),
# boolean 'yes' or 'no' to enable QEMU guest agent
'hw_qemu_guest_agent': fields.FlexibleBooleanField(),
# name of the RNG device type eg virtio
'hw_rng_model': fields.RNGModelField(),
# number of serial ports to create
'hw_serial_port_count': fields.IntegerField(),
# name of the SCSI bus controller eg 'virtio-scsi', 'lsilogic', etc
'hw_scsi_model': fields.SCSIModelField(),
# name of the video adapter model to use, eg cirrus, vga, xen, qxl
'hw_video_model': fields.VideoModelField(),
# MB of video RAM to provide eg 64
'hw_video_ram': fields.IntegerField(),
# name of a NIC device model eg virtio, e1000, rtl8139
'hw_vif_model': fields.VIFModelField(),
# "xen" vs "hvm"
'hw_vm_mode': fields.VMModeField(),
# action to take when watchdog device fires eg reset, poweroff, pause,
# none
'hw_watchdog_action': fields.WatchdogActionField(),
# boolean - If true, this will enable the virtio-multiqueue feature
'hw_vif_multiqueue_enabled': fields.FlexibleBooleanField(),
# if true download using bittorrent
'img_bittorrent': fields.FlexibleBooleanField(),
# Which data format the 'img_block_device_mapping' field is
# using to represent the block device mapping
'img_bdm_v2': fields.FlexibleBooleanField(),
# Block device mapping - the data may be in one of two completely
# different formats. The 'img_bdm_v2' field determines whether
# it is in legacy format, or the new current format. Ideally
# we would have a formal data type for this field instead of a
# dict, but with 2 different formats to represent this is hard.
# See nova/block_device.py from_legacy_mapping() for the complex
# conversion code. So for now leave it as a dict and continue
# to use existing code that is able to convert dict into the
# desired internal BDM formats
'img_block_device_mapping':
fields.ListOfDictOfNullableStringsField(),
# boolean - if True, and the image cache is set to "some", decides whether
# the image should be cached on the host when a server is booted on that host
'img_cache_in_nova': fields.FlexibleBooleanField(),
# Compression level for images. (1-9)
'img_compression_level': fields.IntegerField(),
# hypervisor supported version, eg. '>=2.6'
'img_hv_requested_version': fields.VersionPredicateField(),
# type of the hypervisor, eg kvm, ironic, xen
'img_hv_type': fields.HVTypeField(),
# Whether the image needs/expected config drive
'img_config_drive': fields.ConfigDrivePolicyField(),
# boolean flag to set space-saving or performance behavior on the
# Datastore
'img_linked_clone': fields.FlexibleBooleanField(),
# Image mappings - related to Block device mapping data - mapping
# of virtual image names to device names. This could be represented
# as a formal data type, but is left as dict for the same reason as
# img_block_device_mapping field. It would arguably make sense for
# the two to be combined into a single field and data type in the
# future.
'img_mappings': fields.ListOfDictOfNullableStringsField(),
# image project id (set on upload)
'img_owner_id': fields.StringField(),
# root device name, used in snapshotting eg /dev/<blah>
'img_root_device_name': fields.StringField(),
# boolean - if false don't talk to nova agent
'img_use_agent': fields.FlexibleBooleanField(),
# integer value 1
'img_version': fields.IntegerField(),
# string of username with admin privileges
'os_admin_user': fields.StringField(),
# string of boot time command line arguments for the guest kernel
'os_command_line': fields.StringField(),
# the name of the specific guest operating system distro. This
# is not done as an Enum since the list of operating systems is
# growing incredibly fast, and valid values can be arbitrarily
# user defined. Nova has no real need for strict validation so
# leave it freeform
'os_distro': fields.StringField(),
# boolean - if true, then guest must support disk quiesce
# or snapshot operation will be denied
'os_require_quiesce': fields.FlexibleBooleanField(),
# boolean - if using agent don't inject files, assume someone else is
# doing that (cloud-init)
'os_skip_agent_inject_files_at_boot': fields.FlexibleBooleanField(),
# boolean - if using agent don't try inject ssh key, assume someone
# else is doing that (cloud-init)
'os_skip_agent_inject_ssh': fields.FlexibleBooleanField(),
# The guest operating system family such as 'linux', 'windows' - this
# is a fairly generic type. For a detailed type consider os_distro
# instead
'os_type': fields.OSTypeField(),
}
# The keys are the legacy property names and
# the values are the current preferred names
_legacy_property_map = {
'architecture': 'hw_architecture',
'owner_id': 'img_owner_id',
'vmware_disktype': 'hw_disk_type',
'vmware_image_version': 'img_version',
'vmware_ostype': 'os_distro',
'auto_disk_config': 'hw_auto_disk_config',
'ipxe_boot': 'hw_ipxe_boot',
'xenapi_device_id': 'hw_device_id',
'xenapi_image_compression_level': 'img_compression_level',
'vmware_linked_clone': 'img_linked_clone',
'xenapi_use_agent': 'img_use_agent',
'xenapi_skip_agent_inject_ssh': 'os_skip_agent_inject_ssh',
'xenapi_skip_agent_inject_files_at_boot':
'os_skip_agent_inject_files_at_boot',
'cache_in_nova': 'img_cache_in_nova',
'vm_mode': 'hw_vm_mode',
'bittorrent': 'img_bittorrent',
'mappings': 'img_mappings',
'block_device_mapping': 'img_block_device_mapping',
'bdm_v2': 'img_bdm_v2',
'root_device_name': 'img_root_device_name',
'hypervisor_version_requires': 'img_hv_requested_version',
'hypervisor_type': 'img_hv_type',
}
# TODO(berrange): Need to run this from a data migration
# at some point so we can eventually kill off the compat
def _set_attr_from_legacy_names(self, image_props):
for legacy_key in self._legacy_property_map:
new_key = self._legacy_property_map[legacy_key]
if legacy_key not in image_props:
continue
setattr(self, new_key, image_props[legacy_key])
vmware_adaptertype = image_props.get("vmware_adaptertype")
if vmware_adaptertype == "ide":
setattr(self, "hw_disk_bus", "ide")
elif vmware_adaptertype:
setattr(self, "hw_disk_bus", "scsi")
setattr(self, "hw_scsi_model", vmware_adaptertype)
def _set_numa_mem(self, image_props):
hw_numa_mem = []
hw_numa_mem_set = False
for cellid in range(ImageMetaProps.NUMA_NODES_MAX):
memprop = "hw_numa_mem.%d" % cellid
if memprop not in image_props:
break
hw_numa_mem.append(int(image_props[memprop]))
hw_numa_mem_set = True
del image_props[memprop]
if hw_numa_mem_set:
self.hw_numa_mem = hw_numa_mem
def _set_numa_cpus(self, image_props):
hw_numa_cpus = []
hw_numa_cpus_set = False
for cellid in range(ImageMetaProps.NUMA_NODES_MAX):
cpuprop = "hw_numa_cpus.%d" % cellid
if cpuprop not in image_props:
break
hw_numa_cpus.append(
hardware.parse_cpu_spec(image_props[cpuprop]))
hw_numa_cpus_set = True
del image_props[cpuprop]
if hw_numa_cpus_set:
self.hw_numa_cpus = hw_numa_cpus
def _set_attr_from_current_names(self, image_props):
for key in self.fields:
# The two NUMA fields need special handling to
# un-stringify them correctly
if key == "hw_numa_mem":
self._set_numa_mem(image_props)
elif key == "hw_numa_cpus":
self._set_numa_cpus(image_props)
else:
if key not in image_props:
continue
setattr(self, key, image_props[key])
@classmethod
def from_dict(cls, image_props):
"""Create instance from image properties dict
:param image_props: dictionary of image metadata properties
Creates a new object instance, initializing from a
dictionary of image metadata properties
:returns: an ImageMetaProps instance
"""
obj = cls()
# We look to see if the dict has entries for any
# of the legacy property names first. Then we use
# the current property names. That way if both the
# current and legacy names are set, the value
# associated with the current name takes priority
obj._set_attr_from_legacy_names(image_props)
obj._set_attr_from_current_names(image_props)
return obj
def get(self, name, defvalue=None):
"""Get the value of an attribute
:param name: the attribute to request
:param defvalue: the default value if not set
This returns the value of an attribute if it is currently
set, otherwise it will return the supplied default value.
This differs from accessing props.attrname, because that
will raise an exception if the attribute has no value set.
So instead of
if image_meta.properties.obj_attr_is_set("some_attr"):
val = image_meta.properties.some_attr
else:
val = None
Callers can rely on unconditional access
val = image_meta.properties.get("some_attr")
:returns: the attribute value or None
"""
if not self.obj_attr_is_set(name):
return defvalue
return getattr(self, name)
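# Editor's sketch (illustrative usage, not part of the original module; the
# property values below are made up):
#
#   props = ImageMetaProps.from_dict({
#       'vmware_ostype': 'fedora',    # legacy name for os_distro
#       'os_distro': 'ubuntu',        # current name wins when both are set
#       'hw_numa_mem.0': '512',
#       'hw_numa_mem.1': '512',       # un-stringified into hw_numa_mem
#   })
#   props.os_distro            # -> 'ubuntu'
#   props.hw_numa_mem          # -> [512, 512]
#   props.get('hw_vm_mode')    # -> None rather than raising for unset attrs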
|
apporc/nova
|
nova/objects/image_meta.py
|
Python
|
apache-2.0
| 19,719
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import os
import unittest
import pytz
from pyflink.table import DataTypes, expressions as expr
from pyflink.table.udf import ScalarFunction, udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, \
PyFlinkBlinkStreamTableTestCase, PyFlinkBlinkBatchTableTestCase, \
PyFlinkBatchTableTestCase
class UserDefinedFunctionTests(object):
def test_scalar_function(self):
# test with metrics disabled.
self.t_env.get_config().get_configuration().set_string('python.metric.enabled', 'false')
# test lambda function
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
# test Python ScalarFunction
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
# test callable function
add_one_callable = udf(CallablePlus(), result_type=DataTypes.BIGINT())
def partial_func(col, param):
return col + param
# test partial function
import functools
add_one_partial = udf(functools.partial(partial_func, param=1),
result_type=DataTypes.BIGINT())
# check memory limit is set
@udf(result_type=DataTypes.BIGINT())
def check_memory_limit():
assert os.environ['_PYTHON_WORKER_MEMORY_LIMIT'] is not None
return 1
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e', 'f', 'g'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT(),
DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
t.where(add_one(t.b) <= 3).select(
add_one(t.a), subtract_one(t.b), add(t.a, t.c), add_one_callable(t.a),
add_one_partial(t.a), check_memory_limit(), t.a) \
.execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2, 1, 4, 2, 2, 1, 1]", "+I[4, 0, 12, 4, 4, 1, 3]"])
def test_chaining_scalar_function(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.INT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2, 1), (2, 5, 2), (3, 1, 3)], ['a', 'b', 'c'])
t.select(add(add_one(t.a), subtract_one(t.b)), t.c, expr.lit(1)) \
.execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[3, 1, 1]", "+I[7, 2, 1]", "+I[4, 3, 1]"])
def test_udf_in_join_condition(self):
t1 = self.t_env.from_elements([(2, "Hi")], ['a', 'b'])
t2 = self.t_env.from_elements([(2, "Flink")], ['c', 'd'])
f = udf(lambda i: i, result_type=DataTypes.BIGINT())
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd'],
[DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.BIGINT(), DataTypes.STRING()])
self.t_env.register_table_sink("Results", table_sink)
t1.join(t2).where(f(t1.a) == t2.c).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2, Hi, 2, Flink]"])
def test_udf_in_join_condition_2(self):
t1 = self.t_env.from_elements([(1, "Hi"), (2, "Hi")], ['a', 'b'])
t2 = self.t_env.from_elements([(2, "Flink")], ['c', 'd'])
f = udf(lambda i: i, result_type=DataTypes.BIGINT())
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd'],
[DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.BIGINT(), DataTypes.STRING()])
self.t_env.register_table_sink("Results", table_sink)
t1.join(t2).where(f(t1.a) == f(t2.c)).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2, Hi, 2, Flink]"])
def test_udf_with_constant_params(self):
def udf_with_constant_params(p, null_param, tinyint_param, smallint_param, int_param,
bigint_param, decimal_param, float_param, double_param,
boolean_param, str_param,
date_param, time_param, timestamp_param):
from decimal import Decimal
import datetime
assert null_param is None, 'null_param is wrong value %s' % null_param
assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \
% type(tinyint_param)
p += tinyint_param
assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \
% type(smallint_param)
p += smallint_param
assert isinstance(int_param, int), 'int_param of wrong type %s !' \
% type(int_param)
p += int_param
assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \
% type(bigint_param)
p += bigint_param
assert decimal_param == Decimal('1.05'), \
'decimal_param is wrong value %s ' % decimal_param
p += int(decimal_param)
assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-06), \
'float_param is wrong value %s ' % float_param
p += int(float_param)
assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-07), \
'double_param is wrong value %s ' % double_param
p += int(double_param)
assert boolean_param is True, 'boolean_param is wrong value %s' % boolean_param
assert str_param == 'flink', 'str_param is wrong value %s' % str_param
assert date_param == datetime.date(year=2014, month=9, day=13), \
'date_param is wrong value %s' % date_param
assert time_param == datetime.time(hour=12, minute=0, second=0), \
'time_param is wrong value %s' % time_param
assert timestamp_param == datetime.datetime(1999, 9, 10, 5, 20, 10), \
'timestamp_param is wrong value %s' % timestamp_param
return p
self.t_env.create_temporary_system_function("udf_with_constant_params",
udf(udf_with_constant_params,
result_type=DataTypes.BIGINT()))
self.t_env.create_temporary_system_function(
"udf_with_all_constant_params", udf(lambda i, j: i + j,
result_type=DataTypes.BIGINT()))
table_sink = source_sink_utils.TestAppendSink(['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
self.t_env.register_table("test_table", t)
self.t_env.sql_query("select udf_with_all_constant_params("
"cast (1 as BIGINT),"
"cast (2 as BIGINT)), "
"udf_with_constant_params(a, "
"cast (null as BIGINT),"
"cast (1 as TINYINT),"
"cast (1 as SMALLINT),"
"cast (1 as INT),"
"cast (1 as BIGINT),"
"cast (1.05 as DECIMAL),"
"cast (1.23 as FLOAT),"
"cast (1.98932 as DOUBLE),"
"true,"
"'flink',"
"cast ('2014-09-13' as DATE),"
"cast ('12:00:00' as TIME),"
"cast ('1999-9-10 05:20:10' as TIMESTAMP))"
" from test_table").insert_into("Results")
self.t_env.execute("test")
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[3, 8]", "+I[3, 9]", "+I[3, 10]"])
def test_overwrite_builtin_function(self):
self.t_env.create_temporary_system_function(
"plus", udf(lambda i, j: i + j - 1,
result_type=DataTypes.BIGINT()))
table_sink = source_sink_utils.TestAppendSink(['a'], [DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
t.select("plus(a, b)").execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2]", "+I[6]", "+I[3]"])
def test_open(self):
self.t_env.get_config().get_configuration().set_string('python.metric.enabled', 'true')
subtract = udf(Subtract(), result_type=DataTypes.BIGINT())
table_sink = source_sink_utils.TestAppendSink(
['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 4)], ['a', 'b'])
t.select(t.a, subtract(t.b)).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 1]", "+I[2, 4]", "+I[3, 3]"])
def test_udf_without_arguments(self):
one = udf(lambda: 1, result_type=DataTypes.BIGINT(), deterministic=True)
two = udf(lambda: 2, result_type=DataTypes.BIGINT(), deterministic=False)
table_sink = source_sink_utils.TestAppendSink(['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select(one(), two()).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 2]", "+I[1, 2]", "+I[1, 2]"])
def test_all_data_types_expression(self):
@udf(result_type=DataTypes.BOOLEAN())
def boolean_func(bool_param):
assert isinstance(bool_param, bool), 'bool_param of wrong type %s !' \
% type(bool_param)
return bool_param
@udf(result_type=DataTypes.TINYINT())
def tinyint_func(tinyint_param):
assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \
% type(tinyint_param)
return tinyint_param
@udf(result_type=DataTypes.SMALLINT())
def smallint_func(smallint_param):
assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \
% type(smallint_param)
assert smallint_param == 32767, 'smallint_param of wrong value %s' % smallint_param
return smallint_param
@udf(result_type=DataTypes.INT())
def int_func(int_param):
assert isinstance(int_param, int), 'int_param of wrong type %s !' \
% type(int_param)
assert int_param == -2147483648, 'int_param of wrong value %s' % int_param
return int_param
@udf(result_type=DataTypes.BIGINT())
def bigint_func(bigint_param):
assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \
% type(bigint_param)
return bigint_param
@udf(result_type=DataTypes.BIGINT())
def bigint_func_none(bigint_param):
assert bigint_param is None, 'bigint_param %s should be None!' % bigint_param
return bigint_param
@udf(result_type=DataTypes.FLOAT())
def float_func(float_param):
assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-6), \
'float_param is wrong value %s !' % float_param
return float_param
@udf(result_type=DataTypes.DOUBLE())
def double_func(double_param):
assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-7), \
'double_param is wrong value %s !' % double_param
return double_param
@udf(result_type=DataTypes.BYTES())
def bytes_func(bytes_param):
assert bytes_param == b'flink', \
'bytes_param is wrong value %s !' % bytes_param
return bytes_param
@udf(result_type=DataTypes.STRING())
def str_func(str_param):
assert str_param == 'pyflink', \
'str_param is wrong value %s !' % str_param
return str_param
@udf(result_type=DataTypes.DATE())
def date_func(date_param):
from datetime import date
assert date_param == date(year=2014, month=9, day=13), \
'date_param is wrong value %s !' % date_param
return date_param
@udf(result_type=DataTypes.TIME())
def time_func(time_param):
from datetime import time
assert time_param == time(hour=12, minute=0, second=0, microsecond=123000), \
'time_param is wrong value %s !' % time_param
return time_param
@udf(result_type=DataTypes.TIMESTAMP(3))
def timestamp_func(timestamp_param):
from datetime import datetime
assert timestamp_param == datetime(2018, 3, 11, 3, 0, 0, 123000), \
'timestamp_param is wrong value %s !' % timestamp_param
return timestamp_param
@udf(result_type=DataTypes.ARRAY(DataTypes.BIGINT()))
def array_func(array_param):
assert array_param == [[1, 2, 3]], \
'array_param is wrong value %s !' % array_param
return array_param[0]
@udf(result_type=DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING()))
def map_func(map_param):
assert map_param == {1: 'flink', 2: 'pyflink'}, \
'map_param is wrong value %s !' % map_param
return map_param
@udf(result_type=DataTypes.DECIMAL(38, 18))
def decimal_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.050000000000000000'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
@udf(result_type=DataTypes.DECIMAL(38, 18))
def decimal_cut_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.059999999999999999'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.TINYINT(),
DataTypes.BOOLEAN(), DataTypes.SMALLINT(), DataTypes.INT(),
DataTypes.FLOAT(), DataTypes.DOUBLE(), DataTypes.BYTES(),
DataTypes.STRING(), DataTypes.DATE(), DataTypes.TIME(),
DataTypes.TIMESTAMP(3), DataTypes.ARRAY(DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING()),
DataTypes.DECIMAL(38, 18), DataTypes.DECIMAL(38, 18)])
self.t_env.register_table_sink("Results", table_sink)
import datetime
import decimal
t = self.t_env.from_elements(
[(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932,
bytearray(b'flink'), 'pyflink', datetime.date(2014, 9, 13),
datetime.time(hour=12, minute=0, second=0, microsecond=123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000), [[1, 2, 3]],
{1: 'flink', 2: 'pyflink'}, decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.05999999999999999899999999999'))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT()),
DataTypes.FIELD("c", DataTypes.TINYINT()),
DataTypes.FIELD("d", DataTypes.BOOLEAN()),
DataTypes.FIELD("e", DataTypes.SMALLINT()),
DataTypes.FIELD("f", DataTypes.INT()),
DataTypes.FIELD("g", DataTypes.FLOAT()),
DataTypes.FIELD("h", DataTypes.DOUBLE()),
DataTypes.FIELD("i", DataTypes.BYTES()),
DataTypes.FIELD("j", DataTypes.STRING()),
DataTypes.FIELD("k", DataTypes.DATE()),
DataTypes.FIELD("l", DataTypes.TIME()),
DataTypes.FIELD("m", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("n", DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT()))),
DataTypes.FIELD("o", DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())),
DataTypes.FIELD("p", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("q", DataTypes.DECIMAL(38, 18))]))
t.select(
bigint_func(t.a),
bigint_func_none(t.b),
tinyint_func(t.c),
boolean_func(t.d),
smallint_func(t.e),
int_func(t.f),
float_func(t.g),
double_func(t.h),
bytes_func(t.i),
str_func(t.j),
date_func(t.k),
time_func(t.l),
timestamp_func(t.m),
array_func(t.n),
map_func(t.o),
decimal_func(t.p),
decimal_cut_func(t.q)) \
.execute_insert("Results").wait()
actual = source_sink_utils.results()
# Currently the sink only supports a precision of 0 for DataTypes.TIME(precision) results.
self.assert_equals(actual,
["+I[1, null, 1, true, 32767, -2147483648, 1.23, 1.98932, "
"[102, 108, 105, 110, 107], pyflink, 2014-09-13, "
"12:00:00, 2018-03-11 03:00:00.123, [1, 2, 3], "
"{1=flink, 2=pyflink}, 1000000000000000000.050000000000000000, "
"1000000000000000000.059999999999999999]"])
def test_all_data_types(self):
def boolean_func(bool_param):
assert isinstance(bool_param, bool), 'bool_param of wrong type %s !' \
% type(bool_param)
return bool_param
def tinyint_func(tinyint_param):
assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \
% type(tinyint_param)
return tinyint_param
def smallint_func(smallint_param):
assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \
% type(smallint_param)
assert smallint_param == 32767, 'smallint_param of wrong value %s' % smallint_param
return smallint_param
def int_func(int_param):
assert isinstance(int_param, int), 'int_param of wrong type %s !' \
% type(int_param)
assert int_param == -2147483648, 'int_param of wrong value %s' % int_param
return int_param
def bigint_func(bigint_param):
assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \
% type(bigint_param)
return bigint_param
def bigint_func_none(bigint_param):
assert bigint_param is None, 'bigint_param %s should be None!' % bigint_param
return bigint_param
def float_func(float_param):
assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-6), \
'float_param is wrong value %s !' % float_param
return float_param
def double_func(double_param):
assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-7), \
'double_param is wrong value %s !' % double_param
return double_param
def bytes_func(bytes_param):
assert bytes_param == b'flink', \
'bytes_param is wrong value %s !' % bytes_param
return bytes_param
def str_func(str_param):
assert str_param == 'pyflink', \
'str_param is wrong value %s !' % str_param
return str_param
def date_func(date_param):
from datetime import date
assert date_param == date(year=2014, month=9, day=13), \
'date_param is wrong value %s !' % date_param
return date_param
def time_func(time_param):
from datetime import time
assert time_param == time(hour=12, minute=0, second=0, microsecond=123000), \
'time_param is wrong value %s !' % time_param
return time_param
def timestamp_func(timestamp_param):
from datetime import datetime
assert timestamp_param == datetime(2018, 3, 11, 3, 0, 0, 123000), \
'timestamp_param is wrong value %s !' % timestamp_param
return timestamp_param
def array_func(array_param):
assert array_param == [[1, 2, 3]], \
'array_param is wrong value %s !' % array_param
return array_param[0]
def map_func(map_param):
assert map_param == {1: 'flink', 2: 'pyflink'}, \
'map_param is wrong value %s !' % map_param
return map_param
def decimal_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.050000000000000000'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
def decimal_cut_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.059999999999999999'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
self.t_env.create_temporary_system_function(
"boolean_func", udf(boolean_func, result_type=DataTypes.BOOLEAN()))
self.t_env.create_temporary_system_function(
"tinyint_func", udf(tinyint_func, result_type=DataTypes.TINYINT()))
self.t_env.create_temporary_system_function(
"smallint_func", udf(smallint_func, result_type=DataTypes.SMALLINT()))
self.t_env.create_temporary_system_function(
"int_func", udf(int_func, result_type=DataTypes.INT()))
self.t_env.create_temporary_system_function(
"bigint_func", udf(bigint_func, result_type=DataTypes.BIGINT()))
self.t_env.create_temporary_system_function(
"bigint_func_none", udf(bigint_func_none, result_type=DataTypes.BIGINT()))
self.t_env.create_temporary_system_function(
"float_func", udf(float_func, result_type=DataTypes.FLOAT()))
self.t_env.create_temporary_system_function(
"double_func", udf(double_func, result_type=DataTypes.DOUBLE()))
self.t_env.create_temporary_system_function(
"bytes_func", udf(bytes_func, result_type=DataTypes.BYTES()))
self.t_env.create_temporary_system_function(
"str_func", udf(str_func, result_type=DataTypes.STRING()))
self.t_env.create_temporary_system_function(
"date_func", udf(date_func, result_type=DataTypes.DATE()))
self.t_env.create_temporary_system_function(
"time_func", udf(time_func, result_type=DataTypes.TIME()))
self.t_env.create_temporary_system_function(
"timestamp_func", udf(timestamp_func, result_type=DataTypes.TIMESTAMP(3)))
self.t_env.create_temporary_system_function(
"array_func", udf(array_func, result_type=DataTypes.ARRAY(DataTypes.BIGINT())))
self.t_env.create_temporary_system_function(
"map_func", udf(map_func,
result_type=DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())))
self.t_env.register_function(
"decimal_func", udf(decimal_func, result_type=DataTypes.DECIMAL(38, 18)))
self.t_env.register_function(
"decimal_cut_func", udf(decimal_cut_func, result_type=DataTypes.DECIMAL(38, 18)))
table_sink = source_sink_utils.TestAppendSink(
['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q'],
[DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.TINYINT(),
DataTypes.BOOLEAN(), DataTypes.SMALLINT(), DataTypes.INT(),
DataTypes.FLOAT(), DataTypes.DOUBLE(), DataTypes.BYTES(),
DataTypes.STRING(), DataTypes.DATE(), DataTypes.TIME(),
DataTypes.TIMESTAMP(3), DataTypes.ARRAY(DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING()),
DataTypes.DECIMAL(38, 18), DataTypes.DECIMAL(38, 18)])
self.t_env.register_table_sink("Results", table_sink)
import datetime
import decimal
t = self.t_env.from_elements(
[(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932,
bytearray(b'flink'), 'pyflink', datetime.date(2014, 9, 13),
datetime.time(hour=12, minute=0, second=0, microsecond=123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000), [[1, 2, 3]],
{1: 'flink', 2: 'pyflink'}, decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.05999999999999999899999999999'))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT()),
DataTypes.FIELD("c", DataTypes.TINYINT()),
DataTypes.FIELD("d", DataTypes.BOOLEAN()),
DataTypes.FIELD("e", DataTypes.SMALLINT()),
DataTypes.FIELD("f", DataTypes.INT()),
DataTypes.FIELD("g", DataTypes.FLOAT()),
DataTypes.FIELD("h", DataTypes.DOUBLE()),
DataTypes.FIELD("i", DataTypes.BYTES()),
DataTypes.FIELD("j", DataTypes.STRING()),
DataTypes.FIELD("k", DataTypes.DATE()),
DataTypes.FIELD("l", DataTypes.TIME()),
DataTypes.FIELD("m", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("n", DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT()))),
DataTypes.FIELD("o", DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())),
DataTypes.FIELD("p", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("q", DataTypes.DECIMAL(38, 18))]))
t.select("bigint_func(a), bigint_func_none(b),"
"tinyint_func(c), boolean_func(d),"
"smallint_func(e),int_func(f),"
"float_func(g),double_func(h),"
"bytes_func(i),str_func(j),"
"date_func(k),time_func(l),"
"timestamp_func(m),array_func(n),"
"map_func(o),decimal_func(p),"
"decimal_cut_func(q)") \
.execute_insert("Results").wait()
actual = source_sink_utils.results()
# Currently the sink only supports a precision of 0 for DataTypes.TIME(precision) results.
self.assert_equals(actual,
["+I[1, null, 1, true, 32767, -2147483648, 1.23, 1.98932, "
"[102, 108, 105, 110, 107], pyflink, 2014-09-13, "
"12:00:00, 2018-03-11 03:00:00.123, [1, 2, 3], "
"{1=flink, 2=pyflink}, 1000000000000000000.050000000000000000, "
"1000000000000000000.059999999999999999]"])
def test_create_and_drop_function(self):
t_env = self.t_env
t_env.create_temporary_system_function(
"add_one_func", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
t_env.create_temporary_function(
"subtract_one_func", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
self.assert_equals(t_env.list_user_defined_functions(),
['add_one_func', 'subtract_one_func'])
t_env.drop_temporary_system_function("add_one_func")
t_env.drop_temporary_function("subtract_one_func")
self.assert_equals(t_env.list_user_defined_functions(), [])
# decide whether two floats are equal
def float_equal(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
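# Editor's note: this helper uses the same formula as the standard library's
# math.isclose (Python 3.5+), so for the tolerances used above the two agree, e.g.
#
#   import math
#   float_equal(1.23, 1.2300001, 1e-06) == math.isclose(1.23, 1.2300001, rel_tol=1e-06)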
class PyFlinkStreamUserDefinedFunctionTests(UserDefinedFunctionTests,
PyFlinkStreamTableTestCase):
pass
class PyFlinkBatchUserDefinedFunctionTests(PyFlinkBatchTableTestCase):
def test_chaining_scalar_function(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
t = self.t_env.from_elements([(1, 2, 1), (2, 5, 2), (3, 1, 3)], ['a', 'b', 'c'])
t = t.select(add(add_one(t.a), subtract_one(t.b)), t.c, expr.lit(1))
result = self.collect(t)
self.assertEqual(result, ["+I[3, 1, 1]", "+I[7, 2, 1]", "+I[4, 3, 1]"])
class PyFlinkBlinkStreamUserDefinedFunctionTests(UserDefinedFunctionTests,
PyFlinkBlinkStreamTableTestCase):
def test_deterministic(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
self.assertTrue(add_one._deterministic)
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT(), deterministic=False)
self.assertFalse(add_one._deterministic)
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
self.assertTrue(subtract_one._deterministic)
with self.assertRaises(ValueError, msg="Inconsistent deterministic: False and True"):
udf(SubtractOne(), result_type=DataTypes.BIGINT(), deterministic=False)
self.assertTrue(add._deterministic)
@udf(result_type=DataTypes.BIGINT(), deterministic=False)
def non_deterministic_udf(i):
return i
self.assertFalse(non_deterministic_udf._deterministic)
def test_name(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
self.assertEqual("<lambda>", add_one._name)
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT(), name="add_one")
self.assertEqual("add_one", add_one._name)
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
self.assertEqual("SubtractOne", subtract_one._name)
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT(), name="subtract_one")
self.assertEqual("subtract_one", subtract_one._name)
self.assertEqual("add", add._name)
@udf(result_type=DataTypes.BIGINT(), name="named")
def named_udf(i):
return i
self.assertEqual("named", named_udf._name)
def test_abc(self):
class UdfWithoutEval(ScalarFunction):
def open(self, function_context):
pass
with self.assertRaises(
TypeError,
msg="Can't instantiate abstract class UdfWithoutEval with abstract methods eval"):
UdfWithoutEval()
def test_invalid_udf(self):
class Plus(object):
def eval(self, col):
return col + 1
with self.assertRaises(
TypeError,
msg="Invalid function: not a function or callable (__call__ is not defined)"):
# test non-callable function
self.t_env.create_temporary_system_function(
"non-callable-udf", udf(Plus(), DataTypes.BIGINT(), DataTypes.BIGINT()))
def test_data_types_only_supported_in_blink_planner(self):
timezone = self.t_env.get_config().get_local_timezone()
local_datetime = pytz.timezone(timezone).localize(
datetime.datetime(1970, 1, 1, 0, 0, 0, 123000))
@udf(result_type=DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3))
def local_zoned_timestamp_func(local_zoned_timestamp_param):
assert local_zoned_timestamp_param == local_datetime, \
'local_zoned_timestamp_param is wrong value %s !' % local_zoned_timestamp_param
return local_zoned_timestamp_param
table_sink = source_sink_utils.TestAppendSink(
['a'], [DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3)])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements(
[(local_datetime,)],
DataTypes.ROW([DataTypes.FIELD("a", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3))]))
t.select(local_zoned_timestamp_func(local_zoned_timestamp_func(t.a))) \
.execute_insert("Results") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1970-01-01T00:00:00.123Z]"])
class PyFlinkBlinkBatchUserDefinedFunctionTests(UserDefinedFunctionTests,
PyFlinkBlinkBatchTableTestCase):
pass
# test specifying the input_types
@udf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()], result_type=DataTypes.BIGINT())
def add(i, j):
return i + j
class SubtractOne(ScalarFunction):
def eval(self, i):
return i - 1
class Subtract(ScalarFunction, unittest.TestCase):
def open(self, function_context):
self.subtracted_value = 1
mg = function_context.get_metric_group()
self.counter = mg.add_group("key", "value").counter("my_counter")
self.counter_sum = 0
def eval(self, i):
# counter
self.counter.inc(i)
self.counter_sum += i
return i - self.subtracted_value
class CallablePlus(object):
def __call__(self, col):
return col + 1
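# Editor's note: a quick recap (for reference only) of the UDF construction
# styles exercised in the tests above; all names refer to objects defined in
# this file:
#
#   udf(lambda i: i + 1, result_type=DataTypes.BIGINT())          # lambda
#   udf(SubtractOne(), result_type=DataTypes.BIGINT())            # ScalarFunction subclass
#   udf(CallablePlus(), result_type=DataTypes.BIGINT())           # callable object
#   udf(functools.partial(partial_func, param=1),
#       result_type=DataTypes.BIGINT())                           # functools.partial
#   @udf(result_type=DataTypes.BIGINT())                          # decorator form
#   def add_one_decorated(i): return i + 1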
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
aljoscha/flink
|
flink-python/pyflink/table/tests/test_udf.py
|
Python
|
apache-2.0
| 36,215
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs the trainer for the calculator smoketest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# import google3
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow_fold.loom.calculator_example import calculator_pb2
from tensorflow_fold.loom.calculator_example import helpers
from tensorflow_fold.loom.calculator_example import model
tf.flags.DEFINE_string(
'train_data_path', '',
'TF Record file containing the training dataset of expressions.')
tf.flags.DEFINE_integer(
'batch_size', 1000, 'How many samples to read per batch.')
tf.flags.DEFINE_integer(
'embedding_length', 5,
'How long to make the expression embedding vectors.')
tf.flags.DEFINE_integer(
'max_steps', 1000000,
'The maximum number of batches to run the trainer for.')
# Replication flags:
tf.flags.DEFINE_string('logdir', '/tmp/calculator_smoketest',
'Directory in which to write event logs.')
tf.flags.DEFINE_string('master', '',
'Tensorflow master to use.')
tf.flags.DEFINE_integer('task', 0,
'Task ID of the replica running the training.')
tf.flags.DEFINE_integer('ps_tasks', 0,
'Number of PS tasks in the job.')
FLAGS = tf.flags.FLAGS
def iterate_over_tf_record_protos(table_path, message_type):
while True:
for v in tf.python_io.tf_record_iterator(table_path):
message = message_type()
message.ParseFromString(v)
yield message
def main(unused_argv):
train_iterator = iterate_over_tf_record_protos(
FLAGS.train_data_path, calculator_pb2.CalculatorExpression)
with tf.Graph().as_default():
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
# Build the graph.
global_step = tf.Variable(0, name='global_step', trainable=False)
classifier = model.CalculatorSignClassifier(FLAGS.embedding_length)
variables = classifier.variables()
loss = classifier.loss()
accuracy = classifier.accuracy()
optr = tf.train.GradientDescentOptimizer(0.01)
trainer = optr.minimize(loss, global_step=global_step, var_list=variables)
# Set up the supervisor.
supervisor = tf.train.Supervisor(
logdir=FLAGS.logdir,
is_chief=(FLAGS.task == 0),
save_summaries_secs=10,
save_model_secs=30)
sess = supervisor.PrepareSession(FLAGS.master)
# Run the trainer.
for _ in xrange(FLAGS.max_steps):
batch = [next(train_iterator) for _ in xrange(FLAGS.batch_size)]
_, step, batch_loss, batch_accuracy = sess.run(
[trainer, global_step, loss, accuracy],
feed_dict=classifier.build_feed_dict(batch))
print('step=%d: batch loss=%f accuracy=%f' % (
step, batch_loss, batch_accuracy))
helpers.EmitValues(supervisor, sess, step,
{'Batch Loss': batch_loss,
'Batch Accuracy': batch_accuracy})
if __name__ == '__main__':
tf.app.run()
|
pklfz/fold
|
tensorflow_fold/loom/calculator_example/train.py
|
Python
|
apache-2.0
| 3,708
|
# Copyright (C) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import fixtures
import shade
import testtools
import yaml
from nodepool import tests
from nodepool.provider_manager import shade_inner_exceptions
class TestShadeIntegration(tests.IntegrationTestCase):
def _cleanup_cloud_config(self):
os.remove(self.clouds_path)
def _use_cloud_config(self, config):
config_dir = fixtures.TempDir()
self.useFixture(config_dir)
self.clouds_path = os.path.join(config_dir.path, 'clouds.yaml')
self.useFixture(fixtures.MonkeyPatch(
'os_client_config.config.CONFIG_FILES',
[self.clouds_path]))
with open(self.clouds_path, 'w') as h:
yaml.safe_dump(config, h)
self.addCleanup(self._cleanup_cloud_config)
def test_nodepool_provider_config(self):
configfile = self.setup_config('integration.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
pool.updateConfig()
provider_manager = pool.config.provider_managers['real-provider']
auth_data = {'username': 'real',
'project_id': 'real',
'password': 'real',
'auth_url': 'real'}
self.assertEqual(provider_manager._client.auth, auth_data)
self.assertEqual(provider_manager._client.region_name, 'real-region')
def test_nodepool_osc_config(self):
configfile = self.setup_config('integration_osc.yaml')
auth_data = {'username': 'os_real',
'project_name': 'os_real',
'password': 'os_real',
'auth_url': 'os_real'}
osc_config = {'clouds': {'real-cloud': {'auth': auth_data}}}
self._use_cloud_config(osc_config)
pool = self.useNodepool(configfile, watermark_sleep=1)
pool.updateConfig()
provider_manager = pool.config.provider_managers['real-provider']
self.assertEqual(provider_manager._client.auth, auth_data)
def test_nodepool_osc_config_reload(self):
configfile = self.setup_config('integration_osc.yaml')
auth_data = {'username': 'os_real',
'project_name': 'os_real',
'password': 'os_real',
'auth_url': 'os_real'}
osc_config = {'clouds': {'real-cloud': {'auth': auth_data}}}
self._use_cloud_config(osc_config)
pool = self.useNodepool(configfile, watermark_sleep=1)
pool.updateConfig()
provider_manager = pool.config.provider_managers['real-provider']
self.assertEqual(provider_manager._client.auth, auth_data)
# update the config
auth_data['password'] = 'os_new_real'
os.remove(self.clouds_path)
with open(self.clouds_path, 'w') as h:
yaml.safe_dump(osc_config, h)
pool.updateConfig()
provider_manager = pool.config.provider_managers['real-provider']
self.assertEqual(provider_manager._client.auth, auth_data)
def test_exceptions(self):
log = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
with testtools.ExpectedException(shade.OpenStackCloudException):
with shade_inner_exceptions():
try:
raise Exception("inner test")
except:
raise shade.OpenStackCloudException("outer test")
self.assertTrue('Exception("inner test")' in log.output)
|
Tesora/tesora-nodepool
|
nodepool/tests/test_shade_integration.py
|
Python
|
apache-2.0
| 4,040
|
import os
import sys
import Image
if len(sys.argv) == 1:
print "Please provide Image folder path as an argument."
print "Usage: python thumbnail_factory.py <FolderPath>"
sys.exit()
rootdir = sys.argv[1]
for root, subFolders, files in os.walk(rootdir):
for filename in files:
if "_thumbnail" in filename:
continue
filepath = os.path.join(root,filename)
thumbnail_path = str(os.path.splitext(filepath)[0]) + "_thumbnail" + str(os.path.splitext(filepath)[1])
out = file(thumbnail_path, "wb")  # open in binary mode for the JPEG output
try:
img = Image.open(filepath)
print "Generating Thumbnail for " + filepath
img.thumbnail((100, 100))
img.save(out, 'JPEG')
except Exception as e:
print "Error for " + filepath + " : " + str(e)
finally:
out.close()
|
Bhamni/utilities
|
deprecated/scripts/thumbnail_factory.py
|
Python
|
apache-2.0
| 865
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as conf_fixture
from tempest import config
class ConfigFixture(conf_fixture.Config):
def __init__(self):
cfg.CONF([], default_config_files=[])
config.register_opts()
super(ConfigFixture, self).__init__()
def setUp(self):
super(ConfigFixture, self).setUp()
self.conf.set_default('build_interval', 10, group='compute')
self.conf.set_default('build_timeout', 10, group='compute')
self.conf.set_default('disable_ssl_certificate_validation', True,
group='identity')
self.conf.set_default('uri', 'http://fake_uri.com/auth',
group='identity')
self.conf.set_default('uri_v3', 'http://fake_uri_v3.com/auth',
group='identity')
self.conf.set_default('neutron', True, group='service_available')
self.conf.set_default('heat', True, group='service_available')
if not os.path.exists(str(os.environ.get('OS_TEST_LOCK_PATH'))):
os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
lockutils.set_defaults(
lock_path=str(os.environ.get('OS_TEST_LOCK_PATH')),
)
self.conf.set_default('auth_version', 'v2', group='identity')
for config_option in ['username', 'password', 'project_name']:
# Identity group items
self.conf.set_default('admin_' + config_option,
'fake_' + config_option,
group='auth')
class FakePrivate(config.TempestConfigPrivate):
def __init__(self, parse_conf=True, config_path=None):
self._set_attrs()
self.lock_path = cfg.CONF.oslo_concurrency.lock_path
fake_service1_group = cfg.OptGroup(name='fake-service1', title='Fake service1')
FakeService1Group = [
cfg.StrOpt('catalog_type', default='fake-service1'),
cfg.StrOpt('endpoint_type', default='faketype'),
cfg.StrOpt('region', default='fake_region'),
cfg.IntOpt('build_timeout', default=99),
cfg.IntOpt('build_interval', default=9)]
fake_service2_group = cfg.OptGroup(name='fake-service2', title='Fake service2')
FakeService2Group = [
cfg.StrOpt('catalog_type', default='fake-service2'),
cfg.StrOpt('endpoint_type', default='faketype')]
class ServiceClientsConfigFixture(conf_fixture.Config):
def __init__(self):
cfg.CONF([], default_config_files=[])
config._opts.append((fake_service1_group, FakeService1Group))
config._opts.append((fake_service2_group, FakeService2Group))
config.register_opts()
super(ServiceClientsConfigFixture, self).__init__()
def setUp(self):
super(ServiceClientsConfigFixture, self).setUp()
# Debug default values
self.conf.set_default('trace_requests', 'fake_module', 'debug')
# Identity default values
self.conf.set_default('disable_ssl_certificate_validation', True,
group='identity')
self.conf.set_default('ca_certificates_file', '/fake/certificates',
group='identity')
self.conf.set_default('region', 'fake_region', 'identity')
# Identity endpoints
self.conf.set_default('v3_endpoint_type', 'fake_v3_uri', 'identity')
self.conf.set_default('v2_public_endpoint_type', 'fake_v2_public_uri',
'identity')
self.conf.set_default('v2_admin_endpoint_type', 'fake_v2_admin_uri',
'identity')
# Compute default values
self.conf.set_default('build_interval', 88, group='compute')
self.conf.set_default('build_timeout', 8, group='compute')
class ServiceClientsFakePrivate(config.TempestConfigPrivate):
def __init__(self, parse_conf=True, config_path=None):
self._set_attrs()
self.fake_service1 = cfg.CONF['fake-service1']
self.fake_service2 = cfg.CONF['fake-service2']
print('Services registered')
self.lock_path = cfg.CONF.oslo_concurrency.lock_path
|
sebrandon1/tempest
|
tempest/tests/fake_config.py
|
Python
|
apache-2.0
| 4,758
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example deactivates all active line items custom fields.
To determine which custom fields exist, run get_all_custom_fields.py.
Tags: CustomFieldService.getCustomFieldsByStatement
Tags: CustomFieldService.performCustomFieldAction
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
def main(client):
# Initialize appropriate service.
custom_field_service = client.GetService(
'CustomFieldService', version='v201204')
# Create statement to select only active custom fields that apply to
# line items.
values = [
{
'key': 'entityType',
'value': {
'xsi_type': 'TextValue',
'value': 'LINE_ITEM'
}
}, {
'key': 'isActive',
'value': {
'xsi_type': 'BooleanValue',
'value': 'true'
}
}
]
query = 'WHERE entityType = :entityType and isActive = :isActive'
# Get custom fields by statement.
custom_fields = DfpUtils.GetAllEntitiesByStatementWithService(
custom_field_service, query=query, bind_vars=values)
# Display results.
for custom_field in custom_fields:
print ('Custom field with ID \'%s\' and name \'%s\' will be deactivated.'
% (custom_field['id'], custom_field['name']))
print
print 'Number of custom fields to be deactivated: %s' % len(custom_fields)
if custom_fields:
# Perform action.
result = custom_field_service.PerformCustomFieldAction(
{'type': 'DeactivateCustomFields'},
{'query': query, 'values': values})[0]
# Display results.
if result and int(result['numChanges']) > 0:
print 'Number of custom fields deactivated: %s' % result['numChanges']
else:
print 'No custom fields were deactivated.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client)
|
donspaulding/adspygoogle
|
examples/adspygoogle/dfp/v201204/custom_field_service/deactivate_all_line_item_custom_fields.py
|
Python
|
apache-2.0
| 2,861
|
from keystone.manage2 import base
from keystone.manage2 import common
from keystone.backends import models
@common.arg('--id',
required=False,
help='a unique identifier used in URLs')
@common.arg('--name',
required=True,
help='a unique username used for authentication')
@common.arg('--email',
required=False,
help='a unique email address')
@common.arg('--password',
required=True,
help='used for authentication')
@common.arg('--tenant-id',
required=False,
help='default tenant ID')
@common.arg('--disabled',
action='store_true',
required=False,
default=False,
help="create the user in a disabled state (users are enabled by "
"default)")
class Command(base.BaseBackendCommand):
"""Creates a new user, enabled by default.
Optionally, specify a default tenant for the user.
The user is enabled by default, but can be disabled upon creation as
well.
"""
# pylint: disable=E1101,R0913
def create_user(self, name, password, id=None, email=None, tenant_id=None,
enabled=True):
self.get_tenant(tenant_id)
obj = models.User()
obj.id = id
obj.name = name
obj.password = password
obj.email = email
obj.enabled = enabled
obj.tenant_id = tenant_id
return self.user_manager.create(obj)
def run(self, args):
"""Process argparse args, and print results to stdout"""
user = self.create_user(id=args.id, name=args.name,
password=args.password, email=args.email,
tenant_id=args.tenant_id, enabled=(not args.disabled))
print user.id
|
HugoKuo/keystone-essex3
|
keystone/manage2/commands/create_user.py
|
Python
|
apache-2.0
| 1,662
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.cloud.videointelligence_v1beta2.proto import video_intelligence_pb2
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import duration_pb2
from google.protobuf import timestamp_pb2
from google.rpc import status_pb2
_shared_modules = [operations_pb2, any_pb2, duration_pb2, timestamp_pb2, status_pb2]
_local_modules = [video_intelligence_pb2]
names = []
for module in _shared_modules: # pragma: NO COVER
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = "google.cloud.videointelligence_v1beta2.types"
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
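# Editor's sketch (illustrative usage, not part of the original module): after
# the loops above run at import time, the generated protobuf message classes
# become attributes of this module, e.g. (the URI below is a made-up placeholder):
#
#   from google.cloud.videointelligence_v1beta2 import types
#   request = types.AnnotateVideoRequest(input_uri='gs://my-bucket/video.mp4')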
|
tseaver/google-cloud-python
|
videointelligence/google/cloud/videointelligence_v1beta2/types.py
|
Python
|
apache-2.0
| 1,597
|
import gym
from gym.spaces import Box, Discrete
import numpy as np
class SimpleCorridor(gym.Env):
"""Example of a custom env in which you have to walk down a corridor.
You can configure the length of the corridor via the env config."""
def __init__(self, config):
self.end_pos = config["corridor_length"]
self.cur_pos = 0
self.action_space = Discrete(2)
self.observation_space = Box(0.0, 999.0, shape=(1, ), dtype=np.float32)
def set_corridor_length(self, length):
self.end_pos = length
print("Updated corridor length to {}".format(length))
def reset(self):
self.cur_pos = 0.0
return [self.cur_pos]
def step(self, action):
assert action in [0, 1], action
if action == 0 and self.cur_pos > 0:
self.cur_pos -= 1.0
elif action == 1:
self.cur_pos += 1.0
done = self.cur_pos >= self.end_pos
return [self.cur_pos], 1 if done else 0, done, {}
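# Editor's sketch: a minimal manual rollout of the env above, runnable as a
# script. It assumes gym and numpy are installed; the corridor length is arbitrary.
if __name__ == "__main__":
    env = SimpleCorridor({"corridor_length": 5})
    obs = env.reset()
    done = False
    total_reward = 0
    while not done:
        # Action 1 always walks forward, so the episode ends after 5 steps.
        obs, reward, done, _ = env.step(1)
        total_reward += reward
    print("total reward:", total_reward)  # expected: 1, granted only at the end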
|
richardliaw/ray
|
rllib/examples/env/simple_corridor.py
|
Python
|
apache-2.0
| 995
|
# Copyright 2012 Twitter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from django.db import models
from django.utils.timezone import now
class Log(models.Model):
uuid = models.CharField(max_length=62, primary_key=True, unique=True, default=lambda: str(uuid.uuid1()))
timestamp = models.FloatField()
action = models.CharField(max_length=20)
data = models.TextField()
udid = models.CharField(max_length=32)
api_version = models.CharField(max_length=6)
app_version = models.IntegerField()
bundle_version = models.CharField(max_length=20)
app_key = models.CharField(max_length=36)
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('timestamp', 'udid'),)
def __unicode__(self):
return str((
self.timestamp,
self.action,
self.data,
self.udid,
self.api_version,
self.app_version,
self.bundle_version,
self.app_key,
))
class ViewHour(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
views = models.IntegerField()
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.views, self.platform))
class ViewSlugHour(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
slug = models.TextField()
views = models.IntegerField()
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'slug', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.slug, self.views,
self.platform))
class ViewDay(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
views = models.IntegerField()
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.views, self.platform))
class ViewSlugDay(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
slug = models.TextField()
views = models.IntegerField()
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'slug', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.slug, self.views,
self.platform))
class ViewMonth(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
views = models.IntegerField()
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.views, self.platform))
class ViewSlugMonth(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
slug = models.TextField()
views = models.IntegerField()
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'slug', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.slug, self.views,
self.platform))
class ViewYear(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
views = models.IntegerField()
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.views, self.platform))
class ViewSlugYear(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
slug = models.TextField()
views = models.IntegerField()
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'slug', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.slug, self.views,
self.platform))
class UniqueHour(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
udid = models.CharField(max_length=32)
new = models.BooleanField(default=True)
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'udid', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.udid, self.new,
self.platform))
class UniqueDay(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
udid = models.CharField(max_length=32)
new = models.BooleanField(default=True)
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'udid', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.udid, self.new,
self.platform))
class UniqueMonth(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
udid = models.CharField(max_length=32)
new = models.BooleanField(default=True)
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'udid', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.udid, self.new,
self.platform))
class UniqueYear(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField()
udid = models.CharField(max_length=32)
new = models.BooleanField(default=True)
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'timestamp', 'udid', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.timestamp, self.udid, self.new,
self.platform))
class UniqueAllTime(models.Model):
app_id = models.IntegerField()
timestamp = models.DateTimeField(default=now)
udid = models.CharField(max_length=32)
platform = models.CharField(max_length=12, default='iOS')
class Meta(object):
unique_together = (('app_id', 'udid', 'platform'),)
def __unicode__(self):
return str((self.app_id, self.udid, self.platform))
|
clutchio/clutch
|
stats/models.py
|
Python
|
apache-2.0
| 7,305
|
#!/usr/bin/env python3
# Copyright 2020 Anapaya Systems
import http.server
import time
import threading
from plumbum import cmd
from acceptance.common import base
from acceptance.common import docker
from acceptance.common import scion
class Test(base.TestBase):
"""
Constructs a simple Hidden Paths topology with one core, four leaf ASes and
two hidden path groups.
AS 1-ff00:0:1 is core.
AS 1-ff00:0:2, 1-ff00:0:3, 1-ff00:0:4, 1-ff00:0:5 are leaves.
We use the shortnames AS1, AS2, etc. for the ASes above.
The two hidden path groups are owned by the registry AS and indexed
by their writer AS. The groups are as follows:
Group ff00:0:2-3 contains the following roles:
Registry: AS2
Writer: AS3
Client: AS5
Group ff00:0:2-4 contains the following roles:
Registry: AS2
Writer: AS4
Client: AS5
We test for connectivity between all pairs of ASes in the same group.
Testing is done using showpaths with JSON output.
Additionally, we test that the ASes in different groups cannot talk
to each other. Thus, the tests are:
Expect connectivity:
AS2 <-> AS3, AS2 <-> AS5, AS3 <-> AS5 (Group ff00:0:2-3)
AS2 <-> AS4, AS2 <-> AS5, AS4 <-> AS5 (Group ff00:0:2-4)
Expect no connectivity:
AS3 <-> AS4 (Group ff00:0:2-3 to group ff00:0:2-4)
"""
def main(self):
if not self.nested_command:
try:
self.setup()
time.sleep(20)
self._run()
finally:
self.teardown()
def setup(self):
self.setup_prepare()
http_server_port = 9099
as_numbers = ["2", "3", "4", "5"]
# HTTP configuration server runs on 0.0.0.0 and needs to be reachable from
# every daemon and control service. There is one host IP on every AS bridge.
# We use this IP for the configuration download URLs.
server_ips = {
"2": "172.20.0.49",
"3": "172.20.0.57",
"4": "172.20.0.65",
"5": "172.20.0.73",
}
# XXX(lukedirtwalker): The ports below are the dynamic QUIC server
# ports. Thanks to the docker setup they are assigned consistently, so we
# can use them. Ideally we would define a static server port inside
# the CS and use that one instead.
control_addresses = {
"2": "172.20.0.51:32768",
"3": "172.20.0.59:32768",
"4": "172.20.0.67:32768",
"5": "172.20.0.75:32768",
}
# Each AS participating in hidden paths has their own hidden paths configuration file.
hp_configs = {
"2": "hp_groups_as2_as5.yml",
"3": "hp_groups_as3.yml",
"4": "hp_groups_as4.yml",
"5": "hp_groups_as2_as5.yml",
}
# Edit all the configuration files of daemons and control services with
# the computed configuration URL
for as_number in as_numbers:
hp_config_url = "http://%s:%d/acceptance/hidden_paths/testdata/%s" % (
server_ips[as_number], http_server_port, hp_configs[as_number])
daemon_path = self.test_state.artifacts / "gen" / ("ASff00_0_%s" % as_number) \
/ "sd.toml"
scion.update_toml({"sd.hidden_path_groups": hp_config_url}, [daemon_path])
control_id = "cs1-ff00_0_%s-1" % as_number
control_path = self.test_state.artifacts / "gen" / ("ASff00_0_%s" % as_number) \
/ ("%s.toml" % control_id)
scion.update_toml({"path.hidden_paths_cfg": hp_config_url}, [control_path])
# For simplicity, expose the services in all hidden paths ASes,
# even though some don't need the registration service.
as_dir_path = self.test_state.artifacts / "gen" / ("ASff00_0_%s" % as_number)
topology_update = {
"hidden_segment_lookup_service.%s.addr" % control_id:
control_addresses[as_number],
"hidden_segment_registration_service.%s.addr" % control_id:
control_addresses[as_number],
}
topology_file = as_dir_path / "topology.json"
scion.update_json(topology_update, [topology_file])
server = http.server.HTTPServer(
("0.0.0.0", http_server_port), http.server.SimpleHTTPRequestHandler)
server_thread = threading.Thread(target=configuration_server, args=[server])
server_thread.start()
self.setup_start()
time.sleep(4) # Give applications time to download configurations
self._testers = {
"2": "tester_1-ff00_0_2",
"3": "tester_1-ff00_0_3",
"4": "tester_1-ff00_0_4",
"5": "tester_1-ff00_0_5",
}
self._ases = {
"2": "1-ff00:0:2",
"3": "1-ff00:0:3",
"4": "1-ff00:0:4",
"5": "1-ff00:0:5",
}
server.shutdown()
def _run(self):
# Group 3
self._showpaths_bidirectional("2", "3", 0)
self._showpaths_bidirectional("2", "5", 0)
self._showpaths_bidirectional("3", "5", 0)
# Group 4
self._showpaths_bidirectional("2", "4", 0)
self._showpaths_bidirectional("2", "5", 0)
self._showpaths_bidirectional("4", "5", 0)
# Group 3 X 4
self._showpaths_bidirectional("3", "4", 1)
def _showpaths_bidirectional(self, source: str, destination: str, retcode: int):
self._showpaths_run(source, destination, retcode)
self._showpaths_run(destination, source, retcode)
def _showpaths_run(self, source_as: str, destination_as: str, retcode: int):
print(cmd.docker("exec", "-t", self._testers[source_as], "scion",
"sp", self._ases[destination_as],
"--timeout", "2s",
retcode=retcode))
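# Hedged illustration (not part of the original test): the connectivity matrix
# described in the class docstring, expressed as data. A return code of 0 means
# `scion showpaths` is expected to find a path, 1 means it is not. Names and
# structure here are assumptions for illustration only.
_EXPECTED_RETCODES = {
    frozenset({"2", "3"}): 0,  # group ff00:0:2-3
    frozenset({"2", "5"}): 0,
    frozenset({"3", "5"}): 0,
    frozenset({"2", "4"}): 0,  # group ff00:0:2-4
    frozenset({"4", "5"}): 0,
    frozenset({"3", "4"}): 1,  # different groups: no connectivity expected
}


def expected_retcode(a: str, b: str) -> int:
    """Look up the expected showpaths return code for an unordered AS pair."""
    return _EXPECTED_RETCODES[frozenset({a, b})]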
def configuration_server(server):
print("HTTP configuration server starting on %s:%d." % server.server_address)
server.serve_forever()
print("HTTP configuration server closed.")
if __name__ == "__main__":
base.register_commands(Test)
Test.test_state = base.TestState(scion.SCIONDocker(), docker.Compose())
Test.run()
|
netsec-ethz/scion
|
acceptance/hidden_paths/test.py
|
Python
|
apache-2.0
| 6,367
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utils related to keras metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import weakref
from enum import Enum
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.generic_utils import to_list
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import tf_decorator
NEG_INF = -1e10
class Reduction(Enum):
"""Types of metrics reduction.
Contains the following values:
* `SUM`: Scalar sum of weighted values.
* `SUM_OVER_BATCH_SIZE`: Scalar sum of weighted values divided by
number of elements.
* `WEIGHTED_MEAN`: Scalar sum of weighted values divided by sum of weights.
"""
SUM = 'sum'
SUM_OVER_BATCH_SIZE = 'sum_over_batch_size'
WEIGHTED_MEAN = 'weighted_mean'
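# Hedged illustration (not part of the original module): the three reductions
# above spelled out with NumPy so the arithmetic is explicit. The example
# arrays are made up.
import numpy as np

_example_values = np.array([1.0, 2.0, 3.0])
_example_weights = np.array([0.5, 1.0, 0.5])
_weighted = _example_values * _example_weights              # [0.5, 2.0, 1.5]
_sum = _weighted.sum()                                      # SUM -> 4.0
_sum_over_batch = _weighted.sum() / _weighted.size          # SUM_OVER_BATCH_SIZE -> 4/3
_weighted_mean = _weighted.sum() / _example_weights.sum()   # WEIGHTED_MEAN -> 2.0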
def update_state_wrapper(update_state_fn):
"""Decorator to wrap metric `update_state()` with `add_update()`.
Args:
update_state_fn: function that accumulates metric statistics.
Returns:
Decorated function that wraps `update_state_fn()` with `add_update()`.
"""
def decorated(metric_obj, *args, **kwargs):
"""Decorated function with `add_update()`."""
strategy = distribution_strategy_context.get_strategy()
# TODO(b/142574744): Remove this check if a better solution is found for
# declaring keras Metric outside of TPUStrategy and then updating it per
# replica.
for weight in metric_obj.weights:
if (backend.is_tpu_strategy(strategy) and
not strategy.extended.variable_created_in_scope(weight)
and not distribution_strategy_context.in_cross_replica_context()):
raise ValueError(
'Trying to run metric.update_state in replica context when '
'the metric was not created in TPUStrategy scope. '
'Make sure the keras Metric is created in TPUStrategy scope.')
with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):
update_op = update_state_fn(*args, **kwargs)
if update_op is not None: # update_op will be None in eager execution.
metric_obj.add_update(update_op)
return update_op
return tf_decorator.make_decorator(update_state_fn, decorated)
def result_wrapper(result_fn):
"""Decorator to wrap metric `result()` function in `merge_call()`.
Result computation is an idempotent operation that simply calculates the
metric value using the state variables.
If metric state variables are distributed across replicas/devices and
`result()` is requested from the context of one device - This function wraps
`result()` in a distribution strategy `merge_call()`. With this,
the metric state variables will be aggregated across devices.
Args:
result_fn: function that computes the metric result.
Returns:
Decorated function that wraps `result_fn()` in distribution strategy
`merge_call()`.
"""
def decorated(metric_obj, *args):
"""Decorated function with merge_call."""
has_strategy = distribution_strategy_context.has_strategy()
replica_context = distribution_strategy_context.get_replica_context()
if not has_strategy or replica_context is None:
result_t = array_ops.identity(result_fn(*args))
else:
# TODO(psv): Test distribution of metrics using different distribution
# strategies.
# Creating a wrapper for merge_fn. merge_call invokes the given merge_fn
# with distribution object as the first parameter. We create a wrapper
# here so that the result function need not have that parameter.
def merge_fn_wrapper(distribution, merge_fn, *args):
# We will get `PerReplica` merge function. Taking the first one as all
# are identical copies of the function that we had passed below.
result = distribution.experimental_local_results(merge_fn)[0](*args)
# Wrapping result in identity so that control dependency between
# update_op from `update_state` and result works in case result returns
# a tensor.
return array_ops.identity(result)
# Wrapping result in merge_call. merge_call is used when we want to leave
# replica mode and compute a value in cross replica mode.
result_t = replica_context.merge_call(
merge_fn_wrapper, args=(result_fn,) + args)
# We are saving the result op here to be used in train/test execution
# functions. This basically gives the result op that was generated with a
# control dep to the updates for these workflows.
metric_obj._call_result = result_t
return result_t
return tf_decorator.make_decorator(result_fn, decorated)
def weakmethod(method):
"""Creates a weak reference to the bound method."""
cls = method.im_class
func = method.im_func
instance_ref = weakref.ref(method.im_self)
@functools.wraps(method)
def inner(*args, **kwargs):
return func.__get__(instance_ref(), cls)(*args, **kwargs)
del method
return inner
def assert_thresholds_range(thresholds):
if thresholds is not None:
invalid_thresholds = [t for t in thresholds if t is None or t < 0 or t > 1]
if invalid_thresholds:
raise ValueError(
'Threshold values must be in [0, 1]. Invalid values: {}'.format(
invalid_thresholds))
def parse_init_thresholds(thresholds, default_threshold=0.5):
if thresholds is not None:
assert_thresholds_range(to_list(thresholds))
thresholds = to_list(default_threshold if thresholds is None else thresholds)
return thresholds
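# Hedged illustration (not part of the original module): behaviour of the two
# helper functions above for a few example inputs.
assert parse_init_thresholds(None) == [0.5]
assert parse_init_thresholds(0.3) == [0.3]
assert parse_init_thresholds([0.1, 0.9]) == [0.1, 0.9]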
class ConfusionMatrix(Enum):
TRUE_POSITIVES = 'tp'
FALSE_POSITIVES = 'fp'
TRUE_NEGATIVES = 'tn'
FALSE_NEGATIVES = 'fn'
class AUCCurve(Enum):
"""Type of AUC Curve (ROC or PR)."""
ROC = 'ROC'
PR = 'PR'
@staticmethod
def from_str(key):
if key in ('pr', 'PR'):
return AUCCurve.PR
elif key in ('roc', 'ROC'):
return AUCCurve.ROC
else:
raise ValueError('Invalid AUC curve value "%s".' % key)
class AUCSummationMethod(Enum):
"""Type of AUC summation method.
(https://en.wikipedia.org/wiki/Riemann_sum)
Contains the following values:
* 'interpolation': Applies mid-point summation scheme for `ROC` curve. For
`PR` curve, interpolates (true/false) positives but not the ratio that is
precision (see Davis & Goadrich 2006 for details).
* 'minoring': Applies left summation for increasing intervals and right
summation for decreasing intervals.
* 'majoring': Applies right summation for increasing intervals and left
summation for decreasing intervals.
"""
INTERPOLATION = 'interpolation'
MAJORING = 'majoring'
MINORING = 'minoring'
@staticmethod
def from_str(key):
if key in ('interpolation', 'Interpolation'):
return AUCSummationMethod.INTERPOLATION
elif key in ('majoring', 'Majoring'):
return AUCSummationMethod.MAJORING
elif key in ('minoring', 'Minoring'):
return AUCSummationMethod.MINORING
else:
raise ValueError('Invalid AUC summation method value "%s".' % key)
def update_confusion_matrix_variables(variables_to_update,
y_true,
y_pred,
thresholds,
top_k=None,
class_id=None,
sample_weight=None,
multi_label=False,
label_weights=None):
"""Returns op to update the given confusion matrix variables.
For every pair of values in y_true and y_pred:
true_positives: y_true == True and y_pred > thresholds
false_negatives: y_true == True and y_pred <= thresholds
true_negatives: y_true == False and y_pred <= thresholds
false_positives: y_true == False and y_pred > thresholds
The results will be weighted and added together. When multiple thresholds are
provided, we will repeat the same for every threshold.
For estimation of these metrics over a stream of data, the function creates an
`update_op` operation that updates the given variables.
If `sample_weight` is `None`, weights default to 1.
Use weights of 0 to mask values.
Args:
variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys
and corresponding variables to update as values.
y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to `bool`.
y_pred: A floating point `Tensor` of arbitrary shape and whose values are in
the range `[0, 1]`.
thresholds: A float value, float tensor, python list, or tuple of float
thresholds in `[0, 1]`, or NEG_INF (used when top_k is set).
top_k: Optional int, indicates that the positive labels should be limited to
the top k predictions.
class_id: Optional int, limits the prediction and labels to the class
specified by this argument.
sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true` (i.e., all dimensions must
be either `1`, or the same as the corresponding `y_true` dimension).
multi_label: Optional boolean indicating whether multidimensional
prediction/labels should be treated as multilabel responses, or flattened
into a single label. When True, the values of `variables_to_update` must
have a second dimension equal to the number of labels in y_true and
y_pred, and those tensors must not be RaggedTensors.
label_weights: (optional) tensor of non-negative weights for multilabel
data. The weights are applied when calculating TP, FP, FN, and TN without
explicit multilabel handling (i.e. when the data is to be flattened).
Returns:
Update op.
Raises:
ValueError: If `y_pred` and `y_true` have mismatched shapes, or if
`sample_weight` is not `None` and its shape doesn't match `y_pred`, or if
`variables_to_update` contains invalid keys.
"""
if multi_label and label_weights is not None:
raise ValueError('`label_weights` for multilabel data should be handled '
'outside of `update_confusion_matrix_variables` when '
'`multi_label` is True.')
if variables_to_update is None:
return
if not any(
key for key in variables_to_update if key in list(ConfusionMatrix)):
raise ValueError(
'Please provide at least one valid confusion matrix '
'variable to update. Valid variable key options are: "{}". '
'Received: "{}"'.format(
list(ConfusionMatrix), variables_to_update.keys()))
variable_dtype = list(variables_to_update.values())[0].dtype
y_true = math_ops.cast(y_true, dtype=variable_dtype)
y_pred = math_ops.cast(y_pred, dtype=variable_dtype)
thresholds = ops.convert_to_tensor_v2_with_dispatch(
thresholds, dtype=variable_dtype)
num_thresholds = thresholds.shape[0]
if multi_label:
one_thresh = math_ops.equal(
math_ops.cast(1, dtype=dtypes.int32),
array_ops.rank(thresholds),
name='one_set_of_thresholds_cond')
else:
[y_pred,
y_true], _ = ragged_assert_compatible_and_get_flat_values([y_pred, y_true],
sample_weight)
one_thresh = math_ops.cast(True, dtype=dtypes.bool)
invalid_keys = [
key for key in variables_to_update if key not in list(ConfusionMatrix)
]
if invalid_keys:
raise ValueError(
'Invalid keys: {}. Valid variable key options are: "{}"'.format(
invalid_keys, list(ConfusionMatrix)))
with ops.control_dependencies([
check_ops.assert_greater_equal(
y_pred,
math_ops.cast(0.0, dtype=y_pred.dtype),
message='predictions must be >= 0'),
check_ops.assert_less_equal(
y_pred,
math_ops.cast(1.0, dtype=y_pred.dtype),
message='predictions must be <= 1')
]):
if sample_weight is None:
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
else:
sample_weight = math_ops.cast(sample_weight, dtype=variable_dtype)
y_pred, y_true, sample_weight = (
losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true, sample_weight=sample_weight))
y_pred.shape.assert_is_compatible_with(y_true.shape)
if top_k is not None:
y_pred = _filter_top_k(y_pred, top_k)
if class_id is not None:
y_true = y_true[..., class_id]
y_pred = y_pred[..., class_id]
pred_shape = array_ops.shape(y_pred)
num_predictions = pred_shape[0]
if y_pred.shape.ndims == 1:
num_labels = 1
else:
num_labels = gen_math_ops.Prod(input=pred_shape[1:], axis=0)
thresh_label_tile = control_flow_ops.cond(
one_thresh, lambda: num_labels,
lambda: math_ops.cast(1, dtype=dtypes.int32))
# Reshape predictions and labels, adding a dim for thresholding.
if multi_label:
predictions_extra_dim = array_ops.expand_dims(y_pred, 0)
labels_extra_dim = array_ops.expand_dims(
math_ops.cast(y_true, dtype=dtypes.bool), 0)
else:
# Flatten predictions and labels when not multilabel.
predictions_extra_dim = array_ops.reshape(y_pred, [1, -1])
labels_extra_dim = array_ops.reshape(
math_ops.cast(y_true, dtype=dtypes.bool), [1, -1])
# Tile the thresholds for every prediction.
if multi_label:
thresh_pretile_shape = [num_thresholds, 1, -1]
thresh_tiles = [1, num_predictions, thresh_label_tile]
data_tiles = [num_thresholds, 1, 1]
else:
thresh_pretile_shape = [num_thresholds, -1]
thresh_tiles = [1, num_predictions * num_labels]
data_tiles = [num_thresholds, 1]
thresh_tiled = array_ops.tile(
array_ops.reshape(thresholds, thresh_pretile_shape),
array_ops.stack(thresh_tiles))
# Tile the predictions for every threshold.
preds_tiled = array_ops.tile(predictions_extra_dim, data_tiles)
# Compare predictions and threshold.
pred_is_pos = math_ops.greater(preds_tiled, thresh_tiled)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_extra_dim, data_tiles)
if sample_weight is not None:
sample_weight = weights_broadcast_ops.broadcast_weights(
math_ops.cast(sample_weight, dtype=variable_dtype), y_pred)
weights_tiled = array_ops.tile(
array_ops.reshape(sample_weight, thresh_tiles), data_tiles)
else:
weights_tiled = None
if label_weights is not None and not multi_label:
label_weights = array_ops.expand_dims(label_weights, 0)
label_weights = weights_broadcast_ops.broadcast_weights(label_weights,
y_pred)
label_weights_tiled = array_ops.tile(
array_ops.reshape(label_weights, thresh_tiles), data_tiles)
if weights_tiled is None:
weights_tiled = label_weights_tiled
else:
weights_tiled = math_ops.multiply(weights_tiled, label_weights_tiled)
update_ops = []
def weighted_assign_add(label, pred, weights, var):
label_and_pred = math_ops.cast(
math_ops.logical_and(label, pred), dtype=var.dtype)
if weights is not None:
label_and_pred *= math_ops.cast(weights, dtype=var.dtype)
return var.assign_add(math_ops.reduce_sum(label_and_pred, 1))
loop_vars = {
ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos),
}
update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update
update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update
if update_fn or update_tn:
pred_is_neg = math_ops.logical_not(pred_is_pos)
loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg)
if update_fp or update_tn:
label_is_neg = math_ops.logical_not(label_is_pos)
loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos)
if update_tn:
loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = (label_is_neg, pred_is_neg)
for matrix_cond, (label, pred) in loop_vars.items():
if matrix_cond in variables_to_update:
update_ops.append(
weighted_assign_add(label, pred, weights_tiled,
variables_to_update[matrix_cond]))
return control_flow_ops.group(update_ops)
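# Hedged illustration (not part of the original module): the per-threshold
# confusion-matrix counts described in the docstring above, computed with plain
# NumPy for a single threshold. The function and variable names are made up.
import numpy as np

def _toy_confusion_counts(y_true, y_pred, threshold, sample_weight=None):
    y_true = np.asarray(y_true, dtype=bool)
    pred_pos = np.asarray(y_pred, dtype=float) > threshold
    w = np.ones(y_true.shape) if sample_weight is None else np.asarray(sample_weight, dtype=float)
    return {
        'tp': np.sum(w * (y_true & pred_pos)),
        'fn': np.sum(w * (y_true & ~pred_pos)),
        'tn': np.sum(w * (~y_true & ~pred_pos)),
        'fp': np.sum(w * (~y_true & pred_pos)),
    }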
def _filter_top_k(x, k):
"""Filters top-k values in the last dim of x and set the rest to NEG_INF.
Used for computing top-k prediction values in dense labels (which has the same
shape as predictions) for recall and precision top-k metrics.
Args:
x: tensor with any dimensions.
k: the number of values to keep.
Returns:
tensor with same shape and dtype as x.
"""
_, top_k_idx = nn_ops.top_k(x, k, sorted=False)
top_k_mask = math_ops.reduce_sum(
array_ops.one_hot(top_k_idx, array_ops.shape(x)[-1], axis=-1), axis=-2)
return x * top_k_mask + NEG_INF * (1 - top_k_mask)
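# Hedged illustration (not part of the original module): a NumPy analogue of
# _filter_top_k above, keeping the k largest entries along the last axis and
# pushing everything else to NEG_INF. The name is made up.
import numpy as np

def _toy_filter_top_k(x, k, neg_inf=NEG_INF):
    x = np.asarray(x, dtype=float)
    top_idx = np.argsort(x, axis=-1)[..., -k:]
    mask = np.zeros_like(x)
    np.put_along_axis(mask, top_idx, 1.0, axis=-1)
    return x * mask + neg_inf * (1 - mask)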
def ragged_assert_compatible_and_get_flat_values(values, mask=None):
"""If ragged, it checks the compatibility and then returns the flat_values.
Note: If two tensors are dense, it does not check their compatibility.
Note: Although two ragged tensors with different ragged ranks could have
identical overall rank and dimension sizes and hence be compatible,
we do not support those cases.
Args:
values: A list of potentially ragged tensor of the same ragged_rank.
mask: A potentially ragged tensor of the same ragged_rank as elements in
Values.
Returns:
A tuple in which the first element is the list of tensors and the second
is the mask tensor. ([Values], mask). Mask and the element in Values
are equal to the flat_values of the input arguments (if they were ragged).
"""
if isinstance(values, list):
is_all_ragged = \
all(isinstance(rt, ragged_tensor.RaggedTensor) for rt in values)
is_any_ragged = \
any(isinstance(rt, ragged_tensor.RaggedTensor) for rt in values)
else:
is_all_ragged = isinstance(values, ragged_tensor.RaggedTensor)
is_any_ragged = is_all_ragged
if (is_all_ragged and
((mask is None) or isinstance(mask, ragged_tensor.RaggedTensor))):
to_be_stripped = False
if not isinstance(values, list):
values = [values]
to_be_stripped = True
# NOTE: we leave the flat_values compatibility to
# tf.TensorShape `assert_is_compatible_with`
# check if both dynamic dimensions are equal and then use the flat_values.
nested_row_split_list = [rt.nested_row_splits for rt in values]
assertion_list = _assert_splits_match(nested_row_split_list)
# if both are ragged sample_weights also should be ragged with same dims.
if isinstance(mask, ragged_tensor.RaggedTensor):
assertion_list_for_mask = _assert_splits_match(
[nested_row_split_list[0], mask.nested_row_splits])
with ops.control_dependencies(assertion_list_for_mask):
mask = array_ops.expand_dims(mask.flat_values, -1)
# values has at least 1 element.
flat_values = []
for value in values:
with ops.control_dependencies(assertion_list):
flat_values.append(array_ops.expand_dims(value.flat_values, -1))
values = flat_values[0] if to_be_stripped else flat_values
elif is_any_ragged:
raise TypeError('One of the inputs does not have acceptable types.')
# values is empty, or values are not ragged while the mask is ragged.
elif isinstance(mask, ragged_tensor.RaggedTensor):
raise TypeError('Ragged mask is not allowed with non-ragged inputs.')
return values, mask
def _assert_splits_match(nested_splits_lists):
"""Checks that the given splits lists are identical.
Performs static tests to ensure that the given splits lists are identical,
and returns a list of control dependency op tensors that check that they are
fully identical.
Args:
nested_splits_lists: A list of nested_splits_lists, where each split_list is
a list of `splits` tensors from a `RaggedTensor`, ordered from outermost
ragged dimension to innermost ragged dimension.
Returns:
A list of control dependency op tensors.
Raises:
ValueError: If the splits are not identical.
"""
error_msg = 'Inputs must have identical ragged splits'
for splits_list in nested_splits_lists:
if len(splits_list) != len(nested_splits_lists[0]):
raise ValueError(error_msg)
return [
check_ops.assert_equal(s1, s2, message=error_msg) # pylint: disable=g-complex-comprehension
for splits_list in nested_splits_lists[1:]
for (s1, s2) in zip(nested_splits_lists[0], splits_list)
]
|
annarev/tensorflow
|
tensorflow/python/keras/utils/metrics_utils.py
|
Python
|
apache-2.0
| 21,928
|
import json
import warnings
import h5py
import numpy as np
from .. import options
from ..database import db
from .braindata import BrainData, VolumeData, VertexData, _hash
default_cmap = options.config.get("basic", "default_cmap")
def normalize(data):
if isinstance(data, tuple):
if len(data) == 3:
if data[0].dtype == np.uint8:
return VolumeRGB(data[0][...,0], data[0][...,1], data[0][...,2], *data[1:])
return Volume(*data)
elif len(data) == 2:
return Vertex(*data)
else:
raise TypeError("Invalid input for Dataview")
elif isinstance(data, Dataview):
return data
else:
raise TypeError("Invalid input for Dataview")
def _from_hdf_data(h5, name, xfmname=None, **kwargs):
"""Decodes a __hash named node from an HDF file into the
constituent Vertex or Volume object"""
dnode = h5.get("/data/%s"%name)
if dnode is None:
dnode = h5.get(name)
subj = dnode.attrs['subject']
#support old style xfmname saving as attribute
if xfmname is None and 'xfmname' in dnode.attrs:
xfmname = dnode.attrs['xfmname']
mask = None
if "mask" in dnode.attrs:
if dnode.attrs['mask'].startswith("__"):
mask = h5['/subjects/%s/transforms/%s/masks/%s'%(dnode.attrs['subject'], xfmname, dnode.attrs['mask'])].value
else:
mask = dnode.attrs['mask']
#support old style RGB volumes
if dnode.dtype == np.uint8 and dnode.shape[-1] in (3, 4):
alpha = None
if dnode.shape[-1] == 4:
alpha = dnode[..., 3]
if xfmname is None:
return VertexRGB(dnode[...,0], dnode[...,1], dnode[...,2], subj,
alpha=alpha, **kwargs)
return VolumeRGB(dnode[...,0], dnode[...,1], dnode[...,2], subj, xfmname,
alpha=alpha, mask=mask, **kwargs)
if xfmname is None:
return Vertex(dnode, subj, **kwargs)
return Volume(dnode, subj, xfmname, mask=mask, **kwargs)
def _from_hdf_view(h5, data, xfmname=None, vmin=None, vmax=None, **kwargs):
try:
basestring
strcls = (unicode, str)
except NameError:
strcls = str
if isinstance(data, strcls):
return _from_hdf_data(h5, data, xfmname=xfmname, vmin=vmin, vmax=vmax, **kwargs)
if len(data) == 2:
dim1 = _from_hdf_data(h5, data[0], xfmname=xfmname[0])
dim2 = _from_hdf_data(h5, data[1], xfmname=xfmname[1])
cls = Vertex2D if isinstance(dim1, Vertex) else Volume2D
return cls(dim1, dim2, vmin=vmin[0], vmin2=vmin[1],
vmax=vmax[0], vmax2=vmax[1], **kwargs)
elif len(data) == 4:
red, green, blue = [_from_hdf_data(h5, d, xfmname=xfmname) for d in data[:3]]
alpha = None
if data[3] is not None:
alpha = _from_hdf_data(h5, data[3], xfmname=xfmname)
cls = VertexRGB if isinstance(red, Vertex) else VolumeRGB
return cls(red, green, blue, alpha=alpha, **kwargs)
else:
raise ValueError("Invalid Dataview specification")
class Dataview(object):
def __init__(self, cmap=None, vmin=None, vmax=None, description="", state=None,
cvmin=None,cvmax=None,cvthr=False,**kwargs):
"""
Display options shared by all data views (colormap, value range, description,
and the background curvature settings documented below).
cvmin : float,optional
Minimum value for curvature colormap. Defaults to config file value.
cvmax : float, optional
Maximum value for background curvature colormap. Defaults to config file value.
cvthr : bool,optional
Apply threshold to background curvature
"""
if self.__class__ == Dataview:
raise TypeError('Cannot directly instantiate Dataview objects')
self.cmap = cmap if cmap is not None else default_cmap
self.vmin = vmin
self.vmax = vmax
self.state = state
self.attrs = kwargs
if 'priority' not in self.attrs:
self.attrs['priority'] = 1
self.description = description
def copy(self, *args, **kwargs):
kwargs.update(self.attrs)
return self.__class__(*args,
cmap=self.cmap,
vmin=self.vmin,
vmax=self.vmax,
description=self.description,
state=self.state,
**kwargs)
@property
def priority(self):
return self.attrs['priority']
@priority.setter
def priority(self, value):
self.attrs['priority'] = value
def to_json(self, simple=False):
if simple:
return dict()
sdict = dict(
state=self.state,
attrs=self.attrs.copy(),
desc=self.description)
try:
sdict.update(dict(
cmap=[self.cmap],
vmin=[self.vmin if self.vmin is not None else np.percentile(np.nan_to_num(self.data), 1)],
vmax=[self.vmax if self.vmax is not None else np.percentile(np.nan_to_num(self.data), 99)]
))
except AttributeError:
pass
return sdict
@staticmethod
def from_hdf(node):
data = json.loads(node[0])
desc = node[1]
try:
cmap = json.loads(node[2])
except:
cmap = node[2]
vmin = json.loads(node[3])
vmax = json.loads(node[4])
state = json.loads(node[5])
attrs = json.loads(node[6])
try:
xfmname = json.loads(node[7])
except ValueError:
xfmname = None
if not isinstance(vmin, list):
vmin = [vmin]
if not isinstance(vmax, list):
vmax = [vmax]
if not isinstance(cmap, list):
cmap = [cmap]
if len(data) == 1:
xfm = None if xfmname is None else xfmname[0]
return _from_hdf_view(node.file, data[0], xfmname=xfm, cmap=cmap[0], description=desc,
vmin=vmin[0], vmax=vmax[0], state=state, **attrs)
else:
views = [_from_hdf_view(node.file, d, xfmname=x) for d, x in zip(data, xfmname)]
raise NotImplementedError
def _write_hdf(self, h5, name="data", data=None, xfmname=None):
views = h5.require_group("/views")
view = views.require_dataset(name, (8,), h5py.special_dtype(vlen=str))
view[0] = json.dumps(data)
view[1] = self.description
try:
view[2] = json.dumps([self.cmap])
view[3] = json.dumps([self.vmin])
view[4] = json.dumps([self.vmax])
except AttributeError:
#For VolumeRGB/Vertex, there is no cmap/vmin/vmax
view[2] = None
view[3:5] = "null"
view[5] = json.dumps(self.state)
view[6] = json.dumps(self.attrs)
view[7] = json.dumps(xfmname)
return view
@property
def raw(self):
from matplotlib import colors, cm, pyplot as plt
import glob, os
# Get colormap from matplotlib or pycortex colormaps
## -- redundant code, here and in cortex/quickflat.py -- ##
if isinstance(self.cmap,(str,unicode)):
if not self.cmap in cm.__dict__:
# unknown colormap, test whether it's in pycortex colormaps
cmapdir = options.config.get('webgl', 'colormaps')
colormaps = glob.glob(os.path.join(cmapdir, "*.png"))
colormaps = dict(((os.path.split(c)[1][:-4],c) for c in colormaps))
if not self.cmap in colormaps:
raise Exception('Unknown color map!')
I = plt.imread(colormaps[self.cmap])
cmap = colors.ListedColormap(np.squeeze(I))
# Register colormap while we're at it
cm.register_cmap(self.cmap,cmap)
else:
cmap = cm.get_cmap(self.cmap)
elif isinstance(self.cmap,colors.Colormap):
cmap = self.cmap
# Normalize colors according to vmin, vmax
norm = colors.Normalize(self.vmin, self.vmax)
cmapper = cm.ScalarMappable(norm=norm, cmap=cmap)
color_data = cmapper.to_rgba(self.data.flatten()).reshape(self.data.shape+(4,))
# rollaxis puts the last color dimension first, to allow output of separate channels: r,g,b,a = dataset.raw
return np.rollaxis(color_data, -1)
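# Hedged illustration (not part of pycortex): the vmin/vmax -> RGBA mapping used
# by `raw` above, reduced to plain matplotlib and NumPy. The function name and
# default colormap are arbitrary choices for this sketch.
def _toy_raw(data, vmin, vmax, cmap_name="viridis"):
    from matplotlib import colors, cm
    norm = colors.Normalize(vmin, vmax)
    mapper = cm.ScalarMappable(norm=norm, cmap=cmap_name)
    rgba = mapper.to_rgba(np.asarray(data))   # (..., 4) float RGBA in [0, 1]
    return np.rollaxis(rgba, -1)              # channels first: r, g, b, a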
class Multiview(Dataview):
def __init__(self, views, description=""):
for view in views:
if not isinstance(view, Dataview):
raise TypeError("Must be a View object!")
raise NotImplementedError
self.views = views
def uniques(self, collapse=False):
for view in self.views:
for sv in view.uniques(collapse=collapse):
yield sv
class Volume(VolumeData, Dataview):
def __init__(self, data, subject, xfmname, mask=None,
cmap=None, vmin=None, vmax=None, description="", **kwargs):
super(Volume, self).__init__(data, subject, xfmname, mask=mask,
cmap=cmap, vmin=vmin, vmax=vmax, description=description, **kwargs)
def _write_hdf(self, h5, name="data"):
datanode = VolumeData._write_hdf(self, h5)
viewnode = Dataview._write_hdf(self, h5, name=name,
data=[self.name], xfmname=[self.xfmname])
return viewnode
@property
def raw(self):
r, g, b, a = super(Volume, self).raw
return VolumeRGB(r, g, b, self.subject, self.xfmname, a,
description=self.description, state=self.state, **self.attrs)
class Vertex(VertexData, Dataview):
def __init__(self, data, subject, cmap=None, vmin=None, vmax=None, description="", **kwargs):
super(Vertex, self).__init__(data, subject, cmap=cmap, vmin=vmin, vmax=vmax,
description=description, **kwargs)
def _write_hdf(self, h5, name="data"):
datanode = VertexData._write_hdf(self, h5)
viewnode = Dataview._write_hdf(self, h5, name=name, data=[self.name])
return viewnode
@property
def raw(self):
r, g, b, a = super(Vertex, self).raw
return VertexRGB(r, g, b, self.subject, a,
description=self.description, state=self.state, **self.attrs)
from .viewRGB import VolumeRGB, VertexRGB
from .view2D import Volume2D, Vertex2D
|
CVML/pycortex
|
cortex/dataset/views.py
|
Python
|
bsd-2-clause
| 10,357
|
# Simple test of probabilistic modelling
# Calculates time to break through assuming plug flow
from simplehydro import *
from monte_carlo import *
from conversion import *
import matplotlib.pyplot as plt
x = 10 # Distance, x (m)
n = 0.3 # Effective porosity, n (-)
K = 1e-7 # Hydraulic conductivity, K (m/s)
H = 1 # Head where x=0 (m)
h = 0 # Head where x=x (m)
i = gradient2(H, h, x)
# Deterministic
v = velocity2(K, i, n)
Breakthrough = x / v
print('Deterministic breakthrough in ' + str(round(secsToDays(Breakthrough), 2)) + ' days')
# Probabilistic
I = 100001 # Number of iterations
pK = Normal(1e-7, 1e-8, I) # Input distribution for K
results = []
for iteration in range(I):
results.append(velocity2(pK[iteration], i, n))
results = [round(secsToDays(x/v), 2) for v in results] # Calculate breakthroughs
#print(results)
# the histogram of the data
n, bins, patches = plt.hist(results, 50, density=True, facecolor='green', alpha=0.75)  # density=True replaces the removed normed=1
# add a 'best fit' line
#l = plt.plot(bins, results, 'r--', linewidth=1)
plt.xlabel('Breakthrough (days)')
plt.ylabel('Probability density')
plt.axis([0, max(results), 0, 200/I])
plt.grid(True)
plt.show()
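# Hedged, self-contained variant (not part of the original script): the same
# Monte Carlo idea using only NumPy, with seepage velocity v = K*i/n_e and
# breakthrough time t = x/v. The values mirror the parameters above; the helper
# functions from simplehydro/monte_carlo/conversion are replaced by direct formulas.
import numpy as np

_x, _n_e, _H, _h, _I = 10.0, 0.3, 1.0, 0.0, 100001
_i = (_H - _h) / _x                          # hydraulic gradient = 0.1
_K = np.random.normal(1e-7, 1e-8, _I)        # sampled conductivities (m/s)
_v = _K * _i / _n_e                          # seepage velocities (m/s)
_breakthrough_days = (_x / _v) / 86400.0     # seconds -> days
print('NumPy-only mean breakthrough: %.2f days' % _breakthrough_days.mean())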
|
tachylyte/HydroGeoPy
|
EXAMPLES/exampleMonteCarloUni.py
|
Python
|
bsd-2-clause
| 1,242
|
import os
from . import CoreSettings
class Base17Settings(CoreSettings):
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
@property
def DATABASES(self):
return {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(self.BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
rapilabs/django-classy-settings
|
cbs/base/django17.py
|
Python
|
bsd-2-clause
| 1,472
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc4010
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc5751
class EnvelopedDataTestCase(unittest.TestCase):
env_data_pem_text = """\
MIIFewYJKoZIhvcNAQcDoIIFbDCCBWgCAQIxUqJQAgEEMCMEEKBBI2KxDUPS5TCo
RCEDJo4YDzIwMTkwOTEyMTIwMDAwWjAMBgoqgxqMmkQHAQEBBBipFE2DxCLAx2Og
E53Jt21V8kAoscU7K3wwggUNBgkqhkiG9w0BBwEwHAYIKoMajJpEAQQEEEJPR1VT
SVZfQk9HVVNJViGAggTgc8exehjJD/gtEOIrg6tK5Emaa4PJ7l8f+EtyDD/ffQay
XVAGz2MXUIQMEzmSLrnsr9NEyXvxGpvcsi7mV8tDxZU0YuyhA/C/HMh7EaBKG1hj
C7xNw+IRIUxrbRJakMQbzMWWYJupC5zRu4/Ge9i+JVOGgES2E0L5LZSZ53wmnHA0
ols1PHl3F3Z2QM3CkewqA3NP1waXQ0XXb0Oyl6Gq12B7ksm7euPWA3KctEjfYBD6
nBT6wQd57rAMeFTk5aceWd2Sb/0xMpjfCg6GzX8pAWVEU8LqTvVmlSWdx3f3fAtU
giZ+gx7jNY8A6duln8zvMQn3mtPDCa50GzSrAx8JreHRWSDr3Dp8EfJzUgfy7dWl
I9xs5bh1TMkEMk+AHWQ5sBXTZkDgVAS5m1mIbXe7dzuxKsfGxjWu1eyy9J77mtOG
o9aAOqYfxv/I8YQcgWHTeQcIO39Rmt2QsI7trRaEJ1jgj2E1To5gRCbIQWzQuyoS
6affgu/9dwPXCAt0+0XrnO5vhaKX/RWm7ve8hYsiT0vI0hdBJ3rDRkdS9VL6NlnX
OuohAqEq8b3s2koBigdri052hceAElTHD+4A4qRDiMLlFLlQqoJlpBwCtEPZsIQS
y62K7J/Towxxab5FoFjUTC5f79xPQPoKxYdgUB5AeAu5HgdWTn49Uqg4v/spTPSN
RTmDMVVyZ9qhzJfkDpH3TKCAE5t59w4gSPe/7l+MeSml9O+L9HTd9Vng3LBbIds3
uQ4cfLyyQmly81qpJjR1+Rvwo46hOm0kf2sIFi0WULmP/XzLw6b1SbiHf/jqFg7T
FTyLMkPMPMmc7/kpLmYbKyTB4ineasTUL+bDrwu+uSzFAjTcI+1sz4Wo4p7RVywB
DKSI5Ocbd3iMt4XWJWtz0KBX6nBzlV+BBTCwaGMAU4IpPBYOuvcl7TJWx/ODBjbO
4zm4T/66w5IG3tKpsVMs4Jtrh8mtVXCLTBmKDzyjBVN2X8ALGXarItRgLa7k80lJ
jqTHwKCjiAMmT/eh67KzwmqBq5+8rJuXkax0NoXcDu6xkCMNHUQBYdnskaJqC2pu
8hIsPTOrh7ieYSEuchFvu7lI0E+p7ypW65CMiy+Y/Rm5OWeHzjKkU5AbPtx/Me2v
pQRCgaPwciZunx2Ivi1+WYUBU1pGNDO7Xz7a8UHbDURkh7b+40uz2d7YQjKgrZBv
6YwLAmw1LTE4bT9PM9n7LROnX8u6ksei8yiw8gZeVu+plWHbF+0O9siKAgxZlBna
0XFgPpdzjMDTS/sfTIYXWlFj7camhsmTDRjo5G2B212evaKmKgh5ALLSFSk86ZN5
KvQvcfsp81jvJCBmDStrsUgSMzy0Og2quHOd61hRTVlYzwvJvfMzHGKdIWwYUbHZ
OKo/KLEk3E36U9PkPoZGEL2ZeCH4F9Wh3mgg0knBfEmlPnGexmBby6NXGK7VW3l6
xcJlpdMaXKNVMfl2YK8k/34Hyft06KaYLEJsxAqk1pmLEmGhdZC1OAqovVB/1agS
zpMMaB9OWWqNsTjDc7tkDt8BZ72NsAbCI9XmsX81W+NqPb6Ju1dtI09bn113LX/Z
bOSdVicQcXSpl0FnTZaHgHJdQLcU28O7yFFOblqrvcMKpctdTA1TwG9LXEFttGrl
pgjZF3edo0Cez10epK+S
"""
def setUp(self):
self.asn1Spec = rfc5652.ContentInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.env_data_pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
ed, rest = der_decoder(asn1Object['content'], rfc5652.EnvelopedData())
self.assertFalse(rest)
self.assertTrue(ed.prettyPrint())
self.assertEqual(asn1Object['content'], der_encoder(ed))
kwa = ed['recipientInfos'][0]['kekri']['keyEncryptionAlgorithm']
self.assertEqual(rfc4010.id_npki_app_cmsSeed_wrap, kwa['algorithm'])
cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
self.assertEqual(rfc4010.id_seedCBC, cea['algorithm'])
param, rest = der_decoder(
cea['parameters'], asn1Spec=rfc4010.SeedCBCParameter())
self.assertFalse(rest)
self.assertTrue(param.prettyPrint())
self.assertEqual(cea['parameters'], der_encoder(param))
iv = univ.OctetString(hexValue='424f47555349565f424f475553495621')
self.assertEqual(iv, param)
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.env_data_pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
self.assertTrue(asn1Object['contentType'] in rfc5652.cmsContentTypesMap.keys())
kekri = asn1Object['content']['recipientInfos'][0]['kekri']
kwa = kekri['keyEncryptionAlgorithm']
self.assertEqual(rfc4010.id_npki_app_cmsSeed_wrap, kwa['algorithm'])
eci = asn1Object['content']['encryptedContentInfo']
cea = eci['contentEncryptionAlgorithm']
self.assertEqual(rfc4010.id_seedCBC, cea['algorithm'])
iv = univ.OctetString(hexValue='424f47555349565f424f475553495621')
self.assertEqual(iv, cea['parameters'])
class SMIMECapabilitiesTestCase(unittest.TestCase):
smime_capabilities_pem_text = "MB4wDAYIKoMajJpEAQQFADAOBgoqgxqMmkQHAQEBBQA="
def setUp(self):
self.asn1Spec = rfc5751.SMIMECapabilities()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
alg_oid_list = [ ]
for cap in asn1Object:
self.assertTrue(cap['parameters'].hasValue())
self.assertEqual(cap['parameters'], der_encoder(rfc4010.SeedSMimeCapability("")))
alg_oid_list.append(cap['capabilityID'])
self.assertIn(rfc4010.id_seedCBC, alg_oid_list)
self.assertIn(rfc4010.id_npki_app_cmsSeed_wrap, alg_oid_list)
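# Hedged illustration (not part of the original tests): the DER decode/re-encode
# round trip that both test cases above rely on, shown on a trivial value. The
# helper name and sample bytes are made up.
def _toy_der_round_trip():
    substrate = der_encoder(univ.OctetString(hexValue='424f4755'))
    obj, rest = der_decoder(substrate, asn1Spec=univ.OctetString())
    assert not rest
    assert der_encoder(obj) == substrate
    return obj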
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
|
etingof/pyasn1-modules
|
tests/test_rfc4010.py
|
Python
|
bsd-2-clause
| 5,879
|
import sys
import time
import re
import socket
import six
from collections import deque
from select import select
from fabric.state import env, output, win32
from fabric.auth import get_password, set_password
import fabric.network
from fabric.network import ssh, normalize
from fabric.exceptions import CommandTimeout
if win32:
import msvcrt
def _endswith(char_list, substring):
tail = list(char_list)[-1 * len(substring):]
substring = list(substring)
return tail == substring
def _has_newline(bytelist):
return '\r' in bytelist or '\n' in bytelist
def output_loop(*args, **kwargs):
OutputLooper(*args, **kwargs).loop()
class OutputLooper(object):
def __init__(self, chan, attr, stream, capture, timeout):
self.chan = chan
self.stream = stream
self.capture = capture
self.timeout = timeout
self.read_func = getattr(chan, attr)
self.prefix = "[%s] %s: " % (
env.host_string,
"out" if attr == 'recv' else "err"
)
self.printing = getattr(output, 'stdout' if (attr == 'recv') else 'stderr')
self.linewise = (env.linewise or env.parallel)
self.reprompt = False
self.read_size = 4096
self.write_buffer = deque(maxlen=len(self.prefix))
def _flush(self, text):
self.stream.write(text)
# Actually only flush if not in linewise mode.
# When linewise is set (e.g. in parallel mode) flushing makes
# doubling-up of line prefixes, and other mixed output, more likely.
if not env.linewise:
self.stream.flush()
self.write_buffer.extend(text)
def loop(self):
"""
Loop, reading from <chan>.<attr>(), writing to <stream> and buffering to <capture>.
Will raise `~fabric.exceptions.CommandTimeout` if network timeouts
continue to be seen past the defined ``self.timeout`` threshold.
(Timeouts before then are considered part of normal short-timeout fast
network reading; see Fabric issue #733 for background.)
"""
# Initialize loop variables
initial_prefix_printed = False
seen_cr = False
line = []
py3_buffer = b''
# Allow prefix to be turned off.
if not env.output_prefix:
self.prefix = ""
start = time.time()
while True:
# Handle actual read
try:
bytelist = self.read_func(self.read_size)
except socket.timeout:
elapsed = time.time() - start
if self.timeout is not None and elapsed > self.timeout:
raise CommandTimeout(timeout=self.timeout)
continue
if six.PY3 is True and isinstance(bytelist, six.binary_type):
# Note that we have to decode this right away, even if an error
# is thrown only later in the code, because e.g. '' != b'' (see
# first if below).
py3_buffer += bytelist
try:
bytelist = py3_buffer.decode('utf-8')
except UnicodeDecodeError:
# Go back and grab more bytes so we hopefully get a
# complete and valid Python string.
# Might hang here if remote server sends garbage but unsure
# if it's worth switching to processing byte by byte ...
continue
else:
# Reset the buffer as we succeeded
py3_buffer = b''
# Empty byte == EOS
if bytelist == '':
# If linewise, ensure we flush any leftovers in the buffer.
if self.linewise and line:
self._flush(self.prefix)
self._flush("".join(line))
break
# A None capture variable implies that we're in open_shell()
if self.capture is None:
# Just print directly -- no prefixes, no capturing, nada
# And since we know we're using a pty in this mode, just go
# straight to stdout.
self._flush(bytelist)
# Otherwise, we're in run/sudo and need to handle capturing and
# prompts.
else:
# Print to user
if self.printing:
printable_bytes = bytelist
# Small state machine to eat \n after \r
if printable_bytes[-1] == "\r":
seen_cr = True
if printable_bytes[0] == "\n" and seen_cr:
printable_bytes = printable_bytes[1:]
seen_cr = False
while _has_newline(printable_bytes) and printable_bytes != "":
# at most 1 split !
cr = re.search("(\r\n|\r|\n)", printable_bytes)
if cr is None:
break
end_of_line = printable_bytes[:cr.start(0)]
printable_bytes = printable_bytes[cr.end(0):]
if not initial_prefix_printed:
self._flush(self.prefix)
if _has_newline(end_of_line):
end_of_line = ''
if self.linewise:
self._flush("".join(line) + end_of_line + "\n")
line = []
else:
self._flush(end_of_line + "\n")
initial_prefix_printed = False
if self.linewise:
line += [printable_bytes]
else:
if not initial_prefix_printed:
self._flush(self.prefix)
initial_prefix_printed = True
self._flush(printable_bytes)
# Now we have handled printing, handle interactivity
read_lines = re.split(r"(\r|\n|\r\n)", bytelist)
for fragment in read_lines:
# Store in capture buffer
self.capture += fragment
# Handle prompts
expected, response = self._get_prompt_response()
if expected:
# Remove the matched prompt from the capture buffer in place (deleting
# from a temporary list() copy would be a no-op).
del self.capture[-1 * len(expected):]
self.chan.sendall(str(response) + '\n')
else:
prompt = _endswith(self.capture, env.sudo_prompt)
try_again = (_endswith(self.capture, env.again_prompt + '\n')
or _endswith(self.capture, env.again_prompt + '\r\n'))
if prompt:
self.prompt()
elif try_again:
self.try_again()
# Print trailing new line if the last thing we printed was our line
# prefix.
if self.prefix and "".join(self.write_buffer) == self.prefix:
self._flush('\n')
def prompt(self):
# Obtain cached password, if any
password = get_password(*normalize(env.host_string))
# Remove the prompt itself from the capture buffer. This is
# backwards compatible with Fabric 0.9.x behavior; the user
# will still see the prompt on their screen (no way to avoid
# this) but at least it won't clutter up the captured text.
# NOTE: Yes, the original RingBuffer from Fabric can do this more elegantly.
# This removes the last N elements from the list.
_pop_count = min(len(self.capture), len(env.sudo_prompt))
for i in range(0, _pop_count):
self.capture.pop()
# If the password we just tried was bad, prompt the user again.
if (not password) or self.reprompt:
# Print the prompt and/or the "try again" notice if
# output is being hidden. In other words, since we need
# the user's input, they need to see why we're
# prompting them.
if not self.printing:
self._flush(self.prefix)
if self.reprompt:
self._flush(env.again_prompt + '\n' + self.prefix)
self._flush(env.sudo_prompt)
# Prompt for, and store, password. Give empty prompt so the
# initial display "hides" just after the actually-displayed
# prompt from the remote end.
self.chan.input_enabled = False
password = fabric.network.prompt_for_password(
prompt=" ", no_colon=True, stream=self.stream
)
self.chan.input_enabled = True
# Update env.password, env.passwords if necessary
user, host, port = normalize(env.host_string)
# TODO: in 2.x, make sure to only update sudo-specific password
# config values, not login ones.
set_password(user, host, port, password)
# Reset reprompt flag
self.reprompt = False
# Send current password down the pipe
self.chan.sendall(password + '\n')
def try_again(self):
# Remove text from capture buffer
self.capture = list(self.capture)[:len(env.again_prompt)]
# Set state so we re-prompt the user at the next prompt.
self.reprompt = True
def _get_prompt_response(self):
"""
Iterate through the request prompts dict and return the response and
original request if we find a match
"""
for tup in six.iteritems(env.prompts):
if _endswith(self.capture, tup[0]):
return tup
return None, None
def input_loop(chan, using_pty):
while not chan.exit_status_ready():
if win32:
have_char = msvcrt.kbhit()
else:
r, w, x = select([sys.stdin], [], [], 0.0)
have_char = (r and r[0] == sys.stdin)
if have_char and chan.input_enabled:
# Send all local stdin to remote end's stdin
byte = msvcrt.getch() if win32 else sys.stdin.read(1)
chan.sendall(byte)
# Optionally echo locally, if needed.
if not using_pty and env.echo_stdin:
# Not using fastprint() here -- it prints as 'user'
# output level, don't want it to be accidentally hidden
sys.stdout.write(byte)
sys.stdout.flush()
time.sleep(ssh.io_sleep)
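# Hedged illustration (not part of Fabric): the incremental UTF-8 decoding
# pattern used in OutputLooper.loop() above, isolated into a small generator.
# Bytes are buffered until they form a complete, valid UTF-8 sequence.
def _decode_utf8_stream(chunks):
    buf = b''
    for chunk in chunks:
        buf += chunk
        try:
            text = buf.decode('utf-8')
        except UnicodeDecodeError:
            continue  # wait for the rest of a multi-byte sequence
        buf = b''
        yield text

# Example: a Euro sign split across two reads still decodes cleanly:
# list(_decode_utf8_stream([b'\xe2\x82', b'\xac'])) == [u'\u20ac']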
|
rodrigc/fabric
|
fabric/io.py
|
Python
|
bsd-2-clause
| 10,647
|
from __future__ import print_function
import sys, time, random, os, json
import six
from six.moves.urllib.parse import urlencode
from subprocess import Popen, PIPE
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.internet import reactor, defer, ssl
from scrapy import twisted_version
if twisted_version < (11, 0, 0):
def deferLater(clock, delay, func, *args, **kw):
def _cancel_method():
_cancel_cb(None)
d.errback(Exception())
def _cancel_cb(result):
if cl.active():
cl.cancel()
return result
d = defer.Deferred()
d.cancel = _cancel_method
d.addCallback(lambda ignored: func(*args, **kw))
d.addBoth(_cancel_cb)
cl = clock.callLater(delay, d.callback, None)
return d
else:
from twisted.internet.task import deferLater
def getarg(request, name, default=None, type=str):
if name in request.args:
return type(request.args[name][0])
else:
return default
class LeafResource(Resource):
isLeaf = True
def deferRequest(self, request, delay, f, *a, **kw):
def _cancelrequest(_):
# silence CancelledError
d.addErrback(lambda _: None)
d.cancel()
d = deferLater(reactor, delay, f, *a, **kw)
request.notifyFinish().addErrback(_cancelrequest)
return d
class Follow(LeafResource):
def render(self, request):
total = getarg(request, "total", 100, type=int)
show = getarg(request, "show", 1, type=int)
order = getarg(request, "order", "desc")
maxlatency = getarg(request, "maxlatency", 0, type=float)
n = getarg(request, "n", total, type=int)
if order == "rand":
nlist = [random.randint(1, total) for _ in range(show)]
else: # order == "desc"
nlist = range(n, max(n - show, 0), -1)
lag = random.random() * maxlatency
self.deferRequest(request, lag, self.renderRequest, request, nlist)
return NOT_DONE_YET
def renderRequest(self, request, nlist):
s = """<html> <head></head> <body>"""
args = request.args.copy()
for nl in nlist:
args["n"] = [str(nl)]
argstr = urlencode(args, doseq=True)
s += "<a href='/follow?%s'>follow %d</a><br>" % (argstr, nl)
s += """</body>"""
request.write(s)
request.finish()
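# Hedged illustration (not part of the original server): the descending link
# numbering used by Follow.render above, in isolation. With total=100, show=3
# and n=100 the rendered page links to follow 100, 99 and 98.
assert list(range(100, max(100 - 3, 0), -1)) == [100, 99, 98]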
class Delay(LeafResource):
def render_GET(self, request):
n = getarg(request, "n", 1, type=float)
b = getarg(request, "b", 1, type=int)
if b:
# send headers now and delay body
request.write('')
self.deferRequest(request, n, self._delayedRender, request, n)
return NOT_DONE_YET
def _delayedRender(self, request, n):
request.write("Response delayed for %0.3f seconds\n" % n)
request.finish()
class Status(LeafResource):
def render_GET(self, request):
n = getarg(request, "n", 200, type=int)
request.setResponseCode(n)
return ""
class Raw(LeafResource):
def render_GET(self, request):
request.startedWriting = 1
self.deferRequest(request, 0, self._delayedRender, request)
return NOT_DONE_YET
render_POST = render_GET
def _delayedRender(self, request):
raw = getarg(request, 'raw', 'HTTP 1.1 200 OK\n')
request.startedWriting = 1
request.write(raw)
request.channel.transport.loseConnection()
request.finish()
class Echo(LeafResource):
def render_GET(self, request):
output = {
'headers': dict(request.requestHeaders.getAllRawHeaders()),
'body': request.content.read(),
}
return json.dumps(output)
class Partial(LeafResource):
def render_GET(self, request):
request.setHeader("Content-Length", "1024")
self.deferRequest(request, 0, self._delayedRender, request)
return NOT_DONE_YET
def _delayedRender(self, request):
request.write("partial content\n")
request.finish()
class Drop(Partial):
def _delayedRender(self, request):
abort = getarg(request, "abort", 0, type=int)
request.write("this connection will be dropped\n")
tr = request.channel.transport
try:
if abort and hasattr(tr, 'abortConnection'):
tr.abortConnection()
else:
tr.loseConnection()
finally:
request.finish()
class Root(Resource):
def __init__(self):
Resource.__init__(self)
self.putChild("status", Status())
self.putChild("follow", Follow())
self.putChild("delay", Delay())
self.putChild("partial", Partial())
self.putChild("drop", Drop())
self.putChild("raw", Raw())
self.putChild("echo", Echo())
if six.PY2 and twisted_version > (12, 3, 0):
from twisted.web.test.test_webclient import PayloadResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.resource import EncodingResourceWrapper
self.putChild('payload', PayloadResource())
self.putChild("xpayload", EncodingResourceWrapper(PayloadResource(), [GzipEncoderFactory()]))
def getChild(self, name, request):
return self
def render(self, request):
return 'Scrapy mock HTTP server\n'
class MockServer():
def __enter__(self):
from scrapy.utils.test import get_testenv
self.proc = Popen([sys.executable, '-u', '-m', 'tests.mockserver'],
stdout=PIPE, env=get_testenv())
self.proc.stdout.readline()
def __exit__(self, exc_type, exc_value, traceback):
self.proc.kill()
self.proc.wait()
time.sleep(0.2)
if __name__ == "__main__":
root = Root()
factory = Site(root)
httpPort = reactor.listenTCP(8998, factory)
contextFactory = ssl.DefaultOpenSSLContextFactory(
os.path.join(os.path.dirname(__file__), 'keys/cert.pem'),
os.path.join(os.path.dirname(__file__), 'keys/cert.pem'),
)
httpsPort = reactor.listenSSL(8999, factory, contextFactory)
def print_listening():
httpHost = httpPort.getHost()
httpsHost = httpsPort.getHost()
print("Mock server running at http://%s:%d and https://%s:%d" % (
httpHost.host, httpHost.port, httpsHost.host, httpsHost.port))
reactor.callWhenRunning(print_listening)
reactor.run()
|
CENDARI/scrapy
|
tests/mockserver.py
|
Python
|
bsd-3-clause
| 6,602
|
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
# Copyright (c) 2015, Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import sys
if len(sys.argv) >= 2 and sys.argv[1] == 'bdist_wheel':
# bdist_wheel needs setuptools
import setuptools
assert setuptools # satisfy pyflakes
from distutils.core import setup, Extension
from Cython.Build import cythonize
from parsable import parsable
library_dirs = []
libraries = ['protobuf', 'distributions_shared']
include_dirs = ['include']
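# When building inside a virtualenv, also search its include/ and lib/
# directories so locally installed protobuf and distributions_shared
# headers and libraries are found.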
ve = os.environ.get('VIRTUAL_ENV')
if ve:
include_dirs.append(os.path.join(ve, 'include'))
library_dirs.append(os.path.join(ve, 'lib'))
extra_compile_args = [
'-DDIST_DEBUG_LEVEL=3',
'-DDIST_THROW_ON_ERROR=1',
'-DLOOM_DEBUG_LEVEL=3',
'-std=c++0x',
'-Wall',
'-Werror',
'-Wno-unused-function',
'-Wno-unused-variable',
'-Wno-sign-compare',
'-Wno-strict-aliasing',
'-O3',
]
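# Build a Cython Extension for a loom submodule: the module's .pyx file is
# derived from its dotted name and compiled together with any extra sources.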
def make_extension(name, sources=()):
sources = list(sources)  # copy so the shared default is never mutated
module = 'loom.' + name
sources.append('{}.{}'.format(module.replace('.', '/'), 'pyx'))
return Extension(
module,
sources=sources,
language='c++',
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries,
extra_compile_args=extra_compile_args,
)
ext_modules = [
make_extension('cFormat', sources=['src/schema.pb.cc']),
]
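# Read the version string out of loom/__init__.py instead of importing the
# package from within its own setup script.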
version = None
with open(os.path.join('loom', '__init__.py')) as f:
for line in f:
if re.match(r"__version__ = '\S+'$", line):
version = line.split()[-1].strip("'")
assert version, 'could not determine version'
with open('README.md') as f:
long_description = f.read()
config = {
'version': version,
'name': 'loom',
'description': 'Streaming cross-cat inference engine',
'long_description': long_description,
'url': 'https://github.com/priorknowledge/loom',
'author': 'Fritz Obermeyer, Jonathan Glidden',
'maintainer': 'Fritz Obermeyer',
'maintainer_email': 'fritz.obermeyer@gmail.com',
'license': 'Revised BSD',
'packages': [
'loom',
'loom.test',
],
'ext_modules': cythonize(ext_modules),
'entry_points': parsable.find_entry_points('loom'),
}
setup(**config)
|
fritzo/loom
|
setup.py
|
Python
|
bsd-3-clause
| 3,709
|
#!/usr/bin/env python
"""
This module checks whether a unit test exists for every node.
If the [-l] option is specified, the result is also written to the log file
'check_unittests_logger_YYYY-MM-DD_HH-MM-SS.log' in the current directory.
**Options**
:-l: -- prints the result into log file
:-log: -- prints the result into log file
:Author: Titiruck Nuntapramote (titiruck.nuntapramote@dfki.de)
:Created: 2012/01/03
"""
import os
import inspect
import sys
import logging
import datetime
if __name__ == '__main__':
file_path = os.path.dirname(os.path.abspath(__file__))
pyspace_path = file_path[:file_path.rfind('pySPACE') - 1]
if not (pyspace_path in sys.path):
sys.path.append(pyspace_path)
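# make the pySPACE package root importable when this script is run directly
# from the source tree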
import pySPACE.missions.nodes as nodes
import pySPACE.resources.data_types as data_types
def existing_test(lst, dir, files):
""" Function to get a list of existing test
**Arguments**
:lst: -- The test suite, to which the tests are added.
:dir: -- The directory which contains the files
:files: -- List of files
"""
for file in files:
# figure out module name
moduleName = dir.strip("./").replace("/", ".")
if len(moduleName):
moduleName += "."
if os.path.splitext(file)[1] == '.py':
moduleName += os.path.splitext(file)[0]
print moduleName
__import__(moduleName)
module = sys.modules[moduleName]
for name, obj in inspect.getmembers(module, inspect.isclass):
if name.endswith('TestCase'):
name = name[:-8]
# print name + ": " + moduleName
lst[name] = moduleName
def get_class(module, lst):
"""
Function to get a list of classes.
For now, this is only used to get the classes in data_types.
**Arguments**
:module: -- Module to get classes from
:lst: -- list of classes
"""
for name, obj in inspect.getmembers(module, inspect.ismodule):
__import__(obj.__name__)
new_module = sys.modules[obj.__name__]
for name2, obj2 in inspect.getmembers(new_module, inspect.isclass):
# print name2 + ":" + str(obj2)
lst[name2] = obj2
if __name__ == '__main__':
log = False
if len(sys.argv) == 2:
arg = sys.argv[1]
if arg == '-l' or arg == '-log':
log = True
else:
print "usage: run_tests.py [-l|-log](enable logging)"
exit()
elif len(sys.argv) > 2:
print "usage: run_tests.py [-l|-log](enable logging)"
exit()
if log is True:
dateTag = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
LOG_FILENAME = "check_unittests_logger_%s.log" % dateTag
FORMATTER = '%(message)s'
logging.basicConfig(
filename=LOG_FILENAME, level=logging.INFO, format=FORMATTER)
# list for nodes
nodelist = nodes.DEFAULT_NODE_MAPPING
# collect classes from pySPACE.resources.data_types
typelist = {}
get_class(data_types, typelist)
testnode = {}
testtype = {}
# list of node class with no unittests
NO_testnode = {}
NO_testtype = {}
# the HAVE_test* lists are not simply the complement of test*,
# because the mapping between node names and node classes is not one-to-one:
# a single node class, e.g. FIRFilterNode, is registered as BandPassFilter,
# LowPassFilter, and more
HAVE_testnode = {}
HAVE_testtype = {}
# collect test cases from nodes
os.path.walk(os.curdir + '/unittests/nodes', existing_test, testnode)
# collect test cases from data_types
os.path.walk(os.curdir + '/unittests/data_types', existing_test, testtype)
# compare tests with nodes and then print
print "Nodes without test ------------------------------------------"
for key, value in sorted(nodelist.iteritems(), key=lambda t: str(t[1])):
# -6 comes from deleting "Node'>"
if (str(value).split('.')[-1])[:-6] not in testnode:
NO_testnode[key] = value
# print "\t" + key + ": "
print "\t\t" + str(value).split('\'')[1]
else:
HAVE_testnode[key] = value
print "Existing nodes: ", len(nodelist)
print "Nodes with test: ", len(HAVE_testnode), \
"(actual existing unittests:", len(testnode), ")"
print "Nodes without test: ", len(NO_testnode)
# compare tests with data_types and then print
print "Types without test ------------------------------------------"
for key, value in typelist.iteritems():
if key not in testtype:
NO_testtype[key] = value
print "\t" + key + ": "
print "\t " + str(value).split('\'')[1]
else:
HAVE_testtype[key] = value
print "Existing types: ", len(typelist)
print "Types with test: ", len(HAVE_testtype)
print "Types without test: ", len(NO_testtype)
print "-------------------------------------------------------------"
print "Total existing tests: ", len(testnode) + len(testtype)
# if logging is enabled, print to log
if log is True:
logging.info("Nodes without test -----------------------------------")
for key, value in NO_testnode.iteritems():
logging.info("\t" + key + ": " + str(value)[22:-2])
logging.info("\tExisting nodes: " + str(len(nodelist)))
logging.info("\tNodes with test: " + str(len(HAVE_testnode)) +
"(actual existing tests: " + str(len(testnode)) + ")")
logging.info("\tNodes without test: " + str(len(NO_testnode)))
logging.info("Types without test -----------------------------------")
for key, value in NO_testtype.iteritems():
logging.info("\t" + key + ": " + str(value)[22:-2])
logging.info("\tExisting types: " + str(len(typelist)))
logging.info("\tTypes with test: " + str(len(HAVE_testtype)))
logging.info("\tTypes without test: " + str(len(NO_testtype)))
|
pyspace/pyspace
|
pySPACE/tests/check_unittests.py
|
Python
|
bsd-3-clause
| 6,000
|
from satchmo.configuration import *
from django.utils.translation import ugettext_lazy as _
# this is so that the translation utility will pick up the string
gettext = lambda s: s
_strings = (gettext('CreditCard'), gettext('Credit Card'))
PAYMENT_MODULES = config_get('PAYMENT', 'MODULES')
PAYMENT_MODULES.add_choice(('PAYMENT_AUTHORIZENET', 'Authorize.net'))
PAYMENT_GROUP = ConfigurationGroup('PAYMENT_AUTHORIZENET',
_('Authorize.net Payment Settings'),
requires=PAYMENT_MODULES,
ordering=101)
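# Register every setting this backend needs with Satchmo's configuration
# framework so the values below can be adjusted without code changes.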
config_register_list(
StringValue(PAYMENT_GROUP,
'CONNECTION',
description=_("Submit to URL"),
help_text=_("""This is the address to submit live transactions."""),
default='https://secure.authorize.net/gateway/transact.dll'),
StringValue(PAYMENT_GROUP,
'CONNECTION_TEST',
description=_("Submit to Test URL"),
help_text=("""A Quick note on the urls.<br/>
If you are posting to https://test.authorize.net/gateway/transact.dll,
and you are not using an account whose API login ID starts with
"cpdev" or "cnpdev", you will get an Error 13 message.
Make sure you are posting to https://certification.authorize.net/gateway/transact.dll
for test transactions if you do not have a cpdev or cnpdev.
"""),
default='https://test.authorize.net/gateway/transact.dll'),
BooleanValue(PAYMENT_GROUP,
'SSL',
description=_("Use SSL for the checkout pages?"),
default=False),
BooleanValue(PAYMENT_GROUP,
'LIVE',
description=_("Accept real payments"),
help_text=_("False if you want to submit to the test urls."),
default=False),
BooleanValue(PAYMENT_GROUP,
'SIMULATE',
description=_("Force a test post?"),
help_text=_("True if you want to submit to the live url using a test flag, which won't be accepted."),
default=False),
ModuleValue(PAYMENT_GROUP,
'MODULE',
description=_('Implementation module'),
hidden=True,
default = 'satchmo.payment.modules.authorizenet'),
StringValue(PAYMENT_GROUP,
'KEY',
description=_("Module key"),
hidden=True,
default = 'AUTHORIZENET'),
StringValue(PAYMENT_GROUP,
'LABEL',
description=_('English name for this group on the checkout screens'),
default = 'Credit Cards',
help_text = _('This will be passed to the translation utility')),
StringValue(PAYMENT_GROUP,
'URL_BASE',
description=_('The url base used for constructing urlpatterns which will use this module'),
default = r'^credit/'),
MultipleStringValue(PAYMENT_GROUP,
'CREDITCHOICES',
description=_('Available credit cards'),
choices = (
(('American Express', 'American Express')),
(('Visa','Visa')),
(('Mastercard','Mastercard')),
(('Discover','Discover'))),
default = ('Visa', 'Mastercard', 'Discover')),
StringValue(PAYMENT_GROUP,
'LOGIN',
description=_('Your authorize.net transaction login'),
default=""),
StringValue(PAYMENT_GROUP,
'TRANKEY',
description=_('Your authorize.net transaction key'),
default=""),
BooleanValue(PAYMENT_GROUP,
'CAPTURE',
description=_('Capture Payment?'),
default=True,
help_text=_('IMPORTANT: If false, you will need to manually go to your authorize.net merchant account and capture payments. Setting this to false means you are authorizing the card only, not capturing payment.')),
BooleanValue(PAYMENT_GROUP,
'EXTRA_LOGGING',
description=_("Verbose logs"),
help_text=_("Add extensive logs during post."),
default=False)
)
ARB_ENABLED = config_register(
BooleanValue(PAYMENT_GROUP,
'ARB',
description=_('Enable ARB?'),
default=False,
help_text=_('Enable ARB processing for setting up subscriptions. You must have this enabled in your Authorize account for it to work.')))
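# The ARB endpoint settings below use requires=ARB_ENABLED, so they only
# take effect when ARB support is switched on.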
config_register(
StringValue(PAYMENT_GROUP,
'ARB_CONNECTION',
description=_("Submit to URL (ARB)"),
help_text=_("""This is the address to submit live transactions for ARB."""),
requires=ARB_ENABLED,
default='https://api.authorize.net/xml/v1/request.api'))
config_register(
StringValue(PAYMENT_GROUP,
'ARB_CONNECTION_TEST',
description=_("Submit to Test URL (ARB)"),
help_text=_("""This is the address to submit test transactions for ARB."""),
requires=ARB_ENABLED,
default='https://apitest.authorize.net/xml/v1/request.api'))
|
roadhead/satchmo
|
satchmo/payment/modules/authorizenet/config.py
|
Python
|
bsd-3-clause
| 4,767
|
# -*- coding: utf-8 -*-
"""
Control your screen(s) layout easily.
This module allows you to handle your screen outputs directly from your bar!
- Detect and propose every possible screen combinations
- Switch between combinations using click events and mouse scroll
- Activate the screen or screen combination on a single click
- It will detect any newly connected or removed screen automatically
For convenience, this module also proposes some added features:
- Dynamic parameters for POSITION and WORKSPACES assignment (see below)
- Automatic fallback to a given screen or screen combination when no more
screen is available (handy for laptops)
- Automatically apply this screen combination on start: no need for xorg!
- Automatically move workspaces to screens when they are available
Configuration parameters:
- cache_timeout: how often to (re)detect the outputs
- fallback: when the current output layout is not available anymore,
fallback to this layout if available. This is very handy if you
have a laptop and switched to an external screen for presentation
and want to automatically fallback to your laptop screen when you
disconnect the external screen.
- force_on_start: switch to the given combination mode if available
when the module starts (saves you from having to configure xorg)
- format_clone: string used to display a 'clone' combination
- format_extend: string used to display an 'extend' combination
Dynamic configuration parameters:
- <OUTPUT>_pos: apply the given position to the OUTPUT
Example: DP1_pos = "-2560x0"
Example: DP1_pos = "above eDP1"
Example: DP1_pos = "below eDP1"
Example: DP1_pos = "left-of LVDS1"
Example: DP1_pos = "right-of eDP1"
- <OUTPUT>_workspaces: comma separated list of workspaces to move to
the given OUTPUT when it is activated
Example: DP1_workspaces = "1,2,3"
Example config:
xrandr {
force_on_start = "eDP1+DP1"
DP1_pos = "left-of eDP1"
VGA_workspaces = "7"
}
@author ultrabug
"""
import shlex
from collections import deque
from collections import OrderedDict
from itertools import combinations
from subprocess import call, Popen, PIPE
from syslog import syslog, LOG_INFO
from time import sleep, time
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 10
fallback = True
fixed_width = True
force_on_start = None
format_clone = '='
format_extend = '+'
def __init__(self):
"""
"""
self.active_comb = None
self.active_layout = None
self.active_mode = 'extend'
self.displayed = None
self.max_width = 0
def _get_layout(self):
"""
Get the outputs layout from xrandr and try to detect the
currently active layout as best as we can on start.
"""
connected = list()
active_layout = list()
disconnected = list()
layout = OrderedDict({
'connected': OrderedDict(),
'disconnected': OrderedDict()
})
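# parse `xrandr` output line by line, sorting each output into the
# connected / disconnected buckets of the layout mapping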
current = Popen(['xrandr'], stdout=PIPE)
for line in current.stdout.readlines():
try:
# python3: xrandr output comes back as bytes, decode it
line = line.decode()
except:
pass
try:
s = line.split(' ')
if s[1] == 'connected':
output, state = s[0], s[1]
if s[2][0] == '(':
mode, infos = None, ' '.join(s[2:]).strip('\n')
else:
mode, infos = s[2], ' '.join(s[3:]).strip('\n')
active_layout.append(output)
connected.append(output)
elif s[1] == 'disconnected':
output, state = s[0], s[1]
mode, infos = None, ' '.join(s[2:]).strip('\n')
disconnected.append(output)
else:
continue
except Exception as err:
syslog(LOG_INFO, 'xrandr error="{}"'.format(err))
else:
layout[state][output] = {
'infos': infos,
'mode': mode,
'state': state
}
# initialize the active layout
if self.active_layout is None:
self.active_comb = tuple(active_layout)
self.active_layout = self._get_string_and_set_width(
tuple(active_layout), self.active_mode)
return layout
def _set_available_combinations(self):
"""
Generate all connected outputs combinations and
set the max display width while iterating.
"""
available_combinations = set()
combinations_map = {}
self.max_width = 0
for output in range(len(self.layout['connected']) + 1):
for comb in combinations(self.layout['connected'], output):
if comb:
for mode in ['clone', 'extend']:
string = self._get_string_and_set_width(comb, mode)
if len(comb) == 1:
combinations_map[string] = (comb, None)
else:
combinations_map[string] = (comb, mode)
available_combinations.add(string)
self.available_combinations = deque(available_combinations)
self.combinations_map = combinations_map
def _get_string_and_set_width(self, combination, mode):
"""
Construct the string to be displayed and record the max width.
"""
show = '{}'.format(self._separator(mode)).join(combination)
show = show.rstrip('{}'.format(self._separator(mode)))
self.max_width = max([self.max_width, len(show)])
return show
def _choose_what_to_display(self, force_refresh=False):
"""
Choose what combination to display on the bar.
By default we try to display the active layout on the first run, else
we display the last selected combination.
"""
for _ in range(len(self.available_combinations)):
if (self.displayed is None and
self.available_combinations[0] == self.active_layout):
self.displayed = self.available_combinations[0]
break
else:
if self.displayed == self.available_combinations[0]:
break
else:
self.available_combinations.rotate(1)
else:
if force_refresh:
self.displayed = self.available_combinations[0]
else:
syslog(LOG_INFO,
'xrandr error="displayed combination is not available"')
def _center(self, s):
"""
Center the given string on the detected max width.
"""
fmt = '{:^%d}' % self.max_width
return fmt.format(s)
def _apply(self, force=False):
"""
Call xrandr and apply the selected (displayed) combination mode.
"""
if self.displayed == self.active_layout and not force:
# no change, do nothing
return
combination, mode = self.combinations_map.get(self.displayed,
(None, None))
if combination is None and mode is None:
# displayed combination cannot be activated, ignore
return
cmd = 'xrandr'
outputs = list(self.layout['connected'].keys())
outputs += list(self.layout['disconnected'].keys())
previous_output = None
for output in outputs:
cmd += ' --output {}'.format(output)
# this output is part of the selected combination: turn it on
if output in combination:
pos = getattr(self, '{}_pos'.format(output), '0x0')
# clone mode mirrors this output onto the previously configured one
if mode == 'clone' and previous_output is not None:
cmd += ' --auto --same-as {}'.format(previous_output)
else:
if ('above' in pos or 'below' in pos or 'left-of' in pos or
'right-of' in pos):
cmd += ' --auto --{} --rotate normal'.format(pos)
else:
cmd += ' --auto --pos {} --rotate normal'.format(pos)
previous_output = output
else:
cmd += ' --off'
# run the assembled xrandr command and record the new layout on success
code = call(shlex.split(cmd))
if code == 0:
self.active_comb = combination
self.active_layout = self.displayed
self.active_mode = mode
syslog(LOG_INFO, 'command "{}" exit code {}'.format(cmd, code))
# move workspaces to outputs as configured
self._apply_workspaces(combination, mode)
def _apply_workspaces(self, combination, mode):
"""
Allow the user to move a comma-separated list of workspaces to the
given output when it is activated.
Example:
- DP1_workspaces = "1,2,3"
"""
if len(combination) > 1 and mode == 'extend':
sleep(3)
for output in combination:
workspaces = getattr(self, '{}_workspaces'.format(output),
'').split(',')
for workspace in workspaces:
if not workspace:
continue
# switch to workspace
cmd = 'i3-msg workspace "{}"'.format(workspace)
call(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
# move it to output
cmd = 'i3-msg move workspace to output "{}"'.format(output)
call(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
# log this
syslog(LOG_INFO,
'moved workspace {} to output {}'.format(workspace,
output))
def _refresh_py3status(self):
"""
Send a SIGUSR1 signal to py3status to force a bar refresh.
"""
call(shlex.split('killall -s USR1 py3status'))
def _fallback_to_available_output(self):
"""
Fallback to the first available output when the active layout
was composed of only one output.
This allows us to avoid cases where you get stuck with a black screen
on your laptop by switching back to the integrated screen
automatically!
"""
if len(self.active_comb) == 1:
self._choose_what_to_display(force_refresh=True)
self._apply()
self._refresh_py3status()
def _force_force_on_start(self):
"""
Force the user configured mode on start.
"""
if self.force_on_start in self.available_combinations:
self.displayed = self.force_on_start
self.force_on_start = None
self._choose_what_to_display(force_refresh=True)
self._apply(force=True)
self._refresh_py3status()
def _separator(self, mode):
"""
Return the separator for the given mode.
"""
if mode == 'extend':
return self.format_extend
if mode == 'clone':
return self.format_clone
def _switch_selection(self, direction):
self.available_combinations.rotate(direction)
self.displayed = self.available_combinations[0]
def on_click(self, i3s_output_list, i3s_config, event):
"""
Click events
- left click & scroll up/down: switch between modes
- right click: apply selected mode
- middle click: force refresh of available modes
"""
button = event['button']
if button == 4:
self._switch_selection(-1)
if button in [1, 5]:
self._switch_selection(1)
if button == 2:
self._choose_what_to_display(force_refresh=True)
if button == 3:
self._apply()
def xrandr(self, i3s_output_list, i3s_config):
"""
This is the main py3status method, it will orchestrate what's being
displayed on the bar.
"""
self.layout = self._get_layout()
self._set_available_combinations()
self._choose_what_to_display()
if self.fixed_width is True:
full_text = self._center(self.displayed)
else:
full_text = self.displayed
response = {
'cached_until': time() + self.cache_timeout,
'full_text': full_text
}
# coloration
if self.displayed == self.active_layout:
response['color'] = i3s_config['color_good']
elif self.displayed not in self.available_combinations:
response['color'] = i3s_config['color_bad']
# force default layout setup
if self.force_on_start is not None:
sleep(1)
self._force_force_on_start()
# fallback detection
if self.active_layout not in self.available_combinations:
response['color'] = i3s_config['color_degraded']
if self.fallback is True:
self._fallback_to_available_output()
return response
if __name__ == "__main__":
"""
Test this module by calling it directly.
"""
x = Py3status()
config = {
'color_bad': '#FF0000',
'color_degraded': '#FFFF00',
'color_good': '#00FF00'
}
while True:
print(x.xrandr([], config))
sleep(1)
|
Shir0kamii/py3status
|
py3status/modules/xrandr.py
|
Python
|
bsd-3-clause
| 13,758
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import importlib
import json
import re
import unittest
from datetime import datetime
from xml.dom import minidom
from django.core import management, serializers
from django.db import connection, transaction
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, mock, override_settings,
skipUnlessDBFeature,
)
from django.test.utils import Approximate
from django.utils import six
from django.utils.six import StringIO
from .models import (
Actor, Article, Author, AuthorProfile, Category, Movie, Player, Score,
Team,
)
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
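# PyYAML is optional: the YAML serializer test cases below are skipped via
# unittest.skipUnless(HAS_YAML, ...) when it is not installed.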
@override_settings(
SERIALIZATION_MODULES={
"json2": "django.core.serializers.json",
}
)
class SerializerRegistrationTests(SimpleTestCase):
def setUp(self):
self.old_serializers = serializers._serializers
serializers._serializers = {}
def tearDown(self):
serializers._serializers = self.old_serializers
def test_register(self):
"Registering a new serializer populates the full registry. Refs #14823"
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertIn('json3', public_formats)
self.assertIn('json2', public_formats)
self.assertIn('xml', public_formats)
def test_unregister(self):
"Unregistering a serializer doesn't cause the registry to be repopulated. Refs #14823"
serializers.unregister_serializer('xml')
serializers.register_serializer('json3', 'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertNotIn('xml', public_formats)
self.assertIn('json3', public_formats)
def test_builtin_serializers(self):
"Requesting a list of serializer formats popuates the registry"
all_formats = set(serializers.get_serializer_formats())
public_formats = set(serializers.get_public_serializer_formats())
self.assertIn('xml', all_formats)
self.assertIn('xml', public_formats)
self.assertIn('json2', all_formats)
self.assertIn('json2', public_formats)
self.assertIn('python', all_formats)
self.assertNotIn('python', public_formats)
class SerializersTestBase(object):
@staticmethod
def _comparison_value(value):
return value
def setUp(self):
sports = Category.objects.create(name="Sports")
music = Category.objects.create(name="Music")
op_ed = Category.objects.create(name="Op-Ed")
self.joe = Author.objects.create(name="Joe")
self.jane = Author.objects.create(name="Jane")
self.a1 = Article(
author=self.jane,
headline="Poker has no place on ESPN",
pub_date=datetime(2006, 6, 16, 11, 00)
)
self.a1.save()
self.a1.categories = [sports, op_ed]
self.a2 = Article(
author=self.joe,
headline="Time to reform copyright",
pub_date=datetime(2006, 6, 16, 13, 00, 11, 345)
)
self.a2.save()
self.a2.categories = [music, op_ed]
def test_serialize(self):
"""Tests that basic serialization works."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
self.assertTrue(self._validate_output(serial_str))
def test_serializer_roundtrip(self):
"""Tests that serialized content can be deserialized."""
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
models = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(len(models), 2)
def test_altering_serialized_output(self):
"""
Tests the ability to create new objects by
modifying serialized content.
"""
old_headline = "Poker has no place on ESPN"
new_headline = "Poker has no place on television"
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all())
serial_str = serial_str.replace(old_headline, new_headline)
models = list(serializers.deserialize(self.serializer_name, serial_str))
# Prior to saving, old headline is in place
self.assertTrue(Article.objects.filter(headline=old_headline))
self.assertFalse(Article.objects.filter(headline=new_headline))
for model in models:
model.save()
# After saving, new headline is in place
self.assertTrue(Article.objects.filter(headline=new_headline))
self.assertFalse(Article.objects.filter(headline=old_headline))
def test_one_to_one_as_pk(self):
"""
Tests that if you use your own primary key field
(such as a OneToOneField), it doesn't appear in the
serialized field list - it replaces the pk identifier.
"""
profile = AuthorProfile(author=self.joe,
date_of_birth=datetime(1970, 1, 1))
profile.save()
serial_str = serializers.serialize(self.serializer_name,
AuthorProfile.objects.all())
self.assertFalse(self._get_field_values(serial_str, 'author'))
for obj in serializers.deserialize(self.serializer_name, serial_str):
self.assertEqual(obj.object.pk, self._comparison_value(self.joe.pk))
def test_serialize_field_subset(self):
"""Tests that output can be restricted to a subset of fields"""
valid_fields = ('headline', 'pub_date')
invalid_fields = ("author", "categories")
serial_str = serializers.serialize(self.serializer_name,
Article.objects.all(),
fields=valid_fields)
for field_name in invalid_fields:
self.assertFalse(self._get_field_values(serial_str, field_name))
for field_name in valid_fields:
self.assertTrue(self._get_field_values(serial_str, field_name))
def test_serialize_unicode(self):
"""Tests that unicode makes the roundtrip intact"""
actor_name = "Za\u017c\u00f3\u0142\u0107"
movie_title = 'G\u0119\u015bl\u0105 ja\u017a\u0144'
ac = Actor(name=actor_name)
mv = Movie(title=movie_title, actor=ac)
ac.save()
mv.save()
serial_str = serializers.serialize(self.serializer_name, [mv])
self.assertEqual(self._get_field_values(serial_str, "title")[0], movie_title)
self.assertEqual(self._get_field_values(serial_str, "actor")[0], actor_name)
obj_list = list(serializers.deserialize(self.serializer_name, serial_str))
mv_obj = obj_list[0].object
self.assertEqual(mv_obj.title, movie_title)
def test_serialize_superfluous_queries(self):
"""Ensure no superfluous queries are made when serializing ForeignKeys
#17602
"""
ac = Actor(name='Actor name')
ac.save()
mv = Movie(title='Movie title', actor_id=ac.pk)
mv.save()
with self.assertNumQueries(0):
serializers.serialize(self.serializer_name, [mv])
def test_serialize_with_null_pk(self):
"""
Tests that serialized data with no primary key results
in a model instance with no id
"""
category = Category(name="Reference")
serial_str = serializers.serialize(self.serializer_name, [category])
pk_value = self._get_pk_values(serial_str)[0]
self.assertFalse(pk_value)
cat_obj = list(serializers.deserialize(self.serializer_name,
serial_str))[0].object
self.assertEqual(cat_obj.id, None)
def test_float_serialization(self):
"""Tests that float values serialize and deserialize intact"""
sc = Score(score=3.4)
sc.save()
serial_str = serializers.serialize(self.serializer_name, [sc])
deserial_objs = list(serializers.deserialize(self.serializer_name,
serial_str))
self.assertEqual(deserial_objs[0].object.score, Approximate(3.4, places=1))
def test_deferred_field_serialization(self):
author = Author.objects.create(name='Victor Hugo')
author = Author.objects.defer('name').get(pk=author.pk)
serial_str = serializers.serialize(self.serializer_name, [author])
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertIsInstance(deserial_objs[0].object, Author)
def test_custom_field_serialization(self):
"""Tests that custom fields serialize and deserialize intact"""
team_str = "Spartak Moskva"
player = Player()
player.name = "Soslan Djanaev"
player.rank = 1
player.team = Team(team_str)
player.save()
serial_str = serializers.serialize(self.serializer_name,
Player.objects.all())
team = self._get_field_values(serial_str, "team")
self.assertTrue(team)
self.assertEqual(team[0], team_str)
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(deserial_objs[0].object.team.to_string(),
player.team.to_string())
def test_pre_1000ad_date(self):
"""Tests that year values before 1000AD are properly formatted"""
# Regression for #12524 -- dates before 1000AD get prefixed
# 0's on the year
a = Article.objects.create(
author=self.jane,
headline="Nobody remembers the early years",
pub_date=datetime(1, 2, 3, 4, 5, 6))
serial_str = serializers.serialize(self.serializer_name, [a])
date_values = self._get_field_values(serial_str, "pub_date")
self.assertEqual(date_values[0].replace('T', ' '), "0001-02-03 04:05:06")
def test_pkless_serialized_strings(self):
"""
Tests that serialized strings without PKs
can be turned into models
"""
deserial_objs = list(serializers.deserialize(self.serializer_name,
self.pkless_str))
for obj in deserial_objs:
self.assertFalse(obj.object.id)
obj.save()
self.assertEqual(Category.objects.all().count(), 5)
def test_deterministic_mapping_ordering(self):
"""Mapping such as fields should be deterministically ordered. (#24558)"""
output = serializers.serialize(self.serializer_name, [self.a1], indent=2)
categories = self.a1.categories.values_list('pk', flat=True)
self.assertEqual(output, self.mapping_ordering_str % {
'article_pk': self.a1.pk,
'author_pk': self.a1.author_id,
'first_category_pk': categories[0],
'second_category_pk': categories[1],
})
def test_deserialize_force_insert(self):
"""Tests that deserialized content can be saved with force_insert as a parameter."""
serial_str = serializers.serialize(self.serializer_name, [self.a1])
deserial_obj = list(serializers.deserialize(self.serializer_name, serial_str))[0]
with mock.patch('django.db.models.Model') as mock_model:
deserial_obj.save(force_insert=False)
mock_model.save_base.assert_called_with(deserial_obj.object, raw=True, using=None, force_insert=False)
class SerializersTransactionTestBase(object):
available_apps = ['serializers']
@skipUnlessDBFeature('supports_forward_references')
def test_forward_refs(self):
"""
Tests that objects ids can be referenced before they are
defined in the serialization data.
"""
# The deserialization process needs to run in a transaction in order
# to test forward reference handling.
with transaction.atomic():
objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str)
with connection.constraint_checks_disabled():
for obj in objs:
obj.save()
for model_cls in (Category, Author, Article):
self.assertEqual(model_cls.objects.all().count(), 1)
art_obj = Article.objects.all()[0]
self.assertEqual(art_obj.categories.all().count(), 1)
self.assertEqual(art_obj.author.name, "Agnes")
class XmlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "xml"
pkless_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.category">
<field type="CharField" name="name">Reference</field>
</object>
<object model="serializers.category">
<field type="CharField" name="name">Non-fiction</field>
</object>
</django-objects>"""
mapping_ordering_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.article" pk="%(article_pk)s">
<field name="author" rel="ManyToOneRel" to="serializers.author">%(author_pk)s</field>
<field name="headline" type="CharField">Poker has no place on ESPN</field>
<field name="pub_date" type="DateTimeField">2006-06-16T11:00:00</field>
<field name="categories" rel="ManyToManyRel" to="serializers.category"><object pk="%(first_category_pk)s"></object><object pk="%(second_category_pk)s"></object></field>
<field name="meta_data" rel="ManyToManyRel" to="serializers.categorymetadata"></field>
</object>
</django-objects>"""
@staticmethod
def _comparison_value(value):
# The XML serializer handles everything as strings, so comparisons
# need to be performed on the stringified value
return six.text_type(value)
@staticmethod
def _validate_output(serial_str):
try:
minidom.parseString(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("object")
for field in fields:
ret_list.append(field.getAttribute("pk"))
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("field")
for field in fields:
if field.getAttribute("name") == field_name:
temp = []
for child in field.childNodes:
temp.append(child.nodeValue)
ret_list.append("".join(temp))
return ret_list
def test_control_char_failure(self):
"""
Serializing control characters with XML should fail as those characters
are not supported in the XML 1.0 standard (except HT, LF, CR).
"""
self.a1.headline = "This contains \u0001 control \u0011 chars"
msg = "Article.headline (pk:%s) contains unserializable characters" % self.a1.pk
with self.assertRaisesMessage(ValueError, msg):
serializers.serialize(self.serializer_name, [self.a1])
self.a1.headline = "HT \u0009, LF \u000A, and CR \u000D are allowed"
self.assertIn(
"HT \t, LF \n, and CR \r are allowed",
serializers.serialize(self.serializer_name, [self.a1])
)
class XmlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "xml"
fwd_ref_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="1" model="serializers.article">
<field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
<field type="CharField" name="headline">Forward references pose no problem</field>
<field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field>
<field to="serializers.category" name="categories" rel="ManyToManyRel">
<object pk="1"></object>
</field>
<field to="serializers.categorymetadata" name="meta_data" rel="ManyToManyRel"></field>
</object>
<object pk="1" model="serializers.author">
<field type="CharField" name="name">Agnes</field>
</object>
<object pk="1" model="serializers.category">
<field type="CharField" name="name">Reference</field></object>
</django-objects>"""
class JsonSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "json"
pkless_str = """[
{
"pk": null,
"model": "serializers.category",
"fields": {"name": "Reference"}
}, {
"model": "serializers.category",
"fields": {"name": "Non-fiction"}
}]"""
mapping_ordering_str = """[
{
"model": "serializers.article",
"pk": %(article_pk)s,
"fields": {
"author": %(author_pk)s,
"headline": "Poker has no place on ESPN",
"pub_date": "2006-06-16T11:00:00",
"categories": [
%(first_category_pk)s,
%(second_category_pk)s
],
"meta_data": []
}
}
]
"""
@staticmethod
def _validate_output(serial_str):
try:
json.loads(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
serial_list = json.loads(serial_str)
for obj_dict in serial_list:
if field_name in obj_dict["fields"]:
ret_list.append(obj_dict["fields"][field_name])
return ret_list
def test_indentation_whitespace(self):
Score.objects.create(score=5.0)
Score.objects.create(score=6.0)
qset = Score.objects.all()
s = serializers.json.Serializer()
json_data = s.serialize(qset, indent=2)
for line in json_data.splitlines():
if re.search(r'.+,\s*$', line):
self.assertEqual(line, line.rstrip())
def test_helpful_error_message_invalid_pk(self):
"""
If there is an invalid primary key, the error message should contain
the model associated with it.
"""
test_string = """[{
"pk": "badpk",
"model": "serializers.player",
"fields": {
"name": "Bob",
"rank": 1,
"team": "Team"
}
}]"""
with self.assertRaisesMessage(serializers.base.DeserializationError, "(serializers.player:pk=badpk)"):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_invalid_field(self):
"""
If there is an invalid field value, the error message should contain
the model associated with it.
"""
test_string = """[{
"pk": "1",
"model": "serializers.player",
"fields": {
"name": "Bob",
"rank": "invalidint",
"team": "Team"
}
}]"""
expected = "(serializers.player:pk=1) field_value was 'invalidint'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_foreign_keys(self):
"""
Invalid foreign keys with a natural key should throw a helpful error
message, such as what the failing key is.
"""
test_string = """[{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Unknown foreign key",
"meta_data": [
"doesnotexist",
"metadata"
]
}
}]"""
key = ["doesnotexist", "metadata"]
expected = "(serializers.category:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_many2many_non_natural(self):
"""
Invalid many-to-many keys should throw a helpful error message.
"""
test_string = """[{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"categories": [1, "doesnotexist"]
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
}]"""
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
list(serializers.deserialize('json', test_string))
def test_helpful_error_message_for_many2many_natural1(self):
"""
Invalid many-to-many keys should throw a helpful error message.
This tests the code path where one of a list of natural keys is invalid.
"""
test_string = """[{
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {
"kind": "author",
"name": "meta1",
"value": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [
["author", "meta1"],
["doesnotexist", "meta1"],
["author", "meta1"]
]
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
key = ["doesnotexist", "meta1"]
expected = "(serializers.article:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
for obj in serializers.deserialize('json', test_string):
obj.save()
def test_helpful_error_message_for_many2many_natural2(self):
"""
Invalid many-to-many keys should throw a helpful error message. This
tests the code path where a natural many-to-many key has only a single
value.
"""
test_string = """[{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [1, "doesnotexist"]
}
}, {
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {
"kind": "author",
"name": "meta1",
"value": "Agnes"
}
}, {
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(serializers.base.DeserializationError, expected):
for obj in serializers.deserialize('json', test_string, ignore=False):
obj.save()
class JsonSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "json"
fwd_ref_str = """[
{
"pk": 1,
"model": "serializers.article",
"fields": {
"headline": "Forward references pose no problem",
"pub_date": "2006-06-16T15:00:00",
"categories": [1],
"author": 1
}
},
{
"pk": 1,
"model": "serializers.category",
"fields": {
"name": "Reference"
}
},
{
"pk": 1,
"model": "serializers.author",
"fields": {
"name": "Agnes"
}
}]"""
YAML_IMPORT_ERROR_MESSAGE = r'No module named yaml'
class YamlImportModuleMock(object):
"""Provides a wrapped import_module function to simulate yaml ImportError
In order to run tests that verify the behavior of the YAML serializer
when run on a system that has yaml installed (like the django CI server),
mock import_module, so that it raises an ImportError when the yaml
serializer is being imported. The importlib.import_module() call is
made in serializers.register_serializer().
Refs: #12756
"""
def __init__(self):
self._import_module = importlib.import_module
def import_module(self, module_path):
if module_path == serializers.BUILTIN_SERIALIZERS['yaml']:
raise ImportError(YAML_IMPORT_ERROR_MESSAGE)
return self._import_module(module_path)
class NoYamlSerializerTestCase(SimpleTestCase):
"""Not having pyyaml installed provides a misleading error
Refs: #12756
"""
@classmethod
def setUpClass(cls):
"""Removes imported yaml and stubs importlib.import_module"""
super(NoYamlSerializerTestCase, cls).setUpClass()
cls._import_module_mock = YamlImportModuleMock()
importlib.import_module = cls._import_module_mock.import_module
# clear out cached serializers to emulate yaml missing
serializers._serializers = {}
@classmethod
def tearDownClass(cls):
"""Puts yaml back if necessary"""
super(NoYamlSerializerTestCase, cls).tearDownClass()
importlib.import_module = cls._import_module_mock._import_module
# clear out cached serializers to clean out BadSerializer instances
serializers._serializers = {}
def test_serializer_pyyaml_error_message(self):
"""Using yaml serializer without pyyaml raises ImportError"""
jane = Author(name="Jane")
self.assertRaises(ImportError, serializers.serialize, "yaml", [jane])
def test_deserializer_pyyaml_error_message(self):
"""Using yaml deserializer without pyyaml raises ImportError"""
self.assertRaises(ImportError, serializers.deserialize, "yaml", "")
def test_dumpdata_pyyaml_error_message(self):
"""Calling dumpdata produces an error when yaml package missing"""
with six.assertRaisesRegex(self, management.CommandError, YAML_IMPORT_ERROR_MESSAGE):
management.call_command('dumpdata', format='yaml')
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
pkless_str = """- fields:
name: Reference
pk: null
model: serializers.category
- fields:
name: Non-fiction
model: serializers.category"""
mapping_ordering_str = """- model: serializers.article
pk: %(article_pk)s
fields:
author: %(author_pk)s
headline: Poker has no place on ESPN
pub_date: 2006-06-16 11:00:00
categories: [%(first_category_pk)s, %(second_category_pk)s]
meta_data: []
"""
@staticmethod
def _validate_output(serial_str):
try:
yaml.safe_load(StringIO(serial_str))
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
if "fields" in obj_dict and field_name in obj_dict["fields"]:
field_value = obj_dict["fields"][field_name]
# yaml.safe_load will return non-string objects for some
# of the fields we are interested in, this ensures that
# everything comes back as a string
if isinstance(field_value, six.string_types):
ret_list.append(field_value)
else:
ret_list.append(str(field_value))
return ret_list
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
class YamlSerializerTransactionTestCase(SerializersTransactionTestBase, TransactionTestCase):
serializer_name = "yaml"
fwd_ref_str = """- fields:
headline: Forward references pose no problem
pub_date: 2006-06-16 15:00:00
categories: [1]
author: 1
pk: 1
model: serializers.article
- fields:
name: Reference
pk: 1
model: serializers.category
- fields:
name: Agnes
pk: 1
model: serializers.author"""
|
jdelight/django
|
tests/serializers/tests.py
|
Python
|
bsd-3-clause
| 29,932
|
#!/usr/bin/env python
from __future__ import print_function
from builtins import input
import sys
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.pmag as pmag
def main():
"""
NAME
qqunf.py
DESCRIPTION
makes qq plot from input data against uniform distribution
SYNTAX
qqunf.py [command line options]
OPTIONS
-h help message
-f FILE, specify file on command line
"""
fmt,plot='svg',0
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
elif '-f' in sys.argv: # ask for filename
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
lines=f.readlines()
Data=[]
for line in lines:
line=line.replace('\n','')
if '\t' in line: # read in the data from standard input
rec=line.split('\t') # split each line on space to get records
else:
rec=line.split() # split each line on space to get records
Data.append(float(rec[0]))
# a QQ plot needs a minimum number of data points to be meaningful
if len(Data) >=10:
QQ={'unf1':1}
pmagplotlib.plot_init(QQ['unf1'],5,5)
pmagplotlib.plot_qq_unf(QQ['unf1'],Data,'QQ-Uniform') # make plot
else:
print('you need N> 10')
sys.exit()
pmagplotlib.draw_figs(QQ)
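# decide how the plot gets written out: server mode adds borders and saves,
# plot mode saves straight to file, otherwise prompt before saving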
files={}
for key in list(QQ.keys()):
files[key]=key+'.'+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles={}
titles['unf1']='QQ-Uniform Plot'
QQ = pmagplotlib.add_borders(QQ,titles,black,purple)
pmagplotlib.save_plots(QQ,files)
elif plot==1:
files['qq']=file+'.'+fmt
pmagplotlib.save_plots(QQ,files)
else:
ans=input(" S[a]ve to save plot, [q]uit without saving: ")
if ans=="a": pmagplotlib.save_plots(QQ,files)
if __name__ == "__main__":
main()
|
lfairchild/PmagPy
|
programs/qqunf.py
|
Python
|
bsd-3-clause
| 1,916
|
__version_info__ = (0, 4, 2, 'a1')
__version__ = '.'.join(map(str, __version_info__[:3]))
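# a 4th element, if present, is a pre-release tag appended directly to the
# version string, e.g. (0, 4, 2, 'a1') -> '0.4.2a1'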
if len(__version_info__) == 4:
__version__ += __version_info__[-1]
|
pr-omethe-us/PyKED
|
pyked/_version.py
|
Python
|
bsd-3-clause
| 161
|