# Source: zak-k/iris, lib/iris/fileformats/pp.py
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides UK Met Office Post Process (PP) format specific capabilities.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import abc
import collections
from copy import deepcopy
import itertools
import operator
import os
import re
import struct
import warnings
import biggus
import cf_units
import numpy as np
import numpy.ma as ma
import netcdftime
from iris._deprecation import warn_deprecated
import iris.config
import iris.fileformats.rules
import iris.fileformats.pp_rules
import iris.coord_systems
try:
import mo_pack
except ImportError:
mo_pack = None
try:
from iris.fileformats import _old_pp_packing as pp_packing
except ImportError:
pp_packing = None
__all__ = ['load', 'save', 'load_cubes', 'PPField',
'reset_load_rules', 'add_save_rules',
'as_fields', 'load_pairs_from_fields', 'as_pairs',
'save_pairs_from_cube', 'reset_save_rules',
'save_fields', 'STASH', 'EARTH_RADIUS']
EARTH_RADIUS = 6371229.0
# Cube->PP rules are loaded on first use
_save_rules = None
PP_HEADER_DEPTH = 256
PP_WORD_DEPTH = 4
NUM_LONG_HEADERS = 45
NUM_FLOAT_HEADERS = 19
# The header definition for header release 2.
#: A list of (header_name, header_positions) pairs for header release 2,
#: where header_positions is a tuple of one-based UM/FORTRAN header indices.
UM_HEADER_2 = [
('lbyr', (1, )),
('lbmon', (2, )),
('lbdat', (3, )),
('lbhr', (4, )),
('lbmin', (5, )),
('lbday', (6, )),
('lbyrd', (7, )),
('lbmond', (8, )),
('lbdatd', (9, )),
('lbhrd', (10, )),
('lbmind', (11, )),
('lbdayd', (12, )),
('lbtim', (13, )),
('lbft', (14, )),
('lblrec', (15, )),
('lbcode', (16, )),
('lbhem', (17, )),
('lbrow', (18, )),
('lbnpt', (19, )),
('lbext', (20, )),
('lbpack', (21, )),
('lbrel', (22, )),
('lbfc', (23, )),
('lbcfc', (24, )),
('lbproc', (25, )),
('lbvc', (26, )),
('lbrvc', (27, )),
('lbexp', (28, )),
('lbegin', (29, )),
('lbnrec', (30, )),
('lbproj', (31, )),
('lbtyp', (32, )),
('lblev', (33, )),
('lbrsvd', (34, 35, 36, 37, )),
('lbsrce', (38, )),
('lbuser', (39, 40, 41, 42, 43, 44, 45, )),
('brsvd', (46, 47, 48, 49, )),
('bdatum', (50, )),
('bacc', (51, )),
('blev', (52, )),
('brlev', (53, )),
('bhlev', (54, )),
('bhrlev', (55, )),
('bplat', (56, )),
('bplon', (57, )),
('bgor', (58, )),
('bzy', (59, )),
('bdy', (60, )),
('bzx', (61, )),
('bdx', (62, )),
('bmdi', (63, )),
('bmks', (64, )),
]
# The header definition for header release 3.
#: A list of (header_name, header_positions) pairs for header release 3,
#: where header_positions is a tuple of one-based UM/FORTRAN header indices.
UM_HEADER_3 = [
('lbyr', (1, )),
('lbmon', (2, )),
('lbdat', (3, )),
('lbhr', (4, )),
('lbmin', (5, )),
('lbsec', (6, )),
('lbyrd', (7, )),
('lbmond', (8, )),
('lbdatd', (9, )),
('lbhrd', (10, )),
('lbmind', (11, )),
('lbsecd', (12, )),
('lbtim', (13, )),
('lbft', (14, )),
('lblrec', (15, )),
('lbcode', (16, )),
('lbhem', (17, )),
('lbrow', (18, )),
('lbnpt', (19, )),
('lbext', (20, )),
('lbpack', (21, )),
('lbrel', (22, )),
('lbfc', (23, )),
('lbcfc', (24, )),
('lbproc', (25, )),
('lbvc', (26, )),
('lbrvc', (27, )),
('lbexp', (28, )),
('lbegin', (29, )),
('lbnrec', (30, )),
('lbproj', (31, )),
('lbtyp', (32, )),
('lblev', (33, )),
('lbrsvd', (34, 35, 36, 37, )),
('lbsrce', (38, )),
('lbuser', (39, 40, 41, 42, 43, 44, 45, )),
('brsvd', (46, 47, 48, 49, )),
('bdatum', (50, )),
('bacc', (51, )),
('blev', (52, )),
('brlev', (53, )),
('bhlev', (54, )),
('bhrlev', (55, )),
('bplat', (56, )),
('bplon', (57, )),
('bgor', (58, )),
('bzy', (59, )),
('bdy', (60, )),
('bzx', (61, )),
('bdx', (62, )),
('bmdi', (63, )),
('bmks', (64, )),
]
# A map from header-release-number to header definition
UM_HEADERS = {2: UM_HEADER_2, 3: UM_HEADER_3}
# Offset value to convert from UM_HEADER positions to PP_HEADER offsets.
UM_TO_PP_HEADER_OFFSET = 1
#: A dictionary mapping IB values to their names.
EXTRA_DATA = {
1: 'x',
2: 'y',
3: 'lower_y_domain',
4: 'lower_x_domain',
5: 'upper_y_domain',
6: 'upper_x_domain',
7: 'lower_z_domain',
8: 'upper_z_domain',
10: 'field_title',
11: 'domain_title',
12: 'x_lower_bound',
13: 'x_upper_bound',
14: 'y_lower_bound',
15: 'y_upper_bound',
}
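# Note: in the extra-data section of a PP record, each block is preceded by a
# single integer word encoding 1000 * ia + ib, where ia is the block length in
# words and ib is one of the codes above (see _read_extra_data and
# PPField.save below).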
#: Maps lbuser[0] to numpy data type. The "default" entry is used when no
#: match is found, in which case a warning is given.
LBUSER_DTYPE_LOOKUP = {1: np.dtype('>f4'),
2: np.dtype('>i4'),
3: np.dtype('>i4'),
-1: np.dtype('>f4'),
-2: np.dtype('>i4'),
-3: np.dtype('>i4'),
'default': np.dtype('>f4'),
}
# LBPROC codes and their English equivalents
LBPROC_PAIRS = ((1, "Difference from another experiment"),
(2, "Difference from zonal (or other spatial) mean"),
(4, "Difference from time mean"),
(8, "X-derivative (d/dx)"),
(16, "Y-derivative (d/dy)"),
(32, "Time derivative (d/dt)"),
(64, "Zonal mean field"),
(128, "Time mean field"),
(256, "Product of two fields"),
(512, "Square root of a field"),
(1024, "Difference between fields at levels BLEV and BRLEV"),
(2048, "Mean over layer between levels BLEV and BRLEV"),
(4096, "Minimum value of field during time period"),
(8192, "Maximum value of field during time period"),
(16384, "Magnitude of a vector, not specifically wind speed"),
(32768, "Log10 of a field"),
(65536, "Variance of a field"),
(131072, "Mean over an ensemble of parallel runs"))
# lbproc_map is a dict mapping both lbproc->English and English->lbproc,
# i.e. a two-way (one-to-one) mapping.
lbproc_map = {x: y for x, y in
itertools.chain(LBPROC_PAIRS, ((y, x) for x, y in LBPROC_PAIRS))}
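# For example, lbproc_map[128] == "Time mean field" and
# lbproc_map["Time mean field"] == 128.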
class STASH(collections.namedtuple('STASH', 'model section item')):
"""
A class to hold a single STASH code.
Create instances using:
>>> model = 1
>>> section = 2
>>> item = 3
>>> my_stash = iris.fileformats.pp.STASH(model, section, item)
Access the sub-components via:
>>> my_stash.model
1
>>> my_stash.section
2
>>> my_stash.item
3
String conversion results in the MSI format:
>>> print(iris.fileformats.pp.STASH(1, 16, 203))
m01s16i203
"""
__slots__ = ()
def __new__(cls, model, section, item):
"""
Args:
* model
A positive integer less than 100, or None.
* section
A non-negative integer less than 100, or None.
* item
A positive integer less than 1000, or None.
"""
model = cls._validate_member('model', model, 1, 99)
section = cls._validate_member('section', section, 0, 99)
item = cls._validate_member('item', item, 1, 999)
return super(STASH, cls).__new__(cls, model, section, item)
@staticmethod
def from_msi(msi):
"""Convert a STASH code MSI string to a STASH instance."""
if not isinstance(msi, six.string_types):
raise TypeError('Expected STASH code MSI string, got %r' % (msi,))
msi_match = re.match(r'^\s*m(.*)s(.*)i(.*)\s*$', msi, re.IGNORECASE)
if msi_match is None:
raise ValueError('Expected STASH code MSI string "mXXsXXiXXX", '
'got %r' % (msi,))
return STASH(*msi_match.groups())
@staticmethod
def _validate_member(name, value, lower_limit, upper_limit):
# Returns a valid integer or None.
try:
value = int(value)
if not lower_limit <= value <= upper_limit:
value = None
except (TypeError, ValueError):
value = None
return value
def __str__(self):
model = self._format_member(self.model, 2)
section = self._format_member(self.section, 2)
item = self._format_member(self.item, 3)
return 'm{}s{}i{}'.format(model, section, item)
def _format_member(self, value, num_digits):
if value is None:
result = '?' * num_digits
else:
format_spec = '0' + str(num_digits)
result = format(value, format_spec)
return result
def lbuser3(self):
"""Return the lbuser[3] value that this stash represents."""
return (self.section or 0) * 1000 + (self.item or 0)
def lbuser6(self):
"""Return the lbuser[6] value that this stash represents."""
return self.model or 0
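# For example (illustrative values): STASH(1, 16, 203).lbuser3() == 16203
# and STASH(1, 16, 203).lbuser6() == 1.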
@property
def is_valid(self):
return '?' not in str(self)
def __hash__(self):
return super(STASH, self).__hash__()
def __eq__(self, other):
if isinstance(other, six.string_types):
return super(STASH, self).__eq__(STASH.from_msi(other))
else:
return super(STASH, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
class SplittableInt(object):
"""
A class to hold integers which can easily get each decimal digit
individually.
>>> three_six_two = SplittableInt(362)
>>> print(three_six_two)
362
>>> print(three_six_two[0])
2
>>> print(three_six_two[2])
3
.. note:: No support for negative numbers
"""
def __init__(self, value, name_mapping_dict=None):
"""
Build a SplittableInt given the positive integer value provided.
Kwargs:
* name_mapping_dict - (dict)
A special mapping to provide name based access to specific integer
positions:
>>> a = SplittableInt(1234, {'hundreds': 2})
>>> print(a.hundreds)
2
>>> a.hundreds = 9
>>> print(a.hundreds)
9
>>> print(a)
1934
"""
if value < 0:
raise ValueError('Negative numbers not supported with splittable'
' integers object')
# define the name lookup first (as this is the way __setattr__ is
# plumbed)
#: A dictionary mapping special attribute names on this object
#: to the slices/indices required to access them.
self._name_lookup = name_mapping_dict or {}
self._value = value
self._calculate_str_value_from_value()
def __int__(self):
return int(self._value)
def _calculate_str_value_from_value(self):
# Reverse the string to get the appropriate index when getting the
# sliced value
self._strvalue = [int(c) for c in str(self._value)[::-1]]
# Associate the names in the lookup table to attributes
for name, index in self._name_lookup.items():
object.__setattr__(self, name, self[index])
def _calculate_value_from_str_value(self):
self._value = np.sum([10**i * val for
i, val in enumerate(self._strvalue)])
def __len__(self):
return len(self._strvalue)
def __getitem__(self, key):
try:
val = self._strvalue[key]
except IndexError:
val = 0
# if the key returns a list of values, then combine them together
# to an integer
if isinstance(val, list):
val = sum([10**i * val for i, val in enumerate(val)])
return val
def __setitem__(self, key, value):
# The setitem method has been overridden so that assignment using
# ``val[0] = 1`` style syntax updates
# the entire object appropriately.
if (not isinstance(value, int) or value < 0):
raise ValueError('Can only set %s as a positive integer value.'
% key)
if isinstance(key, slice):
if ((key.start is not None and key.start < 0) or
(key.step is not None and key.step < 0) or
(key.stop is not None and key.stop < 0)):
raise ValueError('Cannot assign a value with slice objects'
' containing negative indices.')
# calculate the current length of the value of this string
current_length = len(range(*key.indices(len(self))))
# get indices for as many digits as have been requested. Putting
# the upper limit on the number of digits at 100.
indices = range(*key.indices(100))
if len(indices) < len(str(value)):
raise ValueError('Cannot put %s into %s as it has too many'
' digits.' % (value, key))
# Iterate over each of the indices in the slice,
# zipping them together with the associated digit
for index, digit in zip(indices,
str(value).zfill(current_length)[::-1]):
# assign each digit to the associated index
self.__setitem__(index, int(digit))
else:
# If we are trying to set to an index which does not currently
# exist in _strvalue then extend it to the
# appropriate length
if (key + 1) > len(self):
new_str_value = [0] * (key + 1)
new_str_value[:len(self)] = self._strvalue
self._strvalue = new_str_value
self._strvalue[key] = value
for name, index in self._name_lookup.items():
if index == key:
object.__setattr__(self, name, value)
self._calculate_value_from_str_value()
def __setattr__(self, name, value):
# if the attribute is a special value, update the index value which
# will in turn update the attribute value
if name != '_name_lookup' and name in self._name_lookup:
self[self._name_lookup[name]] = value
else:
object.__setattr__(self, name, value)
def __str__(self):
return str(self._value)
def __repr__(self):
return 'SplittableInt(%r, name_mapping_dict=%r)' % (self._value,
self._name_lookup)
def __eq__(self, other):
result = NotImplemented
if isinstance(other, SplittableInt):
result = self._value == other._value
elif isinstance(other, int):
result = self._value == other
return result
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
def _compare(self, other, op):
result = NotImplemented
if isinstance(other, SplittableInt):
result = op(self._value, other._value)
elif isinstance(other, int):
result = op(self._value, other)
return result
def __lt__(self, other):
return self._compare(other, operator.lt)
def __le__(self, other):
return self._compare(other, operator.le)
def __gt__(self, other):
return self._compare(other, operator.gt)
def __ge__(self, other):
return self._compare(other, operator.ge)
class BitwiseInt(SplittableInt):
"""
A class to hold an integer, of fixed bit-length, which can easily get/set
each bit individually.
.. deprecated:: 1.8
Please use `int` instead.
.. note::
Uses a fixed number of bits.
Will raise an Error when attempting to access an out-of-range flag.
>>> a = BitwiseInt(511)
>>> a.flag1
1
>>> a.flag8
1
>>> a.flag128
1
>>> a.flag256
1
>>> a.flag512
AttributeError: 'BitwiseInt' object has no attribute 'flag512'
>>> a.flag512 = 1
AttributeError: Cannot set a flag that does not exist: flag512
"""
def __init__(self, value, num_bits=None):
# intentionally empty docstring as all covered in the class docstring.
""" """
warn_deprecated('BitwiseInt is deprecated - please use `int` instead.')
SplittableInt.__init__(self, value)
self.flags = ()
# do we need to calculate the number of bits based on the given value?
self._num_bits = num_bits
if self._num_bits is None:
self._num_bits = 0
while((value >> self._num_bits) > 0):
self._num_bits += 1
else:
# make sure the number of bits is enough to store the given value.
if (value >> self._num_bits) > 0:
raise ValueError("Not enough bits to store value")
self._set_flags_from_value()
def _set_flags_from_value(self):
all_flags = []
# Set attributes "flag[n]" to 0 or 1
for i in range(self._num_bits):
flag_name = 1 << i
flag_value = ((self._value >> i) & 1)
object.__setattr__(self, 'flag%d' % flag_name, flag_value)
# Add to the list of all flags
if flag_value:
all_flags.append(flag_name)
self.flags = tuple(all_flags)
def _set_value_from_flags(self):
self._value = 0
for i in range(self._num_bits):
bit_value = pow(2, i)
flag_name = "flag%i" % bit_value
flag_value = object.__getattribute__(self, flag_name)
self._value += flag_value * bit_value
def __iand__(self, value):
"""Perform an &= operation."""
self._value &= value
self._set_flags_from_value()
return self
def __ior__(self, value):
"""Perform an |= operation."""
self._value |= value
self._set_flags_from_value()
return self
def __iadd__(self, value):
"""Perform an inplace add operation"""
self._value += value
self._set_flags_from_value()
return self
def __setattr__(self, name, value):
# Allow setting of the attribute flags
# Are we setting a flag?
if name.startswith("flag") and name != "flags":
# true and false become 1 and 0
if not isinstance(value, bool):
raise TypeError("Can only set bits to True or False")
# Setting an existing flag?
if hasattr(self, name):
# which flag?
flag_value = int(name[4:])
# on or off?
if value:
self |= flag_value
else:
self &= ~flag_value
# Fail if an attempt has been made to set a flag that does not
# exist
else:
raise AttributeError("Cannot set a flag that does not"
" exist: %s" % name)
# If we're not setting a flag, then continue as normal
else:
SplittableInt.__setattr__(self, name, value)
def _make_flag_getter(value):
def getter(self):
warn_deprecated('The `flag` attributes are deprecated - please use '
'integer bitwise operators instead.')
return int(bool(self._value & value))
return getter
def _make_flag_setter(value):
def setter(self, flag):
warn_deprecated('The `flag` attributes are deprecated - please use '
'integer bitwise operators instead.')
if not isinstance(flag, bool):
raise TypeError('Can only set bits to True or False')
if flag:
self._value |= value
else:
self._value &= ~value
return setter
class _FlagMetaclass(type):
NUM_BITS = 18
def __new__(cls, classname, bases, class_dict):
for i in range(cls.NUM_BITS):
value = 2 ** i
name = 'flag{}'.format(value)
class_dict[name] = property(_make_flag_getter(value),
_make_flag_setter(value))
class_dict['NUM_BITS'] = cls.NUM_BITS
return type.__new__(cls, classname, bases, class_dict)
class _LBProc(six.with_metaclass(_FlagMetaclass, BitwiseInt)):
# Use a metaclass to define the `flag1`, `flag2`, `flag4`, etc.
# properties.
def __init__(self, value):
"""
Args:
* value (int):
The initial value which will determine the flags.
"""
value = int(value)
if value < 0:
raise ValueError('Negative numbers not supported with '
'splittable integers object')
self._value = value
def __len__(self):
"""
Base ten length.
.. deprecated:: 1.8
The value of a BitwiseInt only makes sense in base-two.
"""
warn_deprecated('Length is deprecated')
return len(str(self._value))
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
def __getitem__(self, key):
"""
Base ten indexing support.
.. deprecated:: 1.8
The value of an _LBProc only makes sense in base-two.
"""
warn_deprecated('Indexing is deprecated')
try:
value = int('0' + str(self._value)[::-1][key][::-1])
except IndexError:
value = 0
# If the key returns a list of values, then combine them
# together to an integer.
if isinstance(value, list):
value = sum(10**i * val for i, val in enumerate(value))
return value
def __setitem__(self, key, value):
"""
Base ten indexing support.
.. deprecated:: 1.8
The value of an _LBProc only makes sense in base-two.
"""
warn_deprecated('Indexing is deprecated')
if (not isinstance(value, int) or value < 0):
msg = 'Can only set {} as a positive integer value.'.format(key)
raise ValueError(msg)
if isinstance(key, slice):
if ((key.start is not None and key.start < 0) or
(key.step is not None and key.step < 0) or
(key.stop is not None and key.stop < 0)):
raise ValueError('Cannot assign a value with slice '
'objects containing negative indices.')
# calculate the current length of the value of this string
current_length = len(range(*key.indices(len(self))))
# Get indices for as many digits as have been requested.
# Putting the upper limit on the number of digits at 100.
indices = range(*key.indices(100))
if len(indices) < len(str(value)):
fmt = 'Cannot put {} into {} as it has too many digits.'
raise ValueError(fmt.format(value, key))
# Iterate over each of the indices in the slice, zipping
# them together with the associated digit.
filled_value = str(value).zfill(current_length)
for index, digit in zip(indices, filled_value[::-1]):
# assign each digit to the associated index
self.__setitem__(index, int(digit))
else:
if value > 9:
raise ValueError('Can only set a single digit')
# Setting a single digit.
factor = 10 ** key
head, tail = divmod(self._value, factor)
head = head // 10
self._value = (head * 10 + value) * factor + tail
def __iadd__(self, value):
self._value += value
return self
def __and__(self, value):
return self._value & value
def __iand__(self, value):
self._value &= value
return self
def __ior__(self, value):
self._value |= value
return self
def __int__(self):
return self._value
def __repr__(self):
return '_LBProc({})'.format(self._value)
def __str__(self):
return str(self._value)
@property
def flags(self):
warn_deprecated('The `flags` attribute is deprecated - please use '
'integer bitwise operators instead.')
return tuple(2 ** i for i in range(self.NUM_BITS)
if self._value & 2 ** i)
class PPDataProxy(object):
"""A reference to the data payload of a single PP field."""
__slots__ = ('shape', 'src_dtype', 'path', 'offset', 'data_len',
'_lbpack', 'boundary_packing', 'mdi', 'mask')
def __init__(self, shape, src_dtype, path, offset, data_len,
lbpack, boundary_packing, mdi, mask):
self.shape = shape
self.src_dtype = src_dtype
self.path = path
self.offset = offset
self.data_len = data_len
self.lbpack = lbpack
self.boundary_packing = boundary_packing
self.mdi = mdi
self.mask = mask
# lbpack
def _lbpack_setter(self, value):
self._lbpack = value
def _lbpack_getter(self):
value = self._lbpack
if not isinstance(self._lbpack, SplittableInt):
mapping = dict(n5=slice(4, None), n4=3, n3=2, n2=1, n1=0)
value = SplittableInt(self._lbpack, mapping)
return value
lbpack = property(_lbpack_getter, _lbpack_setter)
@property
def dtype(self):
return self.src_dtype.newbyteorder('=')
@property
def fill_value(self):
return self.mdi
@property
def ndim(self):
return len(self.shape)
def __getitem__(self, keys):
with open(self.path, 'rb') as pp_file:
pp_file.seek(self.offset, os.SEEK_SET)
data_bytes = pp_file.read(self.data_len)
data = _data_bytes_to_shaped_array(data_bytes,
self.lbpack,
self.boundary_packing,
self.shape, self.src_dtype,
self.mdi, self.mask)
return data.__getitem__(keys)
def __repr__(self):
fmt = '<{self.__class__.__name__} shape={self.shape}' \
' src_dtype={self.dtype!r} path={self.path!r}' \
' offset={self.offset} mask={self.mask!r}>'
return fmt.format(self=self)
def __getstate__(self):
# Because we have __slots__, this is needed to support Pickle.dump()
return [(name, getattr(self, name)) for name in self.__slots__]
def __setstate__(self, state):
# Because we have __slots__, this is needed to support Pickle.load()
# (Use setattr, as there is no object dictionary.)
for (key, value) in state:
setattr(self, key, value)
def __eq__(self, other):
result = NotImplemented
if isinstance(other, PPDataProxy):
result = True
for attr in self.__slots__:
if getattr(self, attr) != getattr(other, attr):
result = False
break
return result
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
def _data_bytes_to_shaped_array(data_bytes, lbpack, boundary_packing,
data_shape, data_type, mdi,
mask=None):
"""
Convert the already read binary data payload into a numpy array, unpacking
and decompressing as per the F3 specification.
"""
if lbpack.n1 in (0, 2):
data = np.frombuffer(data_bytes, dtype=data_type)
elif lbpack.n1 == 1:
if mo_pack is not None:
try:
decompress_wgdos = mo_pack.decompress_wgdos
except AttributeError:
decompress_wgdos = mo_pack.unpack_wgdos
elif pp_packing is not None:
msg = 'iris.fileformats.pp_packing has been ' \
'deprecated and will be removed in a future release. ' \
'Install mo_pack to make use of the new unpacking ' \
'functionality.'
warn_deprecated(msg)
decompress_wgdos = pp_packing.wgdos_unpack
else:
msg = 'Unpacking PP fields with LBPACK of {} ' \
'requires mo_pack to be installed'.format(lbpack.n1)
raise ValueError(msg)
data = decompress_wgdos(data_bytes, data_shape[0], data_shape[1], mdi)
elif lbpack.n1 == 4:
if mo_pack is not None and hasattr(mo_pack, 'decompress_rle'):
decompress_rle = mo_pack.decompress_rle
elif pp_packing is not None:
msg = 'iris.fileformats.pp_packing has been ' \
'deprecated and will be removed in a future release. ' \
'Install/upgrade mo_pack to make use of the new unpacking ' \
'functionality.'
warn_deprecated(msg)
decompress_rle = pp_packing.rle_decode
else:
msg = 'Unpacking PP fields with LBPACK of {} ' \
'requires mo_pack to be installed'.format(lbpack.n1)
raise ValueError(msg)
data = decompress_rle(data_bytes, data_shape[0], data_shape[1], mdi)
else:
raise iris.exceptions.NotYetImplementedError(
'PP fields with LBPACK of %s are not yet supported.' % lbpack)
# Ensure we have write permission on the data buffer.
data.setflags(write=True)
# Ensure the data is in the native byte order
if not data.dtype.isnative:
data.byteswap(True)
data.dtype = data.dtype.newbyteorder('=')
if boundary_packing is not None:
# Convert a long string of numbers into a "lateral boundary
# condition" array, which is split into 4 regions: North, East,
# South and West, where North and South contain the corners.
compressed_data = data
data = np.ma.masked_all(data_shape)
boundary_height = boundary_packing.y_halo + boundary_packing.rim_width
boundary_width = boundary_packing.x_halo + boundary_packing.rim_width
y_height, x_width = data_shape
# The height of the east and west components.
mid_height = y_height - 2 * boundary_height
n_s_shape = boundary_height, x_width
e_w_shape = mid_height, boundary_width
# Keep track of our current position in the array.
current_posn = 0
north = compressed_data[:boundary_height*x_width]
current_posn += len(north)
data[-boundary_height:, :] = north.reshape(*n_s_shape)
east = compressed_data[current_posn:
current_posn + boundary_width * mid_height]
current_posn += len(east)
data[boundary_height:-boundary_height,
-boundary_width:] = east.reshape(*e_w_shape)
south = compressed_data[current_posn:
current_posn + boundary_height * x_width]
current_posn += len(south)
data[:boundary_height, :] = south.reshape(*n_s_shape)
west = compressed_data[current_posn:
current_posn + boundary_width * mid_height]
current_posn += len(west)
data[boundary_height:-boundary_height,
:boundary_width] = west.reshape(*e_w_shape)
elif lbpack.n2 == 2:
if mask is None:
raise ValueError('No mask was found to unpack the data. '
'Could not load.')
land_mask = mask.data.astype(np.bool)
sea_mask = ~land_mask
new_data = np.ma.masked_all(land_mask.shape)
if lbpack.n3 == 1:
# Land mask packed data.
new_data.mask = sea_mask
# Sometimes the data comes in longer than it should be (i.e. it
# looks like the compressed data is compressed, but the trailing
# data hasn't been clipped off!).
new_data[land_mask] = data[:land_mask.sum()]
elif lbpack.n3 == 2:
# Sea mask packed data.
new_data.mask = land_mask
new_data[sea_mask] = data[:sea_mask.sum()]
else:
raise ValueError('Unsupported mask compression.')
data = new_data
else:
# Reform in row-column order
data.shape = data_shape
# Mask the array?
if mdi in data:
data = ma.masked_values(data, mdi, copy=False)
return data
# The special headers of the PPField classes which get some improved
# functionality
_SPECIAL_HEADERS = ('lbtim', 'lbcode', 'lbpack', 'lbproc', 'data', 'stash',
't1', 't2')
def _header_defn(release_number):
"""
Returns the zero-indexed header definition for a particular release of
a PPField.
"""
um_header = UM_HEADERS[release_number]
offset = UM_TO_PP_HEADER_OFFSET
return [(name, tuple(position - offset for position in positions))
for name, positions in um_header]
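# For example, for header release 2 the entry ('lbyr', (1,)) becomes
# ('lbyr', (0,)), i.e. 'lbyr' is the first word of the zero-indexed header.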
def _pp_attribute_names(header_defn):
"""
Returns the allowed attributes of a PPField:
all of the normal headers (i.e. not the _SPECIAL_HEADERS),
the _SPECIAL_HEADERS with '_' prefixed,
the possible extra data headers.
"""
normal_headers = list(name for name, positions in header_defn
if name not in _SPECIAL_HEADERS)
special_headers = list('_' + name for name in _SPECIAL_HEADERS)
extra_data = list(EXTRA_DATA.values())
special_attributes = ['_raw_header', 'raw_lbtim', 'raw_lbpack',
'boundary_packing']
return normal_headers + special_headers + extra_data + special_attributes
class PPField(six.with_metaclass(abc.ABCMeta, object)):
"""
A generic class for PP fields - not specific to a particular
header release number.
A PPField instance can easily access the PP header "words" as attributes
with some added useful capabilities::
for field in iris.fileformats.pp.load(filename):
print(field.lbyr)
print(field.lbuser)
print(field.lbuser[0])
print(field.lbtim)
print(field.lbtim.ia)
print(field.t1)
"""
# NB. Subclasses must define the attribute HEADER_DEFN to be their
# zero-based header definition. See PPField2 and PPField3 for examples.
__slots__ = ()
def __init__(self, header=None):
# Combined header longs and floats data cache.
self._raw_header = header
self.raw_lbtim = None
self.raw_lbpack = None
self.boundary_packing = None
if header is not None:
self.raw_lbtim = header[self.HEADER_DICT['lbtim'][0]]
self.raw_lbpack = header[self.HEADER_DICT['lbpack'][0]]
def __getattr__(self, key):
"""
This method supports deferred attribute creation, which offers a
significant loading optimisation, particularly when not all attributes
are referenced and therefore created on the instance.
When an 'ordinary' HEADER_DICT attribute is required, its associated
header offset is used to lookup the data value/s from the combined
header longs and floats data cache. The attribute is then set with this
value/s on the instance. Thus future lookups for this attribute will be
optimised, avoiding the __getattr__ lookup mechanism again.
When a 'special' HEADER_DICT attribute (leading underscore) is
required, its associated 'ordinary' (no leading underscore) header
offset is used to lookup the data value/s from the combined header
longs and floats data cache. The 'ordinary' attribute is then set
with this value/s on the instance. This is required as 'special'
attributes have supporting property convenience functionality base on
the attribute value e.g. see 'lbpack' and 'lbtim'. Note that, for
'special' attributes the interface is via the 'ordinary' attribute but
the underlying attribute value is stored within the 'special'
attribute.
"""
try:
loc = self.HEADER_DICT[key]
except KeyError:
if key[0] == '_' and key[1:] in self.HEADER_DICT:
# Must be a special attribute.
loc = self.HEADER_DICT[key[1:]]
else:
cls = self.__class__.__name__
msg = '{!r} object has no attribute {!r}'.format(cls, key)
raise AttributeError(msg)
if len(loc) == 1:
value = self._raw_header[loc[0]]
else:
start = loc[0]
stop = loc[-1] + 1
value = tuple(self._raw_header[start:stop])
# Now cache the attribute value on the instance.
if key[0] == '_':
# First we need to assign to the attribute so that the
# special attribute is calculated, then we retrieve it.
setattr(self, key[1:], value)
value = getattr(self, key)
else:
setattr(self, key, value)
return value
@abc.abstractproperty
def t1(self):
pass
@abc.abstractproperty
def t2(self):
pass
def __repr__(self):
"""Return a string representation of the PP field."""
# Define an ordering on the basic header names
attribute_priority_lookup = {name: loc[0] for name, loc
in self.HEADER_DEFN}
# With the attributes sorted the order will remain stable if extra
# attributes are added.
public_attribute_names = list(attribute_priority_lookup.keys()) + \
list(EXTRA_DATA.values())
self_attrs = [(name, getattr(self, name, None))
for name in public_attribute_names]
self_attrs = [pair for pair in self_attrs if pair[1] is not None]
# Output any masked data as separate `data` and `mask`
# components, to avoid the standard MaskedArray output
# which causes irrelevant discrepancies between NumPy
# v1.6 and v1.7.
if ma.isMaskedArray(self._data):
# Force the fill value to zero to have the minimum
# impact on the output style.
self_attrs.append(('data.data', self._data.filled(0)))
self_attrs.append(('data.mask', self._data.mask))
else:
self_attrs.append(('data', self._data))
# Sort the attributes by position in the pp header, then
# by alphabetical order.
attributes = sorted(self_attrs, key=lambda pair:
(attribute_priority_lookup.get(pair[0], 999),
pair[0]))
return 'PP Field' + ''.join(['\n %s: %s' % (k, v)
for k, v in attributes]) + '\n'
@property
def stash(self):
"""
A stash property giving access to the associated STASH object,
now supporting __eq__
"""
if (not hasattr(self, '_stash') or
self.lbuser[6] != self._stash.lbuser6() or
self.lbuser[3] != self._stash.lbuser3()):
self._stash = STASH(self.lbuser[6], self.lbuser[3] // 1000,
self.lbuser[3] % 1000)
return self._stash
@stash.setter
def stash(self, stash):
if isinstance(stash, six.string_types):
self._stash = STASH.from_msi(stash)
elif isinstance(stash, STASH):
self._stash = stash
else:
raise ValueError('Cannot set stash to {!r}'.format(stash))
# Keep the lbuser up to date.
self.lbuser = list(self.lbuser)
self.lbuser[6] = self._stash.lbuser6()
self.lbuser[3] = self._stash.lbuser3()
@property
def lbtim(self):
return self._lbtim
@lbtim.setter
def lbtim(self, value):
value = int(value)
self.raw_lbtim = value
self._lbtim = SplittableInt(value, {'ia': slice(2, None), 'ib': 1,
'ic': 0})
# lbcode
def _lbcode_setter(self, new_value):
if not isinstance(new_value, SplittableInt):
# add the ix/iy values for lbcode
new_value = SplittableInt(new_value,
{'iy': slice(0, 2), 'ix': slice(2, 4)})
self._lbcode = new_value
lbcode = property(lambda self: self._lbcode, _lbcode_setter)
# lbpack
def _lbpack_setter(self, new_value):
if not isinstance(new_value, SplittableInt):
self.raw_lbpack = new_value
# add the n1/n2/n3/n4/n5 values for lbpack
name_mapping = dict(n5=slice(4, None), n4=3, n3=2, n2=1, n1=0)
new_value = SplittableInt(new_value, name_mapping)
else:
self.raw_lbpack = new_value._value
self._lbpack = new_value
lbpack = property(lambda self: self._lbpack, _lbpack_setter)
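# For example (illustrative values): setting lbpack = 1 gives lbpack.n1 == 1
# (WGDOS packed), while lbpack = 120 gives lbpack.n1 == 0, lbpack.n2 == 2
# and lbpack.n3 == 1 (unpacked, land-mask compressed data).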
@property
def lbproc(self):
return self._lbproc
@lbproc.setter
def lbproc(self, value):
if not isinstance(value, _LBProc):
value = _LBProc(value)
self._lbproc = value
@property
def data(self):
"""
The :class:`numpy.ndarray` representing the multidimensional data
of the pp file
"""
# Cache the real data on first use
if isinstance(self._data, biggus.Array):
data = self._data.masked_array()
if ma.count_masked(data) == 0:
data = data.data
self._data = data
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def calendar(self):
"""Return the calendar of the field."""
# TODO #577 What calendar to return when ibtim.ic in [0, 3]
calendar = cf_units.CALENDAR_GREGORIAN
if self.lbtim.ic == 2:
calendar = cf_units.CALENDAR_360_DAY
elif self.lbtim.ic == 4:
calendar = cf_units.CALENDAR_365_DAY
return calendar
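# For example (illustrative values): lbtim = 21 has ic == 1 and so takes the
# default Gregorian calendar, whereas lbtim = 22 has ic == 2, giving the
# 360-day calendar.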
def _read_extra_data(self, pp_file, file_reader, extra_len,
little_ended=False):
"""Read the extra data section and update the self appropriately."""
dtype_endian_char = '<' if little_ended else '>'
# While there is still extra data to decode run this loop
while extra_len > 0:
dtype = '%cL' % dtype_endian_char
extra_int_code = struct.unpack_from(dtype,
file_reader(PP_WORD_DEPTH))[0]
extra_len -= PP_WORD_DEPTH
ib = extra_int_code % 1000
ia = extra_int_code // 1000
data_len = ia * PP_WORD_DEPTH
if ib == 10:
dtype = '%c%ds' % (dtype_endian_char, data_len)
field_title = struct.unpack_from(dtype, file_reader(data_len))
self.field_title = field_title[0].rstrip(b'\00').decode()
elif ib == 11:
dtype = '%c%ds' % (dtype_endian_char, data_len)
domain_title = struct.unpack_from(dtype,
file_reader(data_len))
self.domain_title = domain_title[0].rstrip(b'\00').decode()
elif ib in EXTRA_DATA:
attr_name = EXTRA_DATA[ib]
dtype = np.dtype('%cf%d' % (dtype_endian_char, PP_WORD_DEPTH))
values = np.fromfile(pp_file, dtype=dtype, count=ia)
# Ensure the values are in the native byte order
if not values.dtype.isnative:
values.byteswap(True)
values.dtype = values.dtype.newbyteorder('=')
setattr(self, attr_name, values)
else:
raise ValueError('Unknown IB value for extra data: %s' % ib)
extra_len -= data_len
@property
def x_bounds(self):
if hasattr(self, "x_lower_bound") and hasattr(self, "x_upper_bound"):
return np.column_stack((self.x_lower_bound, self.x_upper_bound))
@property
def y_bounds(self):
if hasattr(self, "y_lower_bound") and hasattr(self, "y_upper_bound"):
return np.column_stack((self.y_lower_bound, self.y_upper_bound))
def save(self, file_handle):
"""
Save the PPField to the given file object
(typically created with :func:`open`).
::
# to append the field to a file
with open(filename, 'ab') as fh:
a_pp_field.save(fh)
# to overwrite/create a file
with open(filename, 'wb') as fh:
a_pp_field.save(fh)
.. note::
The fields which are automatically calculated are: 'lbext',
'lblrec' and 'lbuser[0]'. Some fields are not currently
populated, these are: 'lbegin', 'lbnrec', 'lbuser[1]'.
"""
# Before we can actually write to file, we need to calculate the header
# elements. First things first, make sure the data is big-endian
data = self.data
if isinstance(data, ma.core.MaskedArray):
data = data.filled(fill_value=self.bmdi)
if data.dtype.newbyteorder('>') != data.dtype:
# take a copy of the data when byteswapping
data = data.byteswap(False)
data.dtype = data.dtype.newbyteorder('>')
# Create the arrays which will hold the header information
lb = np.empty(shape=NUM_LONG_HEADERS,
dtype=np.dtype(">u%d" % PP_WORD_DEPTH))
b = np.empty(shape=NUM_FLOAT_HEADERS,
dtype=np.dtype(">f%d" % PP_WORD_DEPTH))
# Populate the arrays from the PPField
for name, pos in self.HEADER_DEFN:
try:
header_elem = getattr(self, name)
except AttributeError:
raise AttributeError("PPField.save() could not find %s" % name)
if pos[0] <= NUM_LONG_HEADERS - UM_TO_PP_HEADER_OFFSET:
index = slice(pos[0], pos[-1] + 1)
if isinstance(header_elem, SplittableInt):
header_elem = int(header_elem)
lb[index] = header_elem
else:
index = slice(pos[0] - NUM_LONG_HEADERS,
pos[-1] - NUM_LONG_HEADERS + 1)
b[index] = header_elem
# Although all of the elements are now populated, we still need to
# update some of the elements in case
# things have changed (for example, the data length etc.)
# Set up a variable to represent the datalength of this PPField in
# WORDS.
len_of_data_payload = 0
# set up a list to hold the extra data which will need to be encoded
# at the end of the data
extra_items = []
# iterate through all of the possible extra data fields
for ib, extra_data_attr_name in six.iteritems(EXTRA_DATA):
# try to get the extra data field, returning None if it doesn't
# exist
extra_elem = getattr(self, extra_data_attr_name, None)
if extra_elem is not None:
# The special case of character extra data must be caught
if isinstance(extra_elem, six.string_types):
ia = len(extra_elem)
# pad any strings up to a multiple of PP_WORD_DEPTH
# (this length is # of bytes)
ia = (PP_WORD_DEPTH - (ia-1) % PP_WORD_DEPTH) + (ia-1)
extra_elem = extra_elem.ljust(ia, '\00')
# ia is now the datalength in WORDS of the string
ia //= PP_WORD_DEPTH
else:
# ia is the datalength in WORDS
ia = np.product(extra_elem.shape)
# flip the byteorder if the data is not big-endian
if extra_elem.dtype.newbyteorder('>') != extra_elem.dtype:
# take a copy of the extra data when byte swapping
extra_elem = extra_elem.byteswap(False)
extra_elem.dtype = extra_elem.dtype.newbyteorder('>')
# add the number of bytes to the len_of_data_payload variable
# + the extra integer which will encode ia/ib
len_of_data_payload += PP_WORD_DEPTH * ia + PP_WORD_DEPTH
integer_code = 1000 * ia + ib
extra_items.append([integer_code, extra_elem])
if ia >= 1000:
raise IOError('PP files cannot write extra data with more'
' than 1000 elements. Tried to write "%s"'
' which has %s elements.'
% (extra_data_attr_name, ia)
)
# populate lbext in WORDS
lb[self.HEADER_DICT['lbext'][0]] = len_of_data_payload // PP_WORD_DEPTH
# Put the data length of pp.data into len_of_data_payload (in BYTES)
lbpack = lb[self.HEADER_DICT['lbpack'][0]]
if lbpack == 0:
len_of_data_payload += data.size * PP_WORD_DEPTH
elif lbpack == 1:
if mo_pack is not None:
try:
compress_wgdos = mo_pack.compress_wgdos
except AttributeError:
compress_wgdos = mo_pack.pack_wgdos
packed_data = compress_wgdos(data.astype(np.float32),
b[self.HEADER_DICT['bacc'][0]-45],
b[self.HEADER_DICT['bmdi'][0]-45])
len_of_data_payload += len(packed_data)
else:
msg = 'Writing packed pp data with lbpack of {} ' \
'requires mo_pack to be installed.'.format(lbpack)
raise NotImplementedError(msg)
# populate lblrec in WORDS
lb[self.HEADER_DICT['lblrec'][0]] = len_of_data_payload // \
PP_WORD_DEPTH
# populate lbuser[0] to have the data's datatype
if data.dtype == np.dtype('>f4'):
lb[self.HEADER_DICT['lbuser'][0]] = 1
elif data.dtype == np.dtype('>f8'):
warnings.warn("Downcasting array precision from float64 to float32"
" for save.If float64 precision is required then"
" please save in a different format")
data = data.astype('>f4')
lb[self.HEADER_DICT['lbuser'][0]] = 1
elif data.dtype == np.dtype('>i4'):
# NB: there is no physical difference between lbuser[0] of 2 or 3
# so we encode just 2
lb[self.HEADER_DICT['lbuser'][0]] = 2
else:
raise IOError('Unable to write data array to a PP file. '
'The datatype was %s.' % data.dtype)
# NB: lbegin, lbnrec, lbuser[1] not set up
# Now that we have done the manoeuvring required, write to the file...
if not hasattr(file_handle, 'write'):
raise TypeError('The file_handle argument must be an instance of a'
' Python file object, but got %r. \n e.g. '
'open(filename, "wb") to open a binary file with'
' write permission.' % type(file_handle))
pp_file = file_handle
# header length
pp_file.write(struct.pack(">L", PP_HEADER_DEPTH))
# 45 integers
lb.tofile(pp_file)
# 19 floats
b.tofile(pp_file)
# Header length (again)
pp_file.write(struct.pack(">L", PP_HEADER_DEPTH))
# Data length (including extra data length)
pp_file.write(struct.pack(">L", int(len_of_data_payload)))
# the data itself
if lbpack == 0:
data.tofile(pp_file)
elif lbpack == 1:
pp_file.write(packed_data)
else:
msg = 'Writing packed pp data with lbpack of {} ' \
'is not supported.'.format(lbpack)
raise NotImplementedError(msg)
# extra data elements
for int_code, extra_data in extra_items:
pp_file.write(struct.pack(">L", int(int_code)))
if isinstance(extra_data, six.string_types):
pp_file.write(struct.pack(">%ss" % len(extra_data),
extra_data.encode()))
else:
extra_data = extra_data.astype(np.dtype('>f4'))
extra_data.tofile(pp_file)
# Data length (again)
pp_file.write(struct.pack(">L", int(len_of_data_payload)))
##############################################################
#
# From here on define helper methods for PP -> Cube conversion.
#
def time_unit(self, time_unit, epoch='epoch'):
return cf_units.Unit('%s since %s' % (time_unit, epoch),
calendar=self.calendar)
def coord_system(self):
"""Return a CoordSystem for this PPField.
Returns:
Currently, a :class:`~iris.coord_systems.GeogCS` or
:class:`~iris.coord_systems.RotatedGeogCS`.
"""
geog_cs = iris.coord_systems.GeogCS(EARTH_RADIUS)
def degrees_ne(angle, ref_angle):
"""
Return whether an angle differs significantly from a set value.
The inputs are in degrees.
The difference is judged significant if more than 0.0001 degrees.
"""
return abs(angle - ref_angle) > 0.0001
if (degrees_ne(self.bplat, 90.0) or (degrees_ne(self.bplon, 0.0) and
degrees_ne(self.bplon, 180.0))):
# NOTE: when bplat,bplon=90,0 this encodes an unrotated system.
# However, the rotated system which is *equivalent* to an unrotated
# one actually has blat,bplon=90,180, due to a quirk in the
# definition equations.
# So we accept BPLON of 0 *or* 180 to mean 'unrotated'.
geog_cs = iris.coord_systems.RotatedGeogCS(
self.bplat, self.bplon, ellipsoid=geog_cs)
return geog_cs
def _x_coord_name(self):
# TODO: Remove once we have the ability to derive this in the rules.
x_name = "longitude"
if isinstance(self.coord_system(), iris.coord_systems.RotatedGeogCS):
x_name = "grid_longitude"
return x_name
def _y_coord_name(self):
# TODO: Remove once we have the ability to derive this in the rules.
y_name = "latitude"
if isinstance(self.coord_system(), iris.coord_systems.RotatedGeogCS):
y_name = "grid_latitude"
return y_name
def copy(self):
"""
Returns a deep copy of this PPField.
Returns:
A copy instance of the :class:`PPField`.
"""
return self._deepcopy({})
def __deepcopy__(self, memo):
return self._deepcopy(memo)
def _deepcopy(self, memo):
field = self.__class__()
for attr in self.__slots__:
if hasattr(self, attr):
value = getattr(self, attr)
# Cope with inability to deepcopy a 0-d NumPy array.
if attr == '_data' and value is not None and value.ndim == 0:
setattr(field, attr, np.array(deepcopy(value[()], memo)))
else:
setattr(field, attr, deepcopy(value, memo))
return field
def __eq__(self, other):
result = NotImplemented
if isinstance(other, PPField):
result = True
for attr in self.__slots__:
attrs = [hasattr(self, attr), hasattr(other, attr)]
if all(attrs):
self_attr = getattr(self, attr)
other_attr = getattr(other, attr)
if isinstance(self_attr, biggus.NumpyArrayAdapter):
self_attr = self_attr.concrete
if isinstance(other_attr, biggus.NumpyArrayAdapter):
other_attr = other_attr.concrete
if not np.all(self_attr == other_attr):
result = False
break
elif any(attrs):
result = False
break
return result
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
class PPField2(PPField):
"""
A class to hold a single field from a PP file, with a
header release number of 2.
"""
HEADER_DEFN = _header_defn(2)
HEADER_DICT = dict(HEADER_DEFN)
__slots__ = _pp_attribute_names(HEADER_DEFN)
def _get_t1(self):
if not hasattr(self, '_t1'):
self._t1 = netcdftime.datetime(self.lbyr, self.lbmon, self.lbdat,
self.lbhr, self.lbmin)
return self._t1
def _set_t1(self, dt):
self.lbyr = dt.year
self.lbmon = dt.month
self.lbdat = dt.day
self.lbhr = dt.hour
self.lbmin = dt.minute
self.lbday = int(dt.strftime('%j'))
if hasattr(self, '_t1'):
delattr(self, '_t1')
t1 = property(_get_t1, _set_t1, None,
"A netcdftime.datetime object consisting of the lbyr, lbmon,"
" lbdat, lbhr, and lbmin attributes.")
def _get_t2(self):
if not hasattr(self, '_t2'):
self._t2 = netcdftime.datetime(self.lbyrd, self.lbmond,
self.lbdatd, self.lbhrd,
self.lbmind)
return self._t2
def _set_t2(self, dt):
self.lbyrd = dt.year
self.lbmond = dt.month
self.lbdatd = dt.day
self.lbhrd = dt.hour
self.lbmind = dt.minute
self.lbdayd = int(dt.strftime('%j'))
if hasattr(self, '_t2'):
delattr(self, '_t2')
t2 = property(_get_t2, _set_t2, None,
"A netcdftime.datetime object consisting of the lbyrd, "
"lbmond, lbdatd, lbhrd, and lbmind attributes.")
class PPField3(PPField):
"""
A class to hold a single field from a PP file, with a
header release number of 3.
"""
HEADER_DEFN = _header_defn(3)
HEADER_DICT = dict(HEADER_DEFN)
__slots__ = _pp_attribute_names(HEADER_DEFN)
def _get_t1(self):
if not hasattr(self, '_t1'):
self._t1 = netcdftime.datetime(self.lbyr, self.lbmon, self.lbdat,
self.lbhr, self.lbmin, self.lbsec)
return self._t1
def _set_t1(self, dt):
self.lbyr = dt.year
self.lbmon = dt.month
self.lbdat = dt.day
self.lbhr = dt.hour
self.lbmin = dt.minute
self.lbsec = dt.second
if hasattr(self, '_t1'):
delattr(self, '_t1')
t1 = property(_get_t1, _set_t1, None,
"A netcdftime.datetime object consisting of the lbyr, lbmon,"
" lbdat, lbhr, lbmin, and lbsec attributes.")
def _get_t2(self):
if not hasattr(self, '_t2'):
self._t2 = netcdftime.datetime(self.lbyrd, self.lbmond,
self.lbdatd, self.lbhrd,
self.lbmind, self.lbsecd)
return self._t2
def _set_t2(self, dt):
self.lbyrd = dt.year
self.lbmond = dt.month
self.lbdatd = dt.day
self.lbhrd = dt.hour
self.lbmind = dt.minute
self.lbsecd = dt.second
if hasattr(self, '_t2'):
delattr(self, '_t2')
t2 = property(_get_t2, _set_t2, None,
"A netcdftime.datetime object consisting of the lbyrd, "
"lbmond, lbdatd, lbhrd, lbmind, and lbsecd attributes.")
PP_CLASSES = {
2: PPField2,
3: PPField3
}
def make_pp_field(header):
# Choose a PP field class from the value of LBREL
lbrel = header[21]
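# (21 is the zero-based position of 'lbrel', UM header word 22.)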
if lbrel not in PP_CLASSES:
raise ValueError('Unsupported header release number: {}'.format(lbrel))
pp_field = PP_CLASSES[lbrel](header)
return pp_field
LoadedArrayBytes = collections.namedtuple('LoadedArrayBytes', 'bytes, dtype')
def load(filename, read_data=False, little_ended=False):
"""
Return an iterator of PPFields given a filename.
Args:
* filename - string of the filename to load.
Kwargs:
* read_data - boolean
Flag whether or not the data should be read, if False an empty
data manager will be provided which can subsequently load the data
on demand. Default False.
* little_ended - boolean
If True, file contains all little-ended words (header and data).
To iterate through all of the fields in a pp file::
for field in iris.fileformats.pp.load(filename):
print(field)
"""
return _interpret_fields(_field_gen(filename,
read_data_bytes=read_data,
little_ended=little_ended))
def _interpret_fields(fields):
"""
Turn the fields read with load and FF2PP._extract_field into usable
fields. One of the primary purposes of this function is to convert either
"deferred bytes" into "deferred arrays" or "loaded bytes" into actual
numpy arrays (via the _create_field_data function).
"""
land_mask = None
landmask_compressed_fields = []
for field in fields:
# Store the first reference to a land mask, and use this as the
# definitive mask for future fields in this generator.
if land_mask is None and field.lbuser[6] == 1 and \
(field.lbuser[3] // 1000) == 0 and \
(field.lbuser[3] % 1000) == 30:
land_mask = field
# Handle land compressed data payloads,
# when lbpack.n2 is 2.
if (field.raw_lbpack // 10 % 10) == 2:
if land_mask is None:
landmask_compressed_fields.append(field)
continue
# Land compressed fields don't have a lbrow and lbnpt.
field.lbrow, field.lbnpt = land_mask.lbrow, land_mask.lbnpt
data_shape = (field.lbrow, field.lbnpt)
_create_field_data(field, data_shape, land_mask)
yield field
if landmask_compressed_fields:
if land_mask is None:
warnings.warn('Landmask compressed fields existed without a '
'landmask to decompress with. The data will have '
'a shape of (0, 0) and will not read.')
mask_shape = (0, 0)
else:
mask_shape = (land_mask.lbrow, land_mask.lbnpt)
for field in landmask_compressed_fields:
field.lbrow, field.lbnpt = mask_shape
_create_field_data(field, (field.lbrow, field.lbnpt), land_mask)
yield field
def _create_field_data(field, data_shape, land_mask):
"""
Modifies a field's ``_data`` attribute either by:
* converting DeferredArrayBytes into a biggus array,
* converting LoadedArrayBytes into an actual numpy array.
"""
if isinstance(field._data, LoadedArrayBytes):
loaded_bytes = field._data
field._data = _data_bytes_to_shaped_array(loaded_bytes.bytes,
field.lbpack,
field.boundary_packing,
data_shape,
loaded_bytes.dtype,
field.bmdi, land_mask)
else:
# Wrap the reference to the data payload within a data proxy
# in order to support deferred data loading.
fname, position, n_bytes, dtype = field._data
proxy = PPDataProxy(data_shape, dtype,
fname, position, n_bytes,
field.raw_lbpack,
field.boundary_packing,
field.bmdi, land_mask)
field._data = biggus.NumpyArrayAdapter(proxy)
def _field_gen(filename, read_data_bytes, little_ended=False):
"""
Returns a generator of "half-formed" PPField instances derived from
the given filename.
A field returned by the generator is only "half-formed" because its
`_data` attribute represents a simple one-dimensional stream of
bytes. (Encoded as an instance of either LoadedArrayBytes or
DeferredArrayBytes, depending on the value of `read_data_bytes`.)
This is because fields encoded with a land/sea mask do not contain
sufficient information within the field to determine the final
two-dimensional shape of the data.
"""
dtype_endian_char = '<' if little_ended else '>'
with open(filename, 'rb') as pp_file:
# Get a reference to the seek method on the file
# (this is accessed about three times per field, so it can provide a small
# performance boost)
pp_file_seek = pp_file.seek
pp_file_read = pp_file.read
field_count = 0
# Keep reading until we reach the end of file
while True:
# Move past the leading header length word
pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)
# Get the LONG header entries
dtype = '%ci%d' % (dtype_endian_char, PP_WORD_DEPTH)
header_longs = np.fromfile(pp_file, dtype=dtype,
count=NUM_LONG_HEADERS)
# Nothing returned => EOF
if len(header_longs) == 0:
break
# Get the FLOAT header entries
dtype = '%cf%d' % (dtype_endian_char, PP_WORD_DEPTH)
header_floats = np.fromfile(pp_file, dtype=dtype,
count=NUM_FLOAT_HEADERS)
header = tuple(header_longs) + tuple(header_floats)
# Make a PPField of the appropriate sub-class (depends on header
# release number)
try:
pp_field = make_pp_field(header)
except ValueError as e:
msg = 'Unable to interpret field {}. {}. Skipping ' \
'the remainder of the file.'.format(field_count,
str(e))
warnings.warn(msg)
break
# Skip the trailing 4-byte word containing the header length
pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)
# Read the word telling me how long the data + extra data is
# This value is # of bytes
len_of_data_plus_extra = struct.unpack_from(
'%cL' % dtype_endian_char,
pp_file_read(PP_WORD_DEPTH))[0]
if len_of_data_plus_extra != pp_field.lblrec * PP_WORD_DEPTH:
wmsg = ('LBLREC has a different value to the integer recorded '
'after the header in the file ({} and {}). '
'Skipping the remainder of the file.')
warnings.warn(wmsg.format(pp_field.lblrec * PP_WORD_DEPTH,
len_of_data_plus_extra))
break
# calculate the extra length in bytes
extra_len = pp_field.lbext * PP_WORD_DEPTH
# Derive size and datatype of payload
data_len = len_of_data_plus_extra - extra_len
dtype = LBUSER_DTYPE_LOOKUP.get(pp_field.lbuser[0],
LBUSER_DTYPE_LOOKUP['default'])
if little_ended:
# Change data dtype for a little-ended file.
dtype = str(dtype)
if dtype[0] != '>':
msg = ("Unexpected dtype {!r} can't be converted to "
"little-endian")
raise ValueError(msg.format(dtype))
dtype = np.dtype('<' + dtype[1:])
if read_data_bytes:
# Read the actual bytes. This can then be converted to a numpy
# array at a higher level.
pp_field._data = LoadedArrayBytes(pp_file.read(data_len),
dtype)
else:
# Provide enough context to read the data bytes later on.
pp_field._data = (filename, pp_file.tell(), data_len, dtype)
# Seek over the actual data payload.
pp_file_seek(data_len, os.SEEK_CUR)
# Do we have any extra data to deal with?
if extra_len:
pp_field._read_extra_data(pp_file, pp_file_read, extra_len,
little_ended=little_ended)
# Skip that last 4 byte record telling me the length of the field I
# have already read
pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)
field_count += 1
yield pp_field
def reset_load_rules():
"""
Resets the PP load process to use only the standard conversion rules.
.. deprecated:: 1.7
"""
warn_deprecated('reset_load_rules was deprecated in v1.7.')
def _ensure_save_rules_loaded():
"""Makes sure the standard save rules are loaded."""
# Uses these module-level variables
global _save_rules
if _save_rules is None:
# Load the pp save rules
rules_filename = os.path.join(iris.config.CONFIG_PATH,
'pp_save_rules.txt')
with iris.fileformats.rules._disable_deprecation_warnings():
_save_rules = iris.fileformats.rules.RulesContainer(
rules_filename, iris.fileformats.rules.ProcedureRule)
def add_save_rules(filename):
"""
Registers a rules file for use during the PP save process.
Registered files are processed after the standard conversion rules, and in
the order they were registered.
.. deprecated:: 1.10
If you need to customise pp field saving, please refer to the functions
:func:`as_fields`, :func:`save_pairs_from_cube` and :func:`save_fields`
for an alternative solution.
"""
warn_deprecated(
'custom pp save rules are deprecated from v1.10.\n'
'If you need to customise pp field saving, please refer to the '
'functions iris.fileformats.pp.as_fields, '
'iris.fileformats.pp.save_pairs_from_cube and '
'iris.fileformats.pp.save_fields for an alternative solution.')
_ensure_save_rules_loaded()
_save_rules.import_rules(filename)
def reset_save_rules():
"""
Resets the PP save process to use only the standard conversion rules.
.. deprecated:: 1.10
If you need to customise pp field saving, please refer to the functions
:func:`as_fields`, :func:`save_pairs_from_cube` and :func:`save_fields`
for an alternative solution.
"""
warn_deprecated(
'custom pp save rules are deprecated from v1.10.\n'
'If you need to customise pp field saving, please refer to the '
'functions iris.fileformats.pp.as_fields, '
'iris.fileformats.pp.save_pairs_from_cube and '
'iris.fileformats.pp.save_fields for an alternative solution.')
# Uses this module-level variable
global _save_rules
_save_rules = None
# Stash codes not to be filtered (reference altitude and pressure fields).
_STASH_ALLOW = [STASH(1, 0, 33), STASH(1, 0, 1)]
def _convert_constraints(constraints):
"""
Converts known constraints from Iris semantics to PP semantics
ignoring all unknown constraints.
"""
constraints = iris._constraints.list_of_constraints(constraints)
pp_constraints = {}
unhandled_constraints = False
def _make_func(stashobj):
"""
Provides unique name-space for each lambda function's stashobj
variable.
"""
return lambda stash: stash == stashobj
for con in constraints:
if isinstance(con, iris.AttributeConstraint) and \
list(con._attributes.keys()) == ['STASH']:
# Convert a STASH constraint.
# The attribute can be a STASH object, a stashcode string, or a
# callable.
stashobj = con._attributes['STASH']
if callable(stashobj):
call_func = stashobj
elif isinstance(stashobj, (six.string_types, STASH)):
call_func = _make_func(stashobj)
else:
raise TypeError("STASH constraints should be either a"
" callable, string or STASH object")
if 'stash' not in pp_constraints:
pp_constraints['stash'] = [call_func]
else:
pp_constraints['stash'].append(call_func)
else:
# only keep the pp constraints set if they are all handled as
# pp constraints
unhandled_constraints = True
def pp_filter(field):
"""
        Return True if the field is to be kept,
        False if it does not match the filter.
"""
res = True
if field.stash not in _STASH_ALLOW:
if pp_constraints.get('stash'):
res = False
for call_func in pp_constraints['stash']:
if call_func(str(field.stash)):
res = True
break
return res
if pp_constraints and not unhandled_constraints:
result = pp_filter
else:
result = None
return result
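# A minimal sketch of the constraint handling above (the file name and STASH
# code are illustrative only):
#
#     import iris
#     from iris.fileformats.pp import load_cubes
#
#     # A pure STASH attribute constraint is filtered at the PP-field level:
#     stash_con = iris.AttributeConstraint(STASH='m01s16i203')
#     cubes = list(load_cubes('data.pp', constraints=[stash_con]))
#
# Mixing in any other kind of constraint makes _convert_constraints return
# None, so no field-level filtering happens and the constraints are left to
# the normal iris load machinery.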
def load_cubes(filenames, callback=None, constraints=None):
"""
Loads cubes from a list of pp filenames.
Args:
* filenames - list of pp filenames to load
Kwargs:
* constraints - a list of Iris constraints
* callback - a function which can be passed on to
:func:`iris.io.run_callback`
.. note::
The resultant cubes may not be in the order that they are in the file
(order is not preserved when there is a field with orography
references)
"""
return _load_cubes_variable_loader(filenames, callback, load,
constraints=constraints)
def load_cubes_little_endian(filenames, callback=None, constraints=None):
"""
Loads cubes from a list of pp filenames containing little-endian data.
Args:
* filenames - list of pp filenames to load
Kwargs:
* constraints - a list of Iris constraints
* callback - a function which can be passed on to
:func:`iris.io.run_callback`
.. note::
The resultant cubes may not be in the order that they are in the file
(order is not preserved when there is a field with orography
references)
"""
return _load_cubes_variable_loader(filenames, callback, load,
{'little_ended': True},
constraints=constraints)
def load_pairs_from_fields(pp_fields):
"""
Convert an iterable of PP fields into an iterable of tuples of
(Cubes, PPField).
Args:
* pp_fields:
An iterable of :class:`iris.fileformats.pp.PPField`.
Returns:
        An iterable of (:class:`iris.cube.Cube`,
        :class:`iris.fileformats.pp.PPField`) pairs.
    This capability can be used to filter out fields before they are passed to
    the load pipeline, and to amend the resulting cubes, using PP metadata
    conditions. Where such filtering removes a significant number of fields,
    the speed-up at load time can be significant:
>>> import iris
>>> from iris.fileformats.pp import load_pairs_from_fields
>>> filename = iris.sample_data_path('E1.2098.pp')
>>> filtered_fields = []
>>> for field in iris.fileformats.pp.load(filename):
... if field.lbproc == 128:
... filtered_fields.append(field)
>>> cube_field_pairs = load_pairs_from_fields(filtered_fields)
>>> for cube, field in cube_field_pairs:
... cube.attributes['lbproc'] = field.lbproc
... print(cube.attributes['lbproc'])
128
    This capability can also be used to alter fields before they are passed to
    the load pipeline. Fields with out-of-specification header elements can be
    cleaned up this way and cubes created:
>>> filename = iris.sample_data_path('E1.2098.pp')
>>> cleaned_fields = list(iris.fileformats.pp.load(filename))
>>> for field in cleaned_fields:
... if field.lbrel == 0:
... field.lbrel = 3
>>> cubes_field_pairs = list(load_pairs_from_fields(cleaned_fields))
"""
load_pairs_from_fields = iris.fileformats.rules.load_pairs_from_fields
return load_pairs_from_fields(pp_fields, iris.fileformats.pp_rules.convert)
def _load_cubes_variable_loader(filenames, callback, loading_function,
loading_function_kwargs=None,
constraints=None):
import iris.fileformats.um._fast_load as um_fast_load
pp_filter = None
if constraints is not None:
pp_filter = _convert_constraints(constraints)
if um_fast_load.STRUCTURED_LOAD_CONTROLS.loads_use_structured:
# For structured loads, pass down the pp_filter function as an extra
# keyword to the low-level generator function.
loading_function_kwargs = loading_function_kwargs or {}
loading_function_kwargs['pp_filter'] = pp_filter
# Also do *not* use this filter in generic rules processing, as for
# structured loading, the 'field' of rules processing is no longer a
# PPField but a FieldCollation.
pp_filter = None
# Make a loader object for the generic rules code.
loader = iris.fileformats.rules.Loader(
um_fast_load._basic_load_function,
loading_function_kwargs,
um_fast_load._convert_collation)
else:
loader = iris.fileformats.rules.Loader(
loading_function, loading_function_kwargs or {},
iris.fileformats.pp_rules.convert)
result = iris.fileformats.rules.load_cubes(filenames, callback, loader,
pp_filter)
if um_fast_load.STRUCTURED_LOAD_CONTROLS.loads_use_structured:
# We need an additional concatenate-like operation to combine cubes
# from different files. Unfortunately, the 'merge' call provided in
# the iris_load_xx functions cannot do this.
result = um_fast_load._combine_structured_cubes(result)
return result
def save(cube, target, append=False, field_coords=None):
"""
Use the PP saving rules (and any user rules) to save a cube to a PP file.
Args:
* cube - A :class:`iris.cube.Cube`
* target - A filename or open file handle.
Kwargs:
* append - Whether to start a new file afresh or add the cube(s)
to the end of the file.
Only applicable when target is a filename, not a file
handle.
Default is False.
* field_coords - list of 2 coords or coord names which are to be used
for reducing the given cube into 2d slices,
which will ultimately determine the x and y
coordinates of the resulting fields.
If None, the final two dimensions are chosen
for slicing.
See also :func:`iris.io.save`.
"""
fields = as_fields(cube, field_coords, target)
save_fields(fields, target, append=append)
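# A minimal usage sketch for save() (the file names and single-cube source are
# illustrative, not shipped with this module):
#
#     import iris
#     from iris.fileformats.pp import save
#
#     cube = iris.load_cube('single_field.pp')   # any single-cube PP source
#     save(cube, 'first.pp')                # write a new file
#     save(cube, 'first.pp', append=True)   # append further fields to it
#
# Passing field_coords=['latitude', 'longitude'] would force slicing over
# those two coordinates instead of the final two cube dimensions.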
def as_pairs(cube, field_coords=None, target=None):
"""
.. deprecated:: 1.10
Please use :func:`iris.fileformats.pp.save_pairs_from_cube` for the same
functionality.
"""
warn_deprecated('as_pairs is deprecated in v1.10; please use'
' save_pairs_from_cube instead.')
return save_pairs_from_cube(cube, field_coords=field_coords,
target=target)
def save_pairs_from_cube(cube, field_coords=None, target=None):
"""
Use the PP saving rules (and any user rules) to convert a cube or
iterable of cubes to an iterable of (2D cube, PP field) pairs.
Args:
* cube:
A :class:`iris.cube.Cube`
Kwargs:
* field_coords:
List of 2 coords or coord names which are to be used for
reducing the given cube into 2d slices, which will ultimately
determine the x and y coordinates of the resulting fields.
If None, the final two dimensions are chosen for slicing.
* target:
A filename or open file handle.
"""
# Open issues
# Could use rules in "sections" ... e.g. to process the extensive
# dimensions; ...?
# Could pre-process the cube to add extra convenient terms?
# e.g. x-coord, y-coord ... but what about multiple coordinates on the
# dimension?
# How to perform the slicing?
# Do we always slice in the last two dimensions?
# Not all source data will contain lat-lon slices.
# What do we do about dimensions with multiple coordinates?
# Deal with:
# LBLREC - Length of data record in words (incl. extra data)
# Done on save(*)
# LBUSER[0] - Data type
# Done on save(*)
# LBUSER[1] - Start address in DATA (?! or just set to "null"?)
# BLEV - Level - the value of the coordinate for LBVC
# *) With the current on-save way of handling LBLREC and LBUSER[0] we can't
# check if they've been set correctly without *actually* saving as a binary
# PP file. That also means you can't use the same reference.txt file for
# loaded vs saved fields (unless you re-load the saved field!).
# Set to (or leave as) "null":
# LBEGIN - Address of start of field in direct access dataset
# LBEXP - Experiment identification
# LBPROJ - Fields file projection number
# LBTYP - Fields file field type code
# LBLEV - Fields file level code / hybrid height model level
# Build confidence by having a PP object that records which header items
# have been set, and only saves if they've all been set?
# Watch out for extra-data.
# On the flip side, record which Cube metadata has been "used" and flag up
# unused?
_ensure_save_rules_loaded()
n_dims = len(cube.shape)
if n_dims < 2:
raise ValueError('Unable to save a cube of fewer than 2 dimensions.')
if field_coords is not None:
# cast the given coord/coord names into cube coords
field_coords = cube._as_list_of_coords(field_coords)
if len(field_coords) != 2:
raise ValueError('Got %s coordinates in field_coords, expecting'
' exactly 2.' % len(field_coords))
else:
# default to the last two dimensions
# (if result of coords is an empty list, will raise an IndexError)
# NB watch out for the ordering of the dimensions
field_coords = (cube.coords(dimensions=n_dims-2)[0],
cube.coords(dimensions=n_dims-1)[0])
# Save each named or latlon slice2D in the cube
for slice2D in cube.slices(field_coords):
# Start with a blank PPField
pp_field = PPField3()
# Set all items to 0 because we need lbuser, lbtim
# and some others to be present before running the rules.
for name, positions in pp_field.HEADER_DEFN:
# Establish whether field name is integer or real
default = 0 if positions[0] <= NUM_LONG_HEADERS - \
UM_TO_PP_HEADER_OFFSET else 0.0
# Establish whether field position is scalar or composite
if len(positions) > 1:
default = [default] * len(positions)
setattr(pp_field, name, default)
# Some defaults should not be 0
pp_field.lbrel = 3 # Header release 3.
pp_field.lbcode = 1 # Grid code.
pp_field.bmks = 1.0 # Some scaley thing.
pp_field.lbproc = 0
# From UM doc F3: "Set to -99 if LBEGIN not known"
pp_field.lbuser[1] = -99
# Set the data
pp_field.data = slice2D.data
# Run the PP save rules on the slice2D, to fill the PPField,
# recording the rules that were used
rules_result = _save_rules.verify(slice2D, pp_field)
verify_rules_ran = rules_result.matching_rules
# Log the rules used
if target is None:
target = 'None'
elif not isinstance(target, six.string_types):
target = target.name
with iris.fileformats.rules._disable_deprecation_warnings():
iris.fileformats.rules.log('PP_SAVE', str(target),
verify_rules_ran)
yield (slice2D, pp_field)
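# A minimal sketch of adjusting fields on their way to disk (file names and
# the header tweak are illustrative only):
#
#     import iris
#     from iris.fileformats.pp import save_pairs_from_cube, save_fields
#
#     cube = iris.load_cube('single_field.pp')   # any single-cube PP source
#     fields = []
#     for slice2d, field in save_pairs_from_cube(cube):
#         field.lbft = 0          # e.g. adjust a header item before writing
#         fields.append(field)
#     save_fields(fields, 'custom.pp')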
def as_fields(cube, field_coords=None, target=None):
"""
Use the PP saving rules (and any user rules) to convert a cube to
an iterable of PP fields.
Args:
* cube:
A :class:`iris.cube.Cube`
Kwargs:
* field_coords:
List of 2 coords or coord names which are to be used for
reducing the given cube into 2d slices, which will ultimately
determine the x and y coordinates of the resulting fields.
If None, the final two dimensions are chosen for slicing.
* target:
A filename or open file handle.
"""
return (field for cube, field in save_pairs_from_cube(
cube, field_coords=field_coords, target=target))
def save_fields(fields, target, append=False):
"""
Save an iterable of PP fields to a PP file.
Args:
* fields:
An iterable of PP fields.
* target:
A filename or open file handle.
Kwargs:
* append:
Whether to start a new file afresh or add the cube(s) to the end
of the file.
Only applicable when target is a filename, not a file handle.
Default is False.
See also :func:`iris.io.save`.
"""
# Open issues
# Deal with:
# LBLREC - Length of data record in words (incl. extra data)
# Done on save(*)
# LBUSER[0] - Data type
# Done on save(*)
# LBUSER[1] - Start address in DATA (?! or just set to "null"?)
# BLEV - Level - the value of the coordinate for LBVC
# *) With the current on-save way of handling LBLREC and LBUSER[0] we can't
# check if they've been set correctly without *actually* saving as a binary
# PP file. That also means you can't use the same reference.txt file for
# loaded vs saved fields (unless you re-load the saved field!).
# Set to (or leave as) "null":
# LBEGIN - Address of start of field in direct access dataset
# LBEXP - Experiment identification
# LBPROJ - Fields file projection number
# LBTYP - Fields file field type code
# LBLEV - Fields file level code / hybrid height model level
if isinstance(target, six.string_types):
pp_file = open(target, "ab" if append else "wb")
filename = target
elif hasattr(target, "write"):
if hasattr(target, "mode") and "b" not in target.mode:
raise ValueError("Target not binary")
filename = target.name if hasattr(target, 'name') else None
pp_file = target
else:
raise ValueError("Can only save pp to filename or writable")
try:
# Save each field
for pp_field in fields:
# Write to file
pp_field.save(pp_file)
finally:
if isinstance(target, six.string_types):
pp_file.close()
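# A minimal sketch combining as_fields() and save_fields(); the file-handle
# form shown second must be opened in binary mode, as checked above (file
# names are illustrative):
#
#     import iris
#     from iris.fileformats.pp import as_fields, save_fields
#
#     cube = iris.load_cube('single_field.pp')
#     save_fields(as_fields(cube), 'copy.pp')
#
#     with open('copy2.pp', 'wb') as fh:
#         save_fields(as_fields(cube), fh)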
| gpl-3.0 | 3,401,959,726,195,242,500 | 34.216535 | 79 | 0.55488 | false | 3.972378 | false | false | false |
kgaipal/workspace | test-code/json.py | 1 | 9566 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Requirements: python packages: websocket-client
# Installation: pip install websocket-client
# https://pypi.python.org/pypi/websocket-client/
class StateTableRecord:
def __init__(self, recordId):
self.recordId = recordId
self.fields = {} # map of fieldName:fieldValue
def __str__(self):
str_field = ""
for f, v in self.fields.iteritems():
str_field += str(f) + ":" + str(v) + "\n"
return str_field
class StateTable:
def __init__(self, tableId, name, fieldsInfo):
self.tableId = tableId
self.name = name
self.fieldsInfo = fieldsInfo
self.records = {} # map of "recordId":StateTableRecord
def updateRecordField(self, recordId, field, value):
if not recordId in self.records:
self.records[recordId] = StateTableRecord(recordId)
self.records[recordId].fields[field] = value
def deleteRecord(self, recordId):
if recordId in self.records:
del self.records[recordId]
def truncate(self):
self.records = {}
def __str__(self):
header = "Table '%s'; fields: %d; records: %d\n" % (
self.name, len(self.fieldsInfo), len(self.records))
str_records = ""
for record in self.records.values():
str_records += str(record) + "\n"
return header+str_records
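# A small usage sketch for the containers above (ids and values are made up
# for illustration):
#
#     t = StateTable("queue", "Queue", [("id", "ID"), ("name", "Name")])
#     t.updateRecordField("42", "id", "42")
#     t.updateRecordField("42", "name", "General")
#     print t               # header line plus one record
#     t.deleteRecord("42")
#     t.truncate()          # drop every record but keep the field info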
STATE_TABLES = {
"queue" : StateTable(
"queue", "Queue", [
("id", "ID"),
("name", "Name"),
("code_name", "Code Name"),
("type", "Type"),
("support_team_id", "Support Team ID"),
("created_timestamp", "Created Timestamp")
]),
"support_session" : StateTable(
"support_session", "Support Session", [
("id", "ID"),
("lsid", "LSID"),
("queue_id", "Queue ID"),
("queue_entry_timestamp", "Queue Entry Timestamp"),
("customer_name", "Customer Name"),
("customer_company", "Customer Company"),
("customer_company_code", "Customer Company Code"),
("customer_description", "Customer Description"),
("start_method", "Start Method"),
("priority", "Priority"),
("estimated_pickup_timestamp", "Estimated Pickup Timestamp"),
("created_timestamp", "Created Timestamp")
]),
"support_session_attribute" : StateTable(
"support_session_attribute", "Support Session Attribute", [
("id", "ID"),
("support_session_id", "Support Session ID"),
("code_name", "Code Name"),
("value", "Value"),
("created_timestamp", "Created Timestamp")
]),
"support_session_skill" : StateTable(
"support_session_skill", "Support Session Skill", [
("id", "ID"),
("support_session_id", "Support Session ID"),
("code_name", "Code Name"),
("value", "Value"),
("created_timestamp", "Created Timestamp")
]),
"customer_client" : StateTable(
"customer_client", "Customer Client", [
("id", "ID"),
("support_session_id", "Support Session ID"),
("support_session_id", "Operating System"),
("hostname", "Hostname"),
("client_type", "Client Type"),
("elevated", "Elevated"),
("created_timestamp", "Created Timestamp")
]),
"representative" : StateTable(
"representative", "Representative", [
("id", "ID"),
("user_id", "User ID"),
("username", "Username"),
("private_display_name", "Private Display Name"),
("public_display_name", "Public Display Name"),
("routing_available", "Routing Available"),
("routing_idle", "Routing Idle"),
("routing_busy", "Routing Busy"),
("routing_enabled", "Routing Enabled"),
("skill_code_names", "Skill Code Names"),
("queue_id", "Queue ID"),
("created_timestamp", "Created Timestamp")
]),
"representative_queue" : StateTable(
"representative_queue", "Representative Queue", [
("id", "ID"),
("user_id", "User ID"),
("username", "Username"),
("private_display_name", "Private Display Name"),
("public_display_name", "Public Display Name"),
("routing_available", "Routing Available"),
("routing_idle", "Routing Idle"),
("routing_busy", "Routing Busy"),
("routing_enabled", "Routing Enabled"),
("skill_code_names", "Skill Code Names"),
("queue_id", "Queue ID"),
("created_timestamp", "Created Timestamp")
]),
"representative_support_session" : StateTable(
"representative_support_session", "Representative Support Session", [
("id", "ID"),
("support_session_id", "Support Session ID"),
("representative_id", "Representative ID"),
("created_timestamp", "Created Timestamp")
])
}
import json
import websocket
class State:
DISCONNECTED = -1
APP_CHOSER = 0
AUTHENTICATE = 1
SUBSCRIBE = 2
UPDATES = 3
class Client:
def __init__(self, site, company, enable_trace=True):
self.state = State.DISCONNECTED
self.site = site
self.company = company
self.enable_trace = enable_trace
def on_open(self, ws):
self.state = State.APP_CHOSER
ws.send("NS01" + self.company + "\ningredi state api\n")
def on_close(self, ws):
self.disconnect()
raise Exception("connection closed")
def on_error(self, ws, error):
raise Exception(str(error))
def on_message(self, ws, message):
message = message.decode('utf-8', 'ignore')
if self.state == State.APP_CHOSER:
if message != "0 Application chosen\n":
raise Exception("Application choser failed")
auth = {
'type': "authenticate",
'credentials': {
'username': "kgaipal",
'password': "password"
}
}
ws.send(json.dumps(auth) + "\n")
self.state = State.AUTHENTICATE
elif self.state == State.AUTHENTICATE:
resp = json.loads(message)
if resp["success"] != True:
raise Exception("Authentication failed")
subscription = {
'type': "subscribe",
'tables': "all"
}
ws.send(json.dumps(subscription) + "\n")
self.state = State.SUBSCRIBE
elif self.state == State.SUBSCRIBE:
resp = json.loads(message)
if resp["success"] != True:
raise Exception("Subscription failed")
self.state = State.UPDATES
elif self.state == State.UPDATES:
model_update = json.loads(message)
updated = False
if model_update["type"] == "update_model":
if model_update.has_key("insert"):
self.parse_inserts(model_update["insert"])
updated = True
if model_update.has_key("update"):
self.parse_updates(model_update["update"])
updated = True
if model_update.has_key("delete"):
self.parse_deletes(model_update["delete"])
updated = True
elif model_update["type"] == "truncate_model":
for table in STATE_TABLES.values():
table.truncate()
updated = True
if updated:
print "\n"
self.printAllTables()
else:
raise Exception("Unkown state: " + str(self.state))
def printAllTables(self):
for table in STATE_TABLES.values():
if table is not None:
print str(table) + "\n**"
else:
print "<empty>\n**"
def parse_inserts(self, msg):
self.parse_updates(msg) # same structure
def parse_updates(self, msg):
for table in msg:
for recId, record in msg[table].items():
for field, value in record.items():
STATE_TABLES[table].updateRecordField(recId, field, value)
def parse_deletes(self, msg):
for table in msg:
for recId in msg[table]:
print "KGAIPAL: " + str(recId)
STATE_TABLES[table].deleteRecord(recId)
def connect(self):
if self.state != State.DISCONNECTED:
self.disconnect()
# start new connection
websocket.enableTrace(self.enable_trace)
ws = websocket.WebSocketApp(
"wss://" + self.site + "/nw",
on_message = self.on_message,
on_error = self.on_error,
on_close = self.on_close,
on_open = self.on_open)
ws.run_forever()
def disconnect(self):
if self.state == State.DISCONNECTED:
raise Exception("already disconnected")
self.state = State.DISCONNECTED
print "disconnected"
if __name__ == "__main__":
try:
c = Client("kgaipalrtd.dev.bomgar.com", "kgaipalrtd", False)
c.connect()
except Exception, e:
print str(e)
c.disconnect()
| gpl-3.0 | -3,622,807,888,267,472,400 | 32.215278 | 78 | 0.523625 | false | 4.229001 | false | false | false |
MrYsLab/python_banyan | projects/OneGPIO/arduino_uno/arduino_gateway.py | 1 | 22028 |
"""
Copyright (c) 2018-2019 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
Version 3 as published by the Free Software Foundation; either
or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE
along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import argparse
import asyncio
import logging
import pathlib
import signal
import sys
from pymata_express.private_constants import PrivateConstants
from pymata_express.pymata_express import PymataExpress
from python_banyan.gateway_base_aio import GatewayBaseAIO
# noinspection PyAbstractClass,PyMethodMayBeStatic,PyRedundantParentheses,DuplicatedCode
class ArduinoGateway(GatewayBaseAIO):
# This class implements the GatewayBase interface adapted for asyncio.
# It supports Arduino boards, tested with Uno.
# NOTE: This class requires the use of Python 3.7 or above
# serial_port = None
def __init__(self, *subscriber_list, back_plane_ip_address=None,
subscriber_port='43125',
publisher_port='43124', process_name='ArduinoGateway',
event_loop=None, keep_alive=False, com_port=None,
arduino_instance_id=None, log=False):
"""
Set up the gateway for operation
:param subscriber_list: a tuple or list of subscription topics.
:param back_plane_ip_address: ip address of backplane or none if local
:param subscriber_port: backplane subscriber port
:param publisher_port: backplane publisher port
:param process_name: name to display on the console
:param event_loop: optional parameter to pass in an asyncio
event loop
:param keep_alive: if True, enable FirmataExpress keep-alives
:param com_port: force pymata-express to use this comport
        :param arduino_instance_id: set an arduino instance id that must
be programmed into the FirmataExpress
sketch.
:param log: enable logging
"""
# set up logging if requested
self.log = log
if self.log:
fn = str(pathlib.Path.home()) + "/ardgw.log"
self.logger = logging.getLogger(__name__)
logging.basicConfig(filename=fn, filemode='w', level=logging.DEBUG)
sys.excepthook = self.my_handler
# set the event loop to be used. accept user's if provided
self.event_loop = event_loop
# instantiate pymata express to control the arduino
# if user want to pass in a com port, then pass it in
try:
if com_port:
self.arduino = PymataExpress(loop=self.event_loop,
com_port=com_port)
# if user wants to set an instance id, then pass it in
elif arduino_instance_id:
self.arduino = PymataExpress(loop=self.event_loop,
arduino_instance_id=arduino_instance_id)
# default settings
else:
self.arduino = PymataExpress(loop=self.event_loop)
except RuntimeError:
if self.log:
logging.exception("Exception occurred", exc_info=True)
raise
# extract pin info from self.arduino
self.number_of_digital_pins = len(self.arduino.digital_pins)
self.number_of_analog_pins = len(self.arduino.analog_pins)
self.first_analog_pin = self.arduino.first_analog_pin
# Initialize the parent
super(ArduinoGateway, self).__init__(subscriber_list=subscriber_list,
event_loop=self.event_loop,
back_plane_ip_address=back_plane_ip_address,
subscriber_port=subscriber_port,
publisher_port=publisher_port,
process_name=process_name,
)
self.first_analog_pin = self.arduino.first_analog_pin
self.keep_alive = keep_alive
def init_pins_dictionary(self):
"""
This method will initialize the pins dictionary contained
in gateway base parent class. This method is called by
the gateway base parent in its init method.
        NOTE: this is a non-asyncio method.
"""
report = self.event_loop.run_until_complete(self.arduino.get_capability_report())
x = 0
pin = 0
while x < len(report):
while report[x] != 127:
mode = report[x]
if mode == PrivateConstants.INPUT:
self.pins_dictionary[pin] = \
[GatewayBaseAIO.DIGITAL_INPUT_MODE, 0, False]
elif mode == PrivateConstants.ANALOG:
self.pins_dictionary[pin + self.first_analog_pin] = \
[GatewayBaseAIO.ANALOG_INPUT_MODE, 0, False]
x += 1
x += 1
pin += 1
# set up entry for i2c as pin 200 ( a pseudo pin number)
self.pins_dictionary[200] = GatewayBaseAIO.DIGITAL_INPUT_MODE
async def main(self):
# call the inherited begin method located in banyan_base_aio
await self.begin()
# start the keep alive on the Arduino if enabled
if self.keep_alive:
await self.arduino.keep_alive()
# sit in an endless loop to receive protocol messages
while True:
await self.receive_loop()
    # The following methods are called
# by the gateway base class in its incoming_message_processing
# method. They overwrite the default methods in the gateway_base.
async def digital_write(self, topic, payload):
"""
This method performs a digital write
:param topic: message topic
:param payload: {"command": "digital_write", "pin": “PIN”, "value": “VALUE”}
"""
await self.arduino.digital_write(payload["pin"], payload['value'])
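    # For illustration, a message arriving on this gateway's subscribed topic
    # (default "to_arduino_gateway") with the payload below would end up in
    # the handler above; the pin and value are made-up example numbers:
    #
    #     payload = {"command": "digital_write", "pin": 13, "value": 1}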
async def disable_analog_reporting(self, topic, payload):
"""
This method disables analog input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "disable_analog_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.disable_analog_reporting(payload["pin"])
async def disable_digital_reporting(self, topic, payload):
"""
This method disables digital input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "disable_digital_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.disable_digital_reporting(payload["pin"])
async def enable_analog_reporting(self, topic, payload):
"""
This method enables analog input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "enable_analog_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.enable_analog_reporting(payload["pin"])
async def enable_digital_reporting(self, topic, payload):
"""
This method enables digital input reporting for the selected pin.
:param topic: message topic
:param payload: {"command": "enable_digital_reporting", "pin": “PIN”, "tag": "TAG"}
"""
await self.arduino.enable_digital_reporting(payload["pin"])
async def i2c_read(self, topic, payload):
"""
This method will perform an i2c read by specifying the i2c
device address, i2c device register and the number of bytes
to read.
Call set_mode_i2c first to establish the pins for i2c operation.
:param topic: message topic
:param payload: {"command": "i2c_read", "pin": “PIN”, "tag": "TAG",
"addr": “I2C ADDRESS, "register": “I2C REGISTER”,
"number_of_bytes": “NUMBER OF BYTES”}
:return via the i2c_callback method
"""
await self.arduino.i2c_read(payload['addr'],
payload['register'],
payload['number_of_bytes'], callback=self.i2c_callback)
async def i2c_write(self, topic, payload):
"""
This method will perform an i2c write for the i2c device with
the specified i2c device address, i2c register and a list of byte
to write.
Call set_mode_i2c first to establish the pins for i2c operation.
:param topic: message topic
:param payload: {"command": "i2c_write", "pin": “PIN”, "tag": "TAG",
"addr": “I2C ADDRESS, "register": “I2C REGISTER”,
"data": [“DATA IN LIST FORM”]}
"""
await self.arduino.i2c_write(payload['addr'], payload['data'])
async def play_tone(self, topic, payload):
"""
This method plays a tone on a piezo device connected to the selected
pin at the frequency and duration requested.
Frequency is in hz and duration in milliseconds.
Call set_mode_tone before using this method.
:param topic: message topic
:param payload: {"command": "play_tone", "pin": “PIN”, "tag": "TAG",
“freq”: ”FREQUENCY”, duration: “DURATION”}
"""
await self.arduino.play_tone(payload['pin'],
payload['freq'],
payload['duration'])
async def pwm_write(self, topic, payload):
"""
This method sets the pwm value for the selected pin.
Call set_mode_pwm before calling this method.
:param topic: message topic
:param payload: {“command”: “pwm_write”, "pin": “PIN”,
"tag":”TAG”,
“value”: “VALUE”}
"""
await self.arduino.analog_write(payload["pin"], payload['value'])
async def servo_position(self, topic, payload):
"""
This method will set a servo's position in degrees.
Call set_mode_servo first to activate the pin for
servo operation.
:param topic: message topic
:param payload: {'command': 'servo_position',
"pin": “PIN”,'tag': 'servo',
“position”: “POSITION”}
"""
await self.arduino.servo_write(payload["pin"], payload["position"])
async def set_mode_analog_input(self, topic, payload):
"""
This method sets a GPIO pin as analog input.
:param topic: message topic
:param payload: {"command": "set_mode_analog_input", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin + self.first_analog_pin][GatewayBaseAIO.PIN_MODE] = \
GatewayBaseAIO.ANALOG_INPUT_MODE
await self.arduino.set_pin_mode_analog_input(pin, self.analog_input_callback)
async def set_mode_digital_input(self, topic, payload):
"""
This method sets a pin as digital input.
:param topic: message topic
:param payload: {"command": "set_mode_digital_input", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.DIGITAL_INPUT_MODE
await self.arduino.set_pin_mode_digital_input(pin, self.digital_input_callback)
async def set_mode_digital_input_pullup(self, topic, payload):
"""
This method sets a pin as digital input with pull up enabled.
:param topic: message topic
:param payload: message payload
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.DIGITAL_INPUT_PULLUP_MODE
await self.arduino.set_pin_mode_digital_input_pullup(pin, self.digital_input_callback)
async def set_mode_digital_output(self, topic, payload):
"""
This method sets a pin as a digital output pin.
:param topic: message topic
:param payload: {"command": "set_mode_digital_output", "pin": PIN, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.DIGITAL_OUTPUT_MODE
await self.arduino.set_pin_mode_digital_output(pin)
async def set_mode_i2c(self, topic, payload):
"""
This method sets up the i2c pins for i2c operations.
:param topic: message topic
:param payload: {"command": "set_mode_i2c"}
"""
self.pins_dictionary[200][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.I2C_MODE
await self.arduino.set_pin_mode_i2c()
async def set_mode_pwm(self, topic, payload):
"""
This method sets a GPIO pin capable of PWM for PWM operation.
:param topic: message topic
:param payload: {"command": "set_mode_pwm", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.PWM_OUTPUT_MODE
await self.arduino.set_pin_mode_pwm(pin)
async def set_mode_servo(self, topic, payload):
"""
This method establishes a GPIO pin for servo operation.
:param topic: message topic
:param payload: {"command": "set_mode_servo", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.SERVO_MODE
await self.arduino.set_pin_mode_servo(pin)
async def set_mode_sonar(self, topic, payload):
"""
This method sets the trigger and echo pins for sonar operation.
:param topic: message topic
:param payload: {"command": "set_mode_sonar", "trigger_pin": “PIN”, "tag":”TAG”
"echo_pin": “PIN”"tag":”TAG” }
"""
trigger = payload["trigger_pin"]
echo = payload["echo_pin"]
self.pins_dictionary[trigger][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.SONAR_MODE
self.pins_dictionary[echo][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.SONAR_MODE
await self.arduino.set_pin_mode_sonar(trigger, echo, callback=self.sonar_callback)
async def set_mode_stepper(self, topic, payload):
"""
This method establishes either 2 or 4 GPIO pins to be used in stepper
motor operation.
:param topic:
:param payload:{"command": "set_mode_stepper", "pins": [“PINS”],
"steps_per_revolution": “NUMBER OF STEPS”}
"""
for pin in payload['pins']:
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.STEPPER_MODE
await self.arduino.set_pin_mode_stepper(payload['steps_per_revolution'],
payload['pins'])
async def set_mode_tone(self, topic, payload):
"""
Establish a GPIO pin for tone operation.
:param topic:
:param payload:{"command": "set_mode_tone", "pin": “PIN”, "tag":”TAG” }
"""
pin = payload["pin"]
self.pins_dictionary[pin][GatewayBaseAIO.PIN_MODE] = GatewayBaseAIO.TONE_MODE
await self.arduino.set_pin_mode_tone(pin)
async def stepper_write(self, topic, payload):
"""
Move a stepper motor for the specified number of steps.
:param topic:
:param payload: {"command": "stepper_write", "motor_speed": “SPEED”,
"number_of_steps":”NUMBER OF STEPS” }
"""
await self.arduino.stepper_write(payload['motor_speed'],
payload['number_of_steps'])
# Callbacks
async def digital_input_callback(self, data):
"""
Digital input data change reported by Arduino
:param data:
:return:
"""
# data = [pin mode, pin, current reported value, timestamp]
self.pins_dictionary[data[1]][GatewayBaseAIO.LAST_VALUE] = data[2]
payload = {'report': 'digital_input', 'pin': data[1],
'value': data[2], 'timestamp': data[3]}
await self.publish_payload(payload, 'from_arduino_gateway')
async def analog_input_callback(self, data):
# data = [pin mode, pin, current reported value, timestamp]
self.pins_dictionary[data[1] + self.arduino.first_analog_pin][GatewayBaseAIO.LAST_VALUE] = data[2]
payload = {'report': 'analog_input', 'pin': data[1],
'value': data[2], 'timestamp': data[3]}
await self.publish_payload(payload, 'from_arduino_gateway')
async def i2c_callback(self, data):
"""
        i2c data reported by Arduino
:param data:
:return:
"""
        # create a string representation of the data returned
self.pins_dictionary[200] = data[1]
report = ', '.join([str(elem) for elem in data])
payload = {'report': 'i2c_data', 'value': report}
await self.publish_payload(payload, 'from_arduino_gateway')
async def sonar_callback(self, data):
"""
Sonar data change reported by Arduino
:param data:
:return:
"""
self.pins_dictionary[data[1]][GatewayBaseAIO.LAST_VALUE] = data[2]
payload = {'report': 'sonar_data', 'value': data[2]}
await self.publish_payload(payload, 'from_arduino_gateway')
def my_handler(self, tp, value, tb):
"""
for logging uncaught exceptions
:param tp:
:param value:
:param tb:
:return:
"""
self.logger.exception("Uncaught exception: {0}".format(str(value)))
# noinspection DuplicatedCode
def arduino_gateway():
# allow user to bypass the IP address auto-discovery. This is necessary if the component resides on a computer
    # other than the computer running the backplane.
parser = argparse.ArgumentParser()
parser.add_argument("-b", dest="back_plane_ip_address", default="None",
help="None or IP address used by Back Plane")
parser.add_argument("-c", dest="com_port", default="None",
help="Use this COM port instead of auto discovery")
parser.add_argument("-k", dest="keep_alive", default="True",
help="Enable firmata-express keep-alive - set to True or False - default=False")
parser.add_argument("-i", dest="arduino_instance_id", default="None",
help="Set an Arduino Instance ID and match it in FirmataExpress")
parser.add_argument("-l", dest="log", default="False",
help="Set to True to turn logging on.")
parser.add_argument("-m", dest="subscriber_list",
default="to_arduino_gateway", nargs='+',
help="Banyan topics space delimited: topic1 topic2 topic3")
parser.add_argument("-n", dest="process_name",
default="ArduinoGateway", help="Set process name in "
"banner")
parser.add_argument("-p", dest="publisher_port", default='43124',
help="Publisher IP port")
parser.add_argument("-r", dest="publisher_topic",
default="from_rpi_gpio", help="Report topic")
parser.add_argument("-s", dest="subscriber_port", default='43125',
help="Subscriber IP port")
args = parser.parse_args()
subscriber_list = args.subscriber_list
kw_options = {
'publisher_port': args.publisher_port,
'subscriber_port': args.subscriber_port,
'process_name': args.process_name,
}
keep_alive = args.keep_alive.lower()
if keep_alive == 'false':
keep_alive = False
else:
keep_alive = True
kw_options['keep_alive'] = keep_alive
log = args.log.lower()
if log == 'false':
log = False
else:
log = True
kw_options['log'] = log
if args.back_plane_ip_address != 'None':
kw_options['back_plane_ip_address'] = args.back_plane_ip_address
if args.com_port != 'None':
kw_options['com_port'] = args.com_port
if args.arduino_instance_id != 'None':
kw_options['arduino_instance_id'] = int(args.arduino_instance_id)
# get the event loop
# this is for python 3.8
if sys.platform == 'win32':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
loop = asyncio.get_event_loop()
# replace with the name of your class
app = ArduinoGateway(subscriber_list, **kw_options, event_loop=loop)
try:
loop.run_until_complete(app.main())
except (KeyboardInterrupt, asyncio.CancelledError, RuntimeError):
if app.log:
logging.exception("Exception occurred", exc_info=True)
loop.stop()
loop.close()
sys.exit(0)
# signal handler function called when Control-C occurs
# noinspection PyShadowingNames,PyUnusedLocal
def signal_handler(sig, frame):
print('Exiting Through Signal Handler')
raise KeyboardInterrupt
# listen for SIGINT
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if __name__ == '__main__':
arduino_gateway()
| agpl-3.0 | 5,566,145,373,664,180,000 | 39.913858 | 114 | 0.597171 | false | 3.904217 | false | false | false |
dwbro1/SS-Reporter | rpiGPIOActuator.py | 1 | 1544 |
"""
Purpose: Changes the state of the configured pin on command
"""
import sys
import time
import RPi.GPIO as GPIO
class rpiGPIOActuator:
"""Represents an actuator connected to a GPIO pin"""
def __init__(self, connection, logger, params):
"""Sets the output and changes its state when it receives a command"""
self.logger = logger
self.pin = int(params("Pin"))
GPIO.setmode(GPIO.BCM) # uses BCM numbering, not Board numbering
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, GPIO.HIGH)
self.destination = params("Topic")
self.connection = connection
self.toggle = bool(params("Toggle"))
self.logger.info('----------Configuring rpiGPIOActuator: pin {0} on destination {1} with toggle {2}'.format(self.pin, self.destination, self.toggle))
self.connection.register(self.destination, self.on_message)
def on_message(self, client, userdata, msg):
"""Process a message"""
self.logger.info('Received command on {0}: {1} Toggle = {2} PIN = {3}'.format(self.destination, msg.payload, self.toggle, self.pin))
if self.toggle == "True":
self.logger.info('Toggling pin %s HIGH to LOW' % (self.pin))
GPIO.output(self.pin, GPIO.LOW)
time.sleep(.5)
GPIO.output(self.pin, GPIO.HIGH)
self.logger.info('Toggling pin %s LOW to HIGH' % (self.pin))
else:
out = GPIO.LOW if msg.payload == "ON" else GPIO.HIGH
GPIO.output(self.pin, out)
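# A minimal wiring sketch (the connection and logger objects are stand-ins
# supplied by the surrounding framework; the params() lookup is emulated here
# with a plain dict):
#
#     config = {"Pin": "17", "Topic": "actuators/relay1", "Toggle": "False"}
#     actuator = rpiGPIOActuator(connection, logger, config.get)
#     # connection.register() is then expected to route messages published
#     # on "actuators/relay1" into actuator.on_message().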
| apache-2.0 | -3,478,561,868,706,700,300 | 35.761905 | 157 | 0.620466 | false | 3.650118 | false | false | false |
chrismcginlay/crazy-koala | word_search.py | 1 | 2311 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 28 09:55:21 2016
@author: chrismcginlay
"""
grid = [
list("SYNTAXQWERT"),
list("GHFPOSTKDSK"),
list("LKJHCVNBVYR"),
list("CCCBIWUISKT"),
list("LKTSOPSHDER"),
list("XZPOSTSEIGU"),
]
for row in grid:
row.insert(0,"*")
row.append("*")
width = len(grid[0])+2
grid.insert(0,list("*"*width))
grid.append(list("*"*width))
target = "POST"
letter1 = target[0]
letter2 = target[1]
row_index = 0
pass1_loci = list()
pass2_loci = list()
# get all occurrences of letter1, place in list of (col, row) tuples
for row in grid:
row_loci = [i for i,x in enumerate(row) if x==letter1]
for locus in row_loci:
pass1_loci.append((locus, row_index))
row_index+=1
#pass2_loci - search box around letter1, construct list of tuples
for locus1 in pass1_loci:
pass2_loci = list()
L_one_c = locus1[0]
L_one_r = locus1[1]
#in the following note grid[r][c] -vs- pass2_loci((c,r)) transposed rc
if grid[L_one_r-1][L_one_c-1]==letter2:
pass2_loci.append((L_one_c-1, L_one_r-1))
if grid[L_one_r][L_one_c-1]==letter2:
pass2_loci.append((L_one_c-1,L_one_r))
if grid[L_one_r+1][L_one_c-1]==letter2:
pass2_loci.append((L_one_c-1,L_one_r+1))
if grid[L_one_r+1][L_one_c]==letter2:
pass2_loci.append((L_one_c,L_one_r+1))
if grid[L_one_r+1][L_one_c+1]==letter2:
pass2_loci.append((L_one_c+1,L_one_r+1))
if grid[L_one_r][L_one_c+1]==letter2:
pass2_loci.append((L_one_c+1,L_one_r))
if grid[L_one_r-1][L_one_c+1]==letter2:
pass2_loci.append((L_one_c+1,L_one_r-1))
if grid[L_one_r-1][L_one_c]==letter2:
pass2_loci.append((L_one_c,L_one_r-1))
for locus2 in pass2_loci:
#vector index order r,c to match grid
vector = (locus2[1]-L_one_r, locus2[0]-L_one_c)
#use vector to search for rest of target
target_found = False
#start from locus of second letter
r = locus2[1]
c = locus2[0]
for ch in target[2:]:
r+=vector[0]
c+=vector[1]
if grid[r][c]==ch:
target_found = True
else:
target_found = False
break
if target_found:
print("Found the target")
| gpl-3.0 | -3,462,465,035,189,730,000 | 29.012987 | 74 | 0.566421 | false | 2.53956 | false | false | false |
ferdkuh/rlkart | src/main.py | 1 | 1996 |
# create timestep counter N and network
# create environments and learner agents
# main loop:
# for t in range(0, max_episode_size):
# get a_t[], v_t[] from network for the state of each agent:
# convert a_t to a single action index
#
# parallel for i in range(0, num_agents)
# new_state, reward = perform a_t[i] in environment[i]
#
# estimate R_tmax+1 for each agent
# compute R_t for each agent
#
# train network
# needed
# network # the neural network ops
# states # array of states [N,84,84,4], shared memory
# shared_action_indices # array of int shape = [N]
# num_agents = 16
# max_episode_size = 30
# agent_manager = 0
# for t in range(0, max_episode_size):
# ops = [network.policy_out, network.value_out]
# feed_dict = { network.states: states }
# # policy_out has shape [num_agents, num_actions]
# # value out has shape [num_agents]
# policy_out, value_out = session.run(ops, feed_dict)
# # get one action index for each agent, write them to the shared memory
# shared_action_indices = sample_action_from_policy(policy_out)
# # run each environment for one timestep
# # blocks current until update is done
# agent_manager.update_agents()
# # copy results from shared array to episode buffer
import multiprocessing as mp
import numpy as np
import ctypes as C
import mariokart
import logging
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)s) %(message)s',)
def sample_action_from_policy(probabilities):
# Subtract a tiny value from probabilities in order to avoid
# "ValueError: sum(pvals[:-1]) > 1.0" in numpy.multinomial
# as seen in: https://github.com/Alfredvc/paac/blob/master/paac.py
probabilities -= np.finfo(np.float32).epsneg
action_indices = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probabilities]
return action_indices
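# A tiny illustration of the sampler above (the policy matrix is made up;
# each row is a probability distribution over 8 actions):
#
#     probs = np.full((2, 8), 0.125, dtype=np.float32)
#     actions = sample_action_from_policy(probs)   # e.g. [3, 5]
#     assert len(actions) == 2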
# where can this function live?
#return np.frombuffer(shared, dtype).reshape(shape)
NUM_ACTIONS = 8
ROM_PATH = r"../res/Mario Kart 64 (U) [!].z64"
| gpl-3.0 | -3,481,168,514,002,871,000 | 25.972973 | 90 | 0.700401 | false | 2.979104 | false | false | false |
jriehl/numba | numba/targets/hashing.py | 1 | 23337 |
"""
Hash implementations for Numba types
"""
from __future__ import print_function, absolute_import, division
import math
import numpy as np
import sys
import ctypes
from collections import namedtuple
import llvmlite.binding as ll
import llvmlite.llvmpy.core as lc
from llvmlite import ir
from numba.extending import (
overload, overload_method, intrinsic, register_jitable)
from numba import types, errors
from numba.unsafe.bytes import grab_byte, grab_uint64_t
_py34_or_later = sys.version_info[:2] >= (3, 4)
if _py34_or_later:
# This is Py_hash_t, which is a Py_ssize_t, which has sizeof(size_t):
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyport.h#L91-L96
_hash_width = sys.hash_info.width
_Py_hash_t = getattr(types, 'int%s' % _hash_width)
_Py_uhash_t = getattr(types, 'uint%s' % _hash_width)
# Constants from CPython source, obtained by various means:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyhash.h
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
_PyHASH_MODULUS = _Py_uhash_t(sys.hash_info.modulus)
_PyHASH_BITS = 31 if types.intp.bitwidth == 32 else 61 # mersenne primes
_PyHASH_MULTIPLIER = 0xf4243 # 1000003UL
_PyHASH_IMAG = _PyHASH_MULTIPLIER
_PyLong_SHIFT = sys.int_info.bits_per_digit
_Py_HASH_CUTOFF = sys.hash_info.cutoff
_Py_hashfunc_name = sys.hash_info.algorithm
else:
_hash_width = types.intp.bitwidth
_Py_hash_t = getattr(types, 'int%s' % _hash_width)
_Py_uhash_t = getattr(types, 'uint%s' % _hash_width)
# these are largely just copied in from python 3 as reasonable defaults
_PyHASH_INF = 314159
_PyHASH_NAN = 0
_PyHASH_BITS = 31 if types.intp.bitwidth == 32 else 61 # mersenne primes
_PyHASH_MODULUS = _Py_uhash_t((1 << _PyHASH_BITS) - 1)
_PyHASH_MULTIPLIER = 0xf4243 # 1000003UL
_PyHASH_IMAG = _PyHASH_MULTIPLIER
_PyLong_SHIFT = 30 if types.intp.bitwidth == 64 else 15
_Py_HASH_CUTOFF = 0
# set this as siphash24 for py27... TODO: implement py27 string first!
_Py_hashfunc_name = "siphash24"
# hash(obj) is implemented by calling obj.__hash__()
@overload(hash)
def hash_overload(obj):
def impl(obj):
return obj.__hash__()
return impl
@register_jitable
def process_return(val):
asint = _Py_hash_t(val)
if (asint == int(-1)):
asint = int(-2)
return asint
# This is a translation of CPython's _Py_HashDouble:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L34-L129
@register_jitable(locals={'x': _Py_uhash_t,
'y': _Py_uhash_t,
'm': types.double,
'e': types.intc,
'sign': types.intc,
'_PyHASH_MODULUS': _Py_uhash_t,
'_PyHASH_BITS': types.intc})
def _Py_HashDouble(v):
if not np.isfinite(v):
if (np.isinf(v)):
if (v > 0):
return _PyHASH_INF
else:
return -_PyHASH_INF
else:
return _PyHASH_NAN
m, e = math.frexp(v)
sign = 1
if (m < 0):
sign = -1
m = -m
# process 28 bits at a time; this should work well both for binary
# and hexadecimal floating point.
x = 0
while (m):
x = ((x << 28) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - 28)
m *= 268435456.0 # /* 2**28 */
e -= 28
y = int(m) # /* pull out integer part */
m -= y
x += y
if x >= _PyHASH_MODULUS:
x -= _PyHASH_MODULUS
# /* adjust for the exponent; first reduce it modulo _PyHASH_BITS */
if e >= 0:
e = e % _PyHASH_BITS
else:
e = _PyHASH_BITS - 1 - ((-1 - e) % _PyHASH_BITS)
x = ((x << e) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - e)
x = x * sign
return process_return(x)
@intrinsic
def _fpext(tyctx, val):
def impl(cgctx, builder, signature, args):
val = args[0]
return builder.fpext(val, lc.Type.double())
sig = types.float64(types.float32)
return sig, impl
# This is a translation of CPython's long_hash, but restricted to the numerical
# domain reachable by int64/uint64 (i.e. no BigInt like support):
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/longobject.c#L2934-L2989
# obdigit is a uint32_t which is typedef'd to digit
# int32_t is typedef'd to sdigit
@register_jitable(locals={'x': _Py_uhash_t,
'p1': _Py_uhash_t,
'p2': _Py_uhash_t,
'p3': _Py_uhash_t,
'p4': _Py_uhash_t,
'_PyHASH_MODULUS': _Py_uhash_t,
'_PyHASH_BITS': types.int32,
'_PyLong_SHIFT': types.int32,
'x.1': _Py_uhash_t})
def _long_impl(val):
# This function assumes val came from a long int repr with val being a
# uint64_t this means having to split the input into PyLong_SHIFT size
# chunks in an unsigned hash wide type, max numba can handle is a 64bit int
# mask to select low _PyLong_SHIFT bits
_tmp_shift = 32 - _PyLong_SHIFT
mask_shift = (~types.uint32(0x0)) >> _tmp_shift
# a 64bit wide max means Numba only needs 3 x 30 bit values max,
# or 5 x 15 bit values max on 32bit platforms
i = (64 // _PyLong_SHIFT) + 1
# alg as per hash_long
x = 0
p3 = (_PyHASH_BITS - _PyLong_SHIFT)
for idx in range(i - 1, -1, -1):
p1 = x << _PyLong_SHIFT
p2 = p1 & _PyHASH_MODULUS
p4 = x >> p3
x = p2 | p4
# the shift and mask splits out the `ob_digit` parts of a Long repr
x += types.uint32((val >> idx * _PyLong_SHIFT) & mask_shift)
if x >= _PyHASH_MODULUS:
x -= _PyHASH_MODULUS
return _Py_hash_t(x)
# This has no CPython equivalent, CPython uses long_hash.
@overload_method(types.Integer, '__hash__')
@overload_method(types.Boolean, '__hash__')
def int_hash(val):
_HASH_I64_MIN = -2 if sys.maxsize <= 2 ** 32 else -4
# this is a bit involved due to the CPython repr of ints
def impl(val):
        # If the magnitude is under PyHASH_MODULUS, just return the value
        # itself as the hash; a couple of special cases if val == -val:
# 1. it's 0, in which case return 0
# 2. it's signed int minimum value, return the value CPython computes
# but Numba cannot as there's no type wide enough to hold the shifts.
#
# If the magnitude is greater than PyHASH_MODULUS then... if the value
# is negative then negate it switch the sign on the hash once computed
# and use the standard wide unsigned hash implementation
mag = abs(val)
if mag < _PyHASH_MODULUS:
if val == -val:
if val == 0:
ret = 0
else: # int64 min, -0x8000000000000000
ret = _Py_hash_t(_HASH_I64_MIN)
else:
ret = _Py_hash_t(val)
else:
needs_negate = False
if val < 0:
val = -val
needs_negate = True
ret = _long_impl(val)
if needs_negate:
ret = -ret
return process_return(ret)
return impl
# This is a translation of CPython's float_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/floatobject.c#L528-L532
@overload_method(types.Float, '__hash__')
def float_hash(val):
if val.bitwidth == 64:
def impl(val):
hashed = _Py_HashDouble(val)
return hashed
else:
def impl(val):
# widen the 32bit float to 64bit
fpextended = np.float64(_fpext(val))
hashed = _Py_HashDouble(fpextended)
return hashed
return impl
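# A small check sketch (assumes numba can JIT-compile on this machine): the
# integer and float algorithms above are direct translations of CPython's,
# so in-process results are expected to agree with the interpreter:
#
#     from numba import njit
#
#     @njit
#     def jit_hash(x):
#         return hash(x)
#
#     assert jit_hash(12345) == hash(12345)
#     assert jit_hash(-1.5) == hash(-1.5)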
# This is a translation of CPython's complex_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/complexobject.c#L408-L428
@overload_method(types.Complex, '__hash__')
def complex_hash(val):
def impl(val):
hashreal = hash(val.real)
hashimag = hash(val.imag)
# Note: if the imaginary part is 0, hashimag is 0 now,
# so the following returns hashreal unchanged. This is
# important because numbers of different types that
# compare equal must have the same hash value, so that
# hash(x + 0*j) must equal hash(x).
combined = hashreal + _PyHASH_IMAG * hashimag
return process_return(combined)
return impl
# This is a translation of CPython's tuplehash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/tupleobject.c#L347-L369
@register_jitable(locals={'x': _Py_uhash_t,
'y': _Py_hash_t,
'mult': _Py_uhash_t,
'l': _Py_hash_t, })
def _tuple_hash(tup):
tl = len(tup)
mult = _PyHASH_MULTIPLIER
x = _Py_uhash_t(0x345678)
# in C this is while(--l >= 0), i is indexing tup instead of *tup++
for i, l in enumerate(range(tl - 1, -1, -1)):
y = hash(tup[i])
xxory = (x ^ y)
x = xxory * mult
mult += _Py_hash_t((_Py_uhash_t(82520) + l + l))
x += _Py_uhash_t(97531)
return process_return(x)
# This is an obfuscated translation of CPython's tuplehash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/tupleobject.c#L347-L369
# The obfuscation occurs for a heterogeneous tuple as each tuple member needs
# a potentially different hash() function calling for it. This cannot be done at
# runtime as there's no way to iterate a heterogeneous tuple, so this is
# achieved by essentially unrolling the loop over the members and inserting a
# per-type hash function call for each member, and then simply computing the
# hash value in an inlined/rolling fashion.
@intrinsic
def _tuple_hash_resolve(tyctx, val):
def impl(cgctx, builder, signature, args):
typingctx = cgctx.typing_context
fnty = typingctx.resolve_value_type(hash)
tupty, = signature.args
tup, = args
lty = cgctx.get_value_type(signature.return_type)
x = ir.Constant(lty, 0x345678)
mult = ir.Constant(lty, _PyHASH_MULTIPLIER)
shift = ir.Constant(lty, 82520)
tl = len(tupty)
for i, packed in enumerate(zip(tupty.types, range(tl - 1, -1, -1))):
ty, l = packed
sig = fnty.get_call_type(tyctx, (ty,), {})
impl = cgctx.get_function(fnty, sig)
tuple_val = builder.extract_value(tup, i)
y = impl(builder, (tuple_val,))
xxory = builder.xor(x, y)
x = builder.mul(xxory, mult)
lconst = ir.Constant(lty, l)
mult = builder.add(mult, shift)
mult = builder.add(mult, lconst)
mult = builder.add(mult, lconst)
x = builder.add(x, ir.Constant(lty, 97531))
return x
sig = _Py_hash_t(val)
return sig, impl
@overload_method(types.BaseTuple, '__hash__')
def tuple_hash(val):
if isinstance(val, types.Sequence):
def impl(val):
return _tuple_hash(val)
return impl
else:
def impl(val):
hashed = _Py_hash_t(_tuple_hash_resolve(val))
return process_return(hashed)
return impl
# ------------------------------------------------------------------------------
# String/bytes hashing needs hashseed info, this is from:
# https://stackoverflow.com/a/41088757
# with thanks to Martijn Pieters
#
# Developer note:
# CPython makes use of an internal "hashsecret" which is essentially a struct
# containing some state that is set on CPython initialization and contains magic
# numbers used particularly in unicode/string hashing. This code binds to the
# Python runtime libraries in use by the current process and reads the
# "hashsecret" state so that it can be used by Numba. As this is done at runtime
# the behaviour and influence of the PYTHONHASHSEED environment variable is
# accommodated.
from ctypes import ( # noqa
c_size_t,
c_ubyte,
c_uint64,
pythonapi,
Structure,
Union,
) # noqa
class FNV(Structure):
_fields_ = [
('prefix', c_size_t),
('suffix', c_size_t)
]
class SIPHASH(Structure):
_fields_ = [
('k0', c_uint64),
('k1', c_uint64),
]
class DJBX33A(Structure):
_fields_ = [
('padding', c_ubyte * 16),
('suffix', c_size_t),
]
class EXPAT(Structure):
_fields_ = [
('padding', c_ubyte * 16),
('hashsalt', c_size_t),
]
class _Py_HashSecret_t(Union):
_fields_ = [
# ensure 24 bytes
('uc', c_ubyte * 24),
# two Py_hash_t for FNV
('fnv', FNV),
# two uint64 for SipHash24
('siphash', SIPHASH),
# a different (!) Py_hash_t for small string optimization
('djbx33a', DJBX33A),
('expat', EXPAT),
]
_hashsecret_entry = namedtuple('_hashsecret_entry', ['symbol', 'value'])
# Only a few members are needed at present
def _build_hashsecret():
"""Read hash secret from the Python process
Returns
-------
info : dict
- keys are "djbx33a_suffix", "siphash_k0", siphash_k1".
- values are the namedtuple[symbol:str, value:int]
"""
# Read hashsecret and inject it into the LLVM symbol map under the
# prefix `_numba_hashsecret_`.
pyhashsecret = _Py_HashSecret_t.in_dll(pythonapi, '_Py_HashSecret')
info = {}
def inject(name, val):
symbol_name = "_numba_hashsecret_{}".format(name)
val = ctypes.c_uint64(val)
addr = ctypes.addressof(val)
ll.add_symbol(symbol_name, addr)
info[name] = _hashsecret_entry(symbol=symbol_name, value=val)
inject('djbx33a_suffix', pyhashsecret.djbx33a.suffix)
inject('siphash_k0', pyhashsecret.siphash.k0)
inject('siphash_k1', pyhashsecret.siphash.k1)
return info
_hashsecret = _build_hashsecret()
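# For illustration, the mapping built above has entries like the following
# (the actual numeric values depend on the interpreter's hash randomisation):
#
#     _hashsecret['siphash_k0'].symbol   # '_numba_hashsecret_siphash_k0'
#     _hashsecret['siphash_k0'].value    # a ctypes.c_uint64 instance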
# ------------------------------------------------------------------------------
if _Py_hashfunc_name == 'siphash24':
# This is a translation of CPython's siphash24 function:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L287-L413
# /* *********************************************************************
# <MIT License>
# Copyright (c) 2013 Marek Majkowski <marek@popcount.org>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# </MIT License>
# Original location:
# https://github.com/majek/csiphash/
# Solution inspired by code from:
# Samuel Neves (supercop/crypto_auth/siphash24/little)
    # djb (supercop/crypto_auth/siphash24/little2)
# Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c)
# Modified for Python by Christian Heimes:
# - C89 / MSVC compatibility
# - _rotl64() on Windows
# - letoh64() fallback
# */
@register_jitable(locals={'x': types.uint64,
'b': types.uint64, })
def _ROTATE(x, b):
return types.uint64(((x) << (b)) | ((x) >> (types.uint64(64) - (b))))
@register_jitable(locals={'a': types.uint64,
'b': types.uint64,
'c': types.uint64,
'd': types.uint64,
's': types.uint64,
't': types.uint64, })
def _HALF_ROUND(a, b, c, d, s, t):
a += b
c += d
b = _ROTATE(b, s) ^ a
d = _ROTATE(d, t) ^ c
a = _ROTATE(a, 32)
return a, b, c, d
@register_jitable(locals={'v0': types.uint64,
'v1': types.uint64,
'v2': types.uint64,
'v3': types.uint64, })
def _DOUBLE_ROUND(v0, v1, v2, v3):
v0, v1, v2, v3 = _HALF_ROUND(v0, v1, v2, v3, 13, 16)
v2, v1, v0, v3 = _HALF_ROUND(v2, v1, v0, v3, 17, 21)
v0, v1, v2, v3 = _HALF_ROUND(v0, v1, v2, v3, 13, 16)
v2, v1, v0, v3 = _HALF_ROUND(v2, v1, v0, v3, 17, 21)
return v0, v1, v2, v3
@register_jitable(locals={'v0': types.uint64,
'v1': types.uint64,
'v2': types.uint64,
'v3': types.uint64,
'b': types.uint64,
'mi': types.uint64,
'tmp': types.Array(types.uint64, 1, 'C'),
't': types.uint64,
'mask': types.uint64,
'jmp': types.uint64,
'ohexefef': types.uint64})
def _siphash24(k0, k1, src, src_sz):
b = types.uint64(src_sz) << 56
v0 = k0 ^ types.uint64(0x736f6d6570736575)
v1 = k1 ^ types.uint64(0x646f72616e646f6d)
v2 = k0 ^ types.uint64(0x6c7967656e657261)
v3 = k1 ^ types.uint64(0x7465646279746573)
idx = 0
while (src_sz >= 8):
mi = grab_uint64_t(src, idx)
idx += 1
src_sz -= 8
v3 ^= mi
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
v0 ^= mi
# this is the switch fallthrough:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L390-L400
t = types.uint64(0x0)
boffset = idx * 8
ohexefef = types.uint64(0xff)
if src_sz >= 7:
jmp = (6 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 6)) << jmp)
if src_sz >= 6:
jmp = (5 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 5)) << jmp)
if src_sz >= 5:
jmp = (4 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 4)) << jmp)
if src_sz >= 4:
t &= types.uint64(0xffffffff00000000)
for i in range(4):
jmp = i * 8
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + i)) << jmp)
if src_sz >= 3:
jmp = (2 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 2)) << jmp)
if src_sz >= 2:
jmp = (1 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 1)) << jmp)
if src_sz >= 1:
mask = ~(ohexefef)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 0)))
b |= t
v3 ^= b
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
v0 ^= b
v2 ^= ohexefef
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
t = (v0 ^ v1) ^ (v2 ^ v3)
return t
elif _Py_hashfunc_name == 'fnv':
    # TODO: Should this instead warn and switch to siphash24?
raise NotImplementedError("FNV hashing is not implemented")
else:
msg = "Unsupported hashing algorithm in use %s" % _Py_hashfunc_name
raise ValueError(msg)
@intrinsic
def _inject_hashsecret_read(tyctx, name):
"""Emit code to load the hashsecret.
"""
if not isinstance(name, types.StringLiteral):
raise errors.TypingError("requires literal string")
sym = _hashsecret[name.literal_value].symbol
resty = types.uint64
sig = resty(name)
def impl(cgctx, builder, sig, args):
mod = builder.module
try:
# Search for existing global
gv = mod.get_global(sym)
except KeyError:
            # Inject the symbol if it does not already exist.
gv = ir.GlobalVariable(mod, ir.IntType(64), name=sym)
v = builder.load(gv)
return v
return sig, impl
def _load_hashsecret(name):
return _hashsecret[name].value
@overload(_load_hashsecret)
def _impl_load_hashsecret(name):
def imp(name):
return _inject_hashsecret_read(name)
return imp
# This is a translation of CPython's _Py_HashBytes:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L145-L191
@register_jitable(locals={'_hash': _Py_uhash_t})
def _Py_HashBytes(val, _len):
if (_len == 0):
return process_return(0)
if (_len < _Py_HASH_CUTOFF):
# TODO: this branch needs testing, needs a CPython setup for it!
# /* Optimize hashing of very small strings with inline DJBX33A. */
_hash = _Py_uhash_t(5381) # /* DJBX33A starts with 5381 */
for idx in range(_len):
_hash = ((_hash << 5) + _hash) + np.uint8(grab_byte(val, idx))
_hash ^= _len
_hash ^= _load_hashsecret('djbx33a_suffix')
else:
tmp = _siphash24(types.uint64(_load_hashsecret('siphash_k0')),
types.uint64(_load_hashsecret('siphash_k1')),
val, _len)
_hash = process_return(tmp)
return process_return(_hash)
# This is an approximate translation of CPython's unicode_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/unicodeobject.c#L11635-L11663
@overload_method(types.UnicodeType, '__hash__')
def unicode_hash(val):
from numba.unicode import _kind_to_byte_width
def impl(val):
kindwidth = _kind_to_byte_width(val._kind)
_len = len(val)
# use the cache if possible
current_hash = val._hash
if current_hash != -1:
return current_hash
else:
# cannot write hash value to cache in the unicode struct due to
# pass by value on the struct making the struct member immutable
return _Py_HashBytes(val._data, kindwidth * _len)
return impl
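# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module, assumed usage): with
# the overloads above registered, a jitted hash of a str should agree with
# CPython's builtin hash, since both read the same _Py_HashSecret state.
#
#   from numba import njit
#
#   @njit
#   def jit_hash(s):
#       return hash(s)
#
#   assert jit_hash("hello") == hash("hello")
# ---------------------------------------------------------------------------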
|
bsd-2-clause
| 3,617,599,016,980,870,000
| 34.305598
| 119
| 0.577624
| false
| 3.269403
| false
| false
| false
|
ShipleyCollege/ViPteam1
|
ExtractAndAnalyzeCode/ExtractNodes.py
|
1
|
2439
|
import os
import glob
import BuildNode
#INPUT_FILENAME = "../Sample Blueprint code/SimpleMoveToActor-Network - Code.txt";
#INPUT_FILENAME = "../Sample Blueprint code/SpawnObjectsWithForLoop - Code.txt";
#INPUT_FILENAME = "../Sample Blueprint code/SpawnRoundTargetPoint - Code.txt";
#INPUT_FILENAME = "../Sample Blueprint code/SpawnRoundTargetPointV2 - Code.txt";
#INPUT_FILENAME = "../Sample Blueprint code/SpawnRoundTargetPointV3 - Code.txt";
#INPUT_FILENAME = "../Sample Blueprint code/JISCTest1.txt";
#INPUT_FILENAME = "../Sample Blueprint code/ResetLevelAfterTimeout.txt";
INPUT_FILENAME = "../Sample Blueprint code/Randomly Spawn Sound Objects - Code.txt"
OUTPUT_FOLDER = "../GeneratedCode"
WORK_FOLDERNAME = "../GeneratedCode/Temp"
# function to remove temporary nodes extracted from network
def removeTempNodes():
if not os.path.exists(WORK_FOLDERNAME):
os.makedirs(WORK_FOLDERNAME)
files = glob.glob(WORK_FOLDERNAME + '/node*.txt')
for f in files:
os.remove(f)
def DoItNow(buildMode, filename, output_folder, debug=False):
global INPUT_FILENAME
INPUT_FILENAME = filename
global OUTPUT_FOLDER
OUTPUT_FOLDER = output_folder
global WORK_FOLDERNAME
WORK_FOLDERNAME = OUTPUT_FOLDER + "/Temp"
DoIt(buildMode, debug=debug)
def DoIt(buildMode, debug=False):
# make sure we start with an empty directory
removeTempNodes()
# break network into individual nodes
	# - each node starts and ends with 'Begin' and 'End' in column 0
nodeNumber = 0
print("Reading network from " + INPUT_FILENAME)
if (not os.path.isfile(INPUT_FILENAME)) or (not os.path.exists(INPUT_FILENAME)):
print("======> Input File Not Found <======")
else:
print("======> Input File Found <=======")
with open(INPUT_FILENAME) as f:
content = f.readlines()
for line in content:
if (line[0:5] == "Begin"):
text_file = open(WORK_FOLDERNAME + "/node" + str(nodeNumber) + ".txt", "w")
text_file.write(line)
if (line[0:3] == "End"):
text_file.close()
nodeNumber += 1
nrc = 0
nodeNumber = 0
files = glob.glob(WORK_FOLDERNAME + '/node*.txt')
for f in files:
print("Calling BuildNode with [" + buildMode + ", " + f + "]")
if debug:
nrc += BuildNode.doIt(OUTPUT_FOLDER, buildMode, f, str(nodeNumber))
else:
nrc += BuildNode.doIt(OUTPUT_FOLDER, buildMode, f, "")
nodeNumber += 1
print("Nodes Extracted : " + str(nodeNumber))
print("Nodes not recognized : " + str(nrc))
# removeTempNodes()
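# Example (assumed usage, not part of the original script): extract the nodes
# from one exported Blueprint network into ../GeneratedCode. The build mode
# string passed through to BuildNode.doIt is hypothetical here.
#
#   DoItNow("default", "../Sample Blueprint code/JISCTest1.txt",
#           "../GeneratedCode", debug=True)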
|
gpl-3.0
| 5,190,332,986,550,599,000
| 28.385542
| 84
| 0.696597
| false
| 3.175781
| false
| false
| false
|
ucb-bar/bar-crawl-web
|
flower/utils/tasks.py
|
1
|
2717
|
from __future__ import absolute_import
import datetime
import time
from celery.events.state import Task
from .search import satisfies_search_terms
def iter_tasks(events, limit=None, type=None, worker=None, state=None,
sort_by=None, received_start=None, received_end=None,
started_start=None, started_end=None, search_terms=None,
jobid=None):
i = 0
tasks = events.state.tasks_by_timestamp()
if sort_by is not None:
tasks = sort_tasks(tasks, sort_by)
convert = lambda x: time.mktime(
datetime.datetime.strptime(x, '%Y-%m-%d %H:%M').timetuple()
)
search_terms = search_terms or {}
any_value_search_term = search_terms.get('any', None)
result_search_term = search_terms.get('result', None)
args_search_terms = search_terms.get('args', None)
kwargs_search_terms = search_terms.get('kwargs', None)
for uuid, task in tasks:
if type and task.name != type:
continue
if worker and task.worker and task.worker.hostname != worker:
continue
if state and task.state != state:
continue
if received_start and task.received and\
task.received < convert(received_start):
continue
if received_end and task.received and\
task.received > convert(received_end):
continue
if started_start and task.started and\
task.started < convert(started_start):
continue
if started_end and task.started and\
task.started > convert(started_end):
continue
if not satisfies_search_terms(task, any_value_search_term, result_search_term, args_search_terms, kwargs_search_terms):
continue
if jobid is not None and eval(task.as_dict()['args'])[2] != jobid:
continue
yield uuid, task
i += 1
if i == limit:
break
sort_keys = {'name': str, 'state': str, 'received': float, 'started': float}
def sort_tasks(tasks, sort_by):
assert sort_by.lstrip('-') in sort_keys
reverse = False
if sort_by.startswith('-'):
sort_by = sort_by.lstrip('-')
reverse = True
for task in sorted(tasks,
key=lambda x: getattr(x[1], sort_by) or sort_keys[sort_by](),
reverse=reverse):
yield task
def get_task_by_id(events, task_id):
if hasattr(Task, '_fields'): # Old version
return events.state.tasks.get(task_id)
else:
_fields = Task._defaults.keys()
task = events.state.tasks.get(task_id)
if task is not None:
task._fields = _fields
return task
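# Illustrative usage (assumption, not part of the original module): iterate
# over the first ten successful tasks received after a given time; `events`
# is the flower events object normally held by the application.
#
#   for uuid, task in iter_tasks(events, limit=10, state='SUCCESS',
#                                received_start='2020-01-01 00:00'):
#       print(uuid, task.name)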
|
bsd-3-clause
| 4,780,227,939,877,630,000
| 34.285714
| 127
| 0.591829
| false
| 3.89255
| false
| false
| false
|
megaprojectske/megaprojects.co.ke
|
megaprojects/articles/migrations/0002_auto__add_field_image_reviewed__add_field_article_reviewed.py
|
1
|
7695
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Image.reviewed'
db.add_column(u'articles_image', 'reviewed',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Article.reviewed'
db.add_column(u'articles_article', 'reviewed',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Image.reviewed'
db.delete_column(u'articles_image', 'reviewed')
# Deleting field 'Article.reviewed'
db.delete_column(u'articles_article', 'reviewed')
models = {
u'articles.article': {
'Meta': {'ordering': "['-pubdate']", 'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'drupal_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'lead': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'program': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['programs.Program']", 'null': 'True', 'blank': 'True'}),
'pubdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 6, 5, 0, 0)'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'articles.image': {
'Meta': {'ordering': "['-article__pubdate', '-created']", 'object_name': 'Image'},
'alt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['articles.Article']"}),
'changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'programs.program': {
'Meta': {'ordering': "['title']", 'object_name': 'Program'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lead': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['articles']
|
apache-2.0
| 464,978,031,842,884,700
| 68.324324
| 187
| 0.548538
| false
| 3.688878
| false
| false
| false
|
adezfouli/savigp
|
GP/grad_checker.py
|
1
|
2303
|
__author__ = 'AT'
from DerApproximator import get_d1
from numpy import concatenate
from texttable import Texttable
class GradChecker:
""" A class for checking gradients. """
def __init__(self):
pass
@staticmethod
def check(f, f_grad, x0, name, verbose=False):
"""
        Checks whether the gradients of function ``f`` at point x0 are the same as the gradients provided by ``f_grad``.
``error`` is the difference between numerical and provided gradients.
'%error' = abs(error) / numerical gradient.
Parameters
----------
f : callable
input function to check gradients against
f_grad : callable
input function which provides gradients
x0 : ndarray
the point at which gradients should be calculated
name : list
            a list with one entry per parameter, which provides a name for each parameter. This
            name will be used when generating the output table
verbose : boolean
whether to print output for each parameter separately
Returns
-------
avg : float
average of the percentage error over all the parameters, i.e., mean(%error)
"""
g = f_grad(x0)
if len(g) != len(x0):
raise Exception('dimensions mismatch')
table = Texttable()
table.set_cols_align(["l", "r", "c", "c", "c"])
        table.set_cols_valign(["t", "m", "b", "r", "c"])
rows = []
rows += [["Name ", "analytical ", "numerical ", "error ", "% error "]]
if verbose:
            print('dimensions:', len(x0))
aver_error = 0
for i in range(len(x0)):
def f_i(x):
return f((concatenate((x0[:i], x, x0[(i+1):]))))
t = get_d1(f_i, [x0[i]])
            p_error = None
            if t != 0:
                p_error = abs(t - g[i]) / abs(t)
            rows += [[name[i], g[i], t, abs(t - g[i]), p_error]]
if abs(g[i]) <1e-4 and abs(t) < 1e-4:
pass
else:
aver_error += abs(t-g[i]) / abs(t)
if verbose:
                print('element:', i)
table.add_rows(rows)
if verbose:
print(table.draw())
return aver_error / len(x0)
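# Minimal sketch of how GradChecker.check might be used (assumed usage, not
# part of the original module): verify the analytical gradient of
# f(x) = sum(x**2), whose exact gradient is 2*x.
#
#   import numpy as np
#   f = lambda x: float(np.sum(np.asarray(x) ** 2))
#   f_grad = lambda x: 2.0 * np.asarray(x, dtype=float)
#   x0 = np.array([1.0, -2.0, 3.0])
#   avg_error = GradChecker.check(f, f_grad, x0, ['x0', 'x1', 'x2'])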
|
apache-2.0
| -3,464,575,333,593,513,000
| 31
| 111
| 0.520625
| false
| 3.910017
| false
| false
| false
|
tensorflow/federated
|
tensorflow_federated/python/core/backends/mapreduce/forms.py
|
1
|
16487
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standardized representation of logic deployable to MapReduce-like systems."""
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.impl.computation import computation_impl
from tensorflow_federated.python.core.impl.types import computation_types
def _check_tensorflow_computation(label, comp):
py_typecheck.check_type(comp, computation_base.Computation, label)
comp_proto = computation_impl.ComputationImpl.get_proto(comp)
which_comp = comp_proto.WhichOneof('computation')
if which_comp != 'tensorflow':
raise TypeError('Expected all computations supplied as arguments to '
'be plain TensorFlow, found {}.'.format(which_comp))
def _is_assignable_from_or_both_none(first, second):
if first is None:
return second is None
return first.is_assignable_from(second)
def _is_two_tuple(t: computation_types.Type) -> bool:
return t.is_struct() and len(t) == 2
def _check_accepts_two_tuple(label: str, comp: computation_base.Computation):
param_type = comp.type_signature.parameter
if not _is_two_tuple(param_type):
raise TypeError(
f'The `{label}` computation accepts a parameter of type\n{param_type}\n'
'that is not a two-tuple.')
def _check_returns_two_tuple(label: str, comp: computation_base.Computation):
result_type = comp.type_signature.result
if not _is_two_tuple(result_type):
raise TypeError(
f'The `{label}` computation returns a result of type\n{result_type}\n'
'that is not a two-tuple.')
class BroadcastForm(object):
"""Standardized representation of server-to-client logic.
This class is designed to represent computations of the form:
```
server_data_type = self.compute_server_context.type_signature.parameter
client_data_type = self.client_processing.type_signature.parameter[1]
@tff.federated_computation(server_data_type, client_data_type)
def _(server_data, client_data):
# Select out the bit of server context to send to the clients.
context_at_server = tff.federated_map(
self.compute_server_context, server_data)
# Broadcast the context to the clients.
context_at_clients = tff.federated_broadcast(context_at_server)
# Compute some value on the clients based on the server context and
# the client data.
return tff.federated_map(
self.client_processing, (context_at_clients, client_data))
```
"""
def __init__(self,
compute_server_context,
client_processing,
server_data_label=None,
client_data_label=None):
for label, comp in (
('compute_server_context', compute_server_context),
('client_processing', client_processing),
):
_check_tensorflow_computation(label, comp)
_check_accepts_two_tuple('client_processing', client_processing)
client_first_arg_type = client_processing.type_signature.parameter[0]
server_context_type = compute_server_context.type_signature.result
if not _is_assignable_from_or_both_none(client_first_arg_type,
server_context_type):
raise TypeError(
'The `client_processing` computation expects an argument tuple with '
f'type\n{client_first_arg_type}\nas the first element (the context '
'type from the server), which does not match the result type\n'
f'{server_context_type}\n of `compute_server_context`.')
self._compute_server_context = compute_server_context
self._client_processing = client_processing
if server_data_label is not None:
py_typecheck.check_type(server_data_label, str)
self._server_data_label = server_data_label
if client_data_label is not None:
      py_typecheck.check_type(client_data_label, str)
self._client_data_label = client_data_label
@property
def compute_server_context(self):
return self._compute_server_context
@property
def client_processing(self):
return self._client_processing
@property
def server_data_label(self):
return self._server_data_label
@property
def client_data_label(self):
return self._client_data_label
def summary(self, print_fn=print):
"""Prints a string summary of the `BroadcastForm`.
Args:
print_fn: Print function to use. It will be called on each line of the
summary in order to capture the string summary.
"""
for label, comp in (
('compute_server_context', self.compute_server_context),
('client_processing', self.client_processing),
):
# Add sufficient padding to align first column;
# len('compute_server_context') == 22
print_fn('{:<22}: {}'.format(
label, comp.type_signature.compact_representation()))
class MapReduceForm(object):
"""Standardized representation of logic deployable to MapReduce-like systems.
This class docstring describes the purpose of `MapReduceForm` as a data
structure; for a discussion of the conceptual content of an instance `mrf` of
`MapReduceForm`, including how precisely it maps to a single federated round,
see the [package-level docstring](
https://www.tensorflow.org/federated/api_docs/python/tff/backends/mapreduce).
This standardized representation can be used to describe a range of iterative
processes representable as a single round of MapReduce-like processing, and
deployable to MapReduce-like systems that are only capable of executing plain
TensorFlow code.
  Non-iterative processes, or processes that do not originate at the server, can
be described by `MapReduceForm`, as well as degenerate cases like computations
which use exclusively one of the two possible aggregation paths.
Instances of this class can be generated by TFF's transformation pipeline and
consumed by a variety of backends that have the ability to orchestrate their
execution in a MapReduce-like fashion. The latter can include systems that run
  static data pipelines such as Apache Beam or Hadoop, but also platforms like
  the one described in the following paper:
"Towards Federated Learning at Scale: System Design"
https://arxiv.org/pdf/1902.01046.pdf
It should be noted that not every computation that proceeds in synchronous
rounds is representable as an instance of this class. In particular, this
representation is not suitable for computations that involve multiple phases
of processing, and does not generalize to arbitrary static data pipelines.
Generalized representations that can take advantage of the full expressiveness
of Apache Beam-like systems may emerge at a later time, and will be supported
by a separate set of tools, with a more expressive canonical representation.
The requirement that the variable constituents of the template be in the form
of pure TensorFlow code (not arbitrary TFF constructs) reflects the intent
for instances of this class to be easily converted into a representation that
can be compiled into a system that does *not* have the ability to interpret
the full TFF language (as defined in `computation.proto`), but that does have
the ability to run TensorFlow. Client-side logic in such systems could be
deployed in a number of ways, e.g., as shards in a MapReduce job, to mobile or
embedded devices, etc.
The individual TensorFlow computations that constitute an iterative process
in this form are supplied as constructor arguments. Generally, this class will
not be instantiated by a programmer directly but targeted by a sequence of
transformations that take a `tff.templates.IterativeProcess` and produce the
appropriate pieces of logic.
"""
def __init__(self,
initialize,
prepare,
work,
zero,
accumulate,
merge,
report,
bitwidth,
update,
server_state_label=None,
client_data_label=None):
"""Constructs a representation of a MapReduce-like iterative process.
Note: All the computations supplied here as arguments must be TensorFlow
computations, i.e., instances of `tff.Computation` constructed by the
`tff.tf_computation` decorator/wrapper.
Args:
initialize: The computation that produces the initial server state.
prepare: The computation that prepares the input for the clients.
work: The client-side work computation.
zero: The computation that produces the initial state for accumulators.
accumulate: The computation that adds a client update to an accumulator.
merge: The computation to use for merging pairs of accumulators.
report: The computation that produces the final server-side aggregate for
the top level accumulator (the global update).
bitwidth: The computation that produces the bitwidth for secure sum.
update: The computation that takes the global update and the server state
and produces the new server state, as well as server-side output.
server_state_label: Optional string label for the server state.
client_data_label: Optional string label for the client data.
Raises:
TypeError: If the Python or TFF types of the arguments are invalid or not
compatible with each other.
AssertionError: If the manner in which the given TensorFlow computations
are represented by TFF does not match what this code is expecting (this
is an internal error that requires code update).
"""
for label, comp in (
('initialize', initialize),
('prepare', prepare),
('work', work),
('zero', zero),
('accumulate', accumulate),
('merge', merge),
('report', report),
('bitwidth', bitwidth),
('update', update),
):
_check_tensorflow_computation(label, comp)
prepare_arg_type = prepare.type_signature.parameter
init_result_type = initialize.type_signature.result
if not _is_assignable_from_or_both_none(prepare_arg_type, init_result_type):
raise TypeError(
'The `prepare` computation expects an argument of type {}, '
'which does not match the result type {} of `initialize`.'.format(
prepare_arg_type, init_result_type))
_check_accepts_two_tuple('work', work)
work_2nd_arg_type = work.type_signature.parameter[1]
prepare_result_type = prepare.type_signature.result
if not _is_assignable_from_or_both_none(work_2nd_arg_type,
prepare_result_type):
raise TypeError(
'The `work` computation expects an argument tuple with type {} as '
'the second element (the initial client state from the server), '
'which does not match the result type {} of `prepare`.'.format(
work_2nd_arg_type, prepare_result_type))
_check_returns_two_tuple('work', work)
py_typecheck.check_len(accumulate.type_signature.parameter, 2)
accumulate.type_signature.parameter[0].check_assignable_from(
zero.type_signature.result)
accumulate_2nd_arg_type = accumulate.type_signature.parameter[1]
work_client_update_type = work.type_signature.result[0]
if not _is_assignable_from_or_both_none(accumulate_2nd_arg_type,
work_client_update_type):
raise TypeError(
'The `accumulate` computation expects a second argument of type {}, '
'which does not match the expected {} as implied by the type '
'signature of `work`.'.format(accumulate_2nd_arg_type,
work_client_update_type))
accumulate.type_signature.parameter[0].check_assignable_from(
accumulate.type_signature.result)
py_typecheck.check_len(merge.type_signature.parameter, 2)
merge.type_signature.parameter[0].check_assignable_from(
accumulate.type_signature.result)
merge.type_signature.parameter[1].check_assignable_from(
accumulate.type_signature.result)
merge.type_signature.parameter[0].check_assignable_from(
merge.type_signature.result)
report.type_signature.parameter.check_assignable_from(
merge.type_signature.result)
expected_update_parameter_type = computation_types.to_type([
initialize.type_signature.result,
[report.type_signature.result, work.type_signature.result[1]],
])
if not _is_assignable_from_or_both_none(update.type_signature.parameter,
expected_update_parameter_type):
raise TypeError(
'The `update` computation expects an argument of type {}, '
'which does not match the expected {} as implied by the type '
'signatures of `initialize`, `report`, and `work`.'.format(
update.type_signature.parameter, expected_update_parameter_type))
_check_returns_two_tuple('update', update)
updated_state_type = update.type_signature.result[0]
if not prepare_arg_type.is_assignable_from(updated_state_type):
raise TypeError(
'The `update` computation returns a result tuple whose first element '
f'(the updated state type of the server) is type:\n'
f'{updated_state_type}\n'
f'which is not assignable to the state parameter type of `prepare`:\n'
f'{prepare_arg_type}')
self._initialize = initialize
self._prepare = prepare
self._work = work
self._zero = zero
self._accumulate = accumulate
self._merge = merge
self._report = report
self._bitwidth = bitwidth
self._update = update
if server_state_label is not None:
py_typecheck.check_type(server_state_label, str)
self._server_state_label = server_state_label
if client_data_label is not None:
py_typecheck.check_type(client_data_label, str)
self._client_data_label = client_data_label
@property
def initialize(self):
return self._initialize
@property
def prepare(self):
return self._prepare
@property
def work(self):
return self._work
@property
def zero(self):
return self._zero
@property
def accumulate(self):
return self._accumulate
@property
def merge(self):
return self._merge
@property
def report(self):
return self._report
@property
def bitwidth(self):
return self._bitwidth
@property
def update(self):
return self._update
@property
def server_state_label(self):
return self._server_state_label
@property
def client_data_label(self):
return self._client_data_label
@property
def securely_aggregates_tensors(self) -> bool:
"""Whether the `MapReduceForm` uses secure aggregation."""
# Tensors aggregated over `federated_secure_sum_bitwidth` are output in the
# second tuple element from `work()`.
work_result_type = self.work.type_signature.result
assert len(work_result_type) == 2
return not work_result_type[1].is_equivalent_to(
computation_types.StructType([]))
def summary(self, print_fn=print):
"""Prints a string summary of the `MapReduceForm`.
Args:
print_fn: Print function to use. It will be called on each line of the
summary in order to capture the string summary.
"""
for label, comp in (
('initialize', self.initialize),
('prepare', self.prepare),
('work', self.work),
('zero', self.zero),
('accumulate', self.accumulate),
('merge', self.merge),
('report', self.report),
('bitwidth', self.bitwidth),
('update', self.update),
):
# Add sufficient padding to align first column; len('initialize') == 10
print_fn('{:<10}: {}'.format(
label, comp.type_signature.compact_representation()))
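# Illustrative note (not part of the original module): a MapReduceForm is
# normally produced by TFF's compiler transformations from an iterative
# process rather than built by hand; a hypothetical inspection sketch:
#
#   mrf = ...  # obtained from the mapreduce backend's compiler pipeline
#   mrf.summary()                            # types of the nine TF computations
#   print(mrf.securely_aggregates_tensors)   # whether secure sum is used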
|
apache-2.0
| 4,678,343,868,907,108,000
| 39.212195
| 80
| 0.691029
| false
| 4.11969
| false
| false
| false
|
azam-a/gocd2gmail2slack
|
gocd2gmail2slack/integrations.py
|
1
|
1445
|
import gmail as Gm
import messages as Msg
import slack
from cfg.config import (
WEBHOOK_URL,
GOCD_DASHBOARD_URL,
)
def main():
try:
service, labels, messages_details = initialize()
process(service, labels, messages_details)
except:
pass
def initialize():
service = Gm.get_service()
labels = Gm.get_labels(service)
initial_messages = Gm.get_messages(service, include_labels=['UNREAD'])
messages_details = Gm.get_messages_details(service, initial_messages)
return (service, labels, messages_details)
def process(service, labels, messages_details):
for item in messages_details:
subject = Msg.get_subject(item)
if Msg.is_gocd_pattern(subject):
gocd_details = Msg.get_gocd_details(subject)
if slack.is_matching_send_rule(gocd_details):
body = Msg.get_body(item)
changeset = Msg.get_changeset_info(body)
text = (slack
.message_builder(gocd_details,
changeset,
GOCD_DASHBOARD_URL))
slack.send_to_slack(text, WEBHOOK_URL)
Gm.add_label(service, Msg.get_id(item),
'SENT_TO_SLACK', labels)
Gm.remove_label(service, Msg.get_id(item),
'UNREAD', labels)
if __name__ == "__main__":
main()
|
mit
| 4,342,497,860,027,901,400
| 26.264151
| 74
| 0.559862
| false
| 3.753247
| false
| false
| false
|
MahjongRepository/tenhou-python-bot
|
project/game/ai/helpers/defence.py
|
1
|
16886
|
from typing import Optional
class TileDanger:
IMPOSSIBLE_WAIT = {
"value": 0,
"description": "Impossible wait",
}
SAFE_AGAINST_THREATENING_HAND = {
"value": 0,
"description": "Tile can't be used by analyzed threat",
}
# honor tiles
HONOR_THIRD = {
"value": 40,
"description": "Third honor tile (early game)",
}
NON_YAKUHAI_HONOR_SECOND_EARLY = {
"value": 60,
"description": "Second non-yakuhai honor (early game)",
}
NON_YAKUHAI_HONOR_SHONPAI_EARLY = {
"value": 120,
"description": "Shonpai non-yakuhai honor (early game)",
}
YAKUHAI_HONOR_SECOND_EARLY = {
"value": 80,
"description": "Second yakuhai honor (early game)",
}
YAKUHAI_HONOR_SHONPAI_EARLY = {
"value": 160,
"description": "Shonpai yakuhai honor (early game)",
}
DOUBLE_YAKUHAI_HONOR_SECOND_EARLY = {
"value": 120,
"description": "Second double-yakuhai honor (early game)",
}
DOUBLE_YAKUHAI_HONOR_SHONPAI_EARLY = {
"value": 240,
"description": "Shonpai double-yakuhai honor (early game)",
}
NON_YAKUHAI_HONOR_SECOND_MID = {
"value": 80,
"description": "Second non-yakuhai honor (mid game)",
}
NON_YAKUHAI_HONOR_SHONPAI_MID = {
"value": 160,
"description": "Shonpai non-yakuhai honor (mid game)",
}
YAKUHAI_HONOR_SECOND_MID = {
"value": 120,
"description": "Second yakuhai honor (mid game)",
}
DOUBLE_YAKUHAI_HONOR_SECOND_MID = {
"value": 200,
"description": "Second double-yakuhai honor (mid game)",
}
YAKUHAI_HONOR_SHONPAI_MID = {
"value": 240,
"description": "Shonpai yakuhai honor (mid game)",
}
DOUBLE_YAKUHAI_HONOR_SHONPAI_MID = {
"value": 480,
"description": "Shonpai double-yakuhai honor (mid game)",
}
NON_YAKUHAI_HONOR_SECOND_LATE = {
"value": 160,
"description": "Second non-yakuhai honor (late game)",
}
NON_YAKUHAI_HONOR_SHONPAI_LATE = {
"value": 240,
"description": "Shonpai non-yakuhai honor (late game)",
}
YAKUHAI_HONOR_SECOND_LATE = {
"value": 200,
"description": "Second yakuhai honor (late game)",
}
DOUBLE_YAKUHAI_HONOR_SECOND_LATE = {
"value": 300,
"description": "Second double-yakuhai honor (late game)",
}
YAKUHAI_HONOR_SHONPAI_LATE = {
"value": 400,
"description": "Shonpai yakuhai honor (late game)",
}
DOUBLE_YAKUHAI_HONOR_SHONPAI_LATE = {
"value": 600,
"description": "Shonpai double-yakuhai honor (late game)",
}
# kabe tiles
NON_SHONPAI_KABE_STRONG = {
"value": 40,
"description": "Non-shonpai strong kabe tile",
}
SHONPAI_KABE_STRONG = {
"value": 200,
"description": "Shonpai strong kabe tile",
}
NON_SHONPAI_KABE_WEAK = {
"value": 80,
"description": "Non-shonpai weak kabe tile",
}
    # weak shonpai kabe is actually less suspicious than a strong one
SHONPAI_KABE_WEAK = {
"value": 120,
"description": "Shonpai weak kabe tile",
}
NON_SHONPAI_KABE_STRONG_OPEN_HAND = {
"value": 60,
"description": "Non-shonpai strong kabe tile (against open hand)",
}
SHONPAI_KABE_STRONG_OPEN_HAND = {
"value": 300,
"description": "Shonpai strong kabe tile (against open hand)",
}
NON_SHONPAI_KABE_WEAK_OPEN_HAND = {
"value": 120,
"description": "Non-shonpai weak kabe tile (against open hand)",
}
SHONPAI_KABE_WEAK_OPEN_HAND = {
"value": 200,
"description": "Shonpai weak kabe tile (against open hand)",
}
# suji tiles
SUJI_19_NOT_SHONPAI = {
"value": 40,
"description": "Non-shonpai 1 or 9 with suji",
}
SUJI_19_SHONPAI = {
"value": 80,
"description": "Shonpai 1 or 9 with suji",
}
SUJI = {
"value": 120,
"description": "Default suji",
}
SUJI_28_ON_RIICHI = {
"value": 300,
"description": "Suji on 2 or 8 on riichi declaration",
}
SUJI_37_ON_RIICHI = {
"value": 400,
"description": "Suji on 3 or 7 on riichi declaration",
}
SUJI_19_NOT_SHONPAI_OPEN_HAND = {
"value": 100,
"description": "Non-shonpai 1 or 9 with suji (against open hand)",
}
SUJI_19_SHONPAI_OPEN_HAND = {
"value": 200,
"description": "Shonpai 1 or 9 with suji (against open hand)",
}
SUJI_OPEN_HAND = {
"value": 160,
"description": "Default suji (against open hand)",
}
# possible ryanmen waits
RYANMEN_BASE_SINGLE = {
"value": 300,
"description": "Base danger for possible wait in a single ryanmen",
}
RYANMEN_BASE_DOUBLE = {
"value": 500,
"description": "Base danger for possible wait in two ryanmens",
}
# bonus dangers for possible ryanmen waits
BONUS_MATAGI_SUJI = {
"value": 80,
"description": "Additional danger for matagi-suji pattern",
}
BONUS_AIDAYONKEN = {
"value": 80,
"description": "Additional danger for aidayonken pattern",
}
BONUS_EARLY_5 = {
"value": 80,
"description": "Additional danger for 1 and 9 in case of early 5 discarded in that suit",
}
BONUS_EARLY_28 = {
"value": -80,
"description": "Negative danger for 19 after early 28",
}
BONUS_EARLY_37 = {
"value": -60,
"description": "Negative danger for 1289 after early 37",
}
# doras
DORA_BONUS = {
"value": 200,
"description": "Additional danger for tile being a dora",
}
DORA_CONNECTOR_BONUS = {
"value": 80,
"description": "Additional danger for tile being dora connector",
}
# early discards - these are considered only if ryanmen is possible
NEGATIVE_BONUS_19_EARLY_2378 = {
"value": -80,
"description": "Subtracted danger for 1 or 9 because of early 2, 3, 7 or 8 discard",
}
NEGATIVE_BONUS_28_EARLY_37 = {
"value": -40,
"description": "Subtracted danger for 2 or 8 because of early 3 or 7 discard",
}
# bonus danger for different yaku
# they may add up
HONITSU_THIRD_HONOR_BONUS_DANGER = {
"value": 80,
"description": "Additional danger for third honor against honitsu hands",
}
HONITSU_SECOND_HONOR_BONUS_DANGER = {
"value": 160,
"description": "Additional danger for second honor against honitsu hands",
}
HONITSU_SHONPAI_HONOR_BONUS_DANGER = {
"value": 280,
"description": "Additional danger for shonpai honor against honitsu hands",
}
    TOITOI_SECOND_YAKUHAI_HONOR_BONUS_DANGER = {
        "value": 120,
        "description": "Additional danger for second yakuhai honor against toitoi hands",
    }
    TOITOI_SHONPAI_NON_YAKUHAI_BONUS_DANGER = {
        "value": 160,
        "description": "Additional danger for non-yakuhai shonpai tiles against toitoi hands",
    }
TOITOI_SHONPAI_YAKUHAI_BONUS_DANGER = {
"value": 240,
"description": "Additional danger for shonpai yakuhai against toitoi hands",
}
    TOITOI_SHONPAI_DORA_BONUS_DANGER = {
        "value": 240,
        "description": "Additional danger for shonpai dora tiles against toitoi hands",
    }
ATODZUKE_YAKUHAI_HONOR_BONUS_DANGER = {
"value": 400,
"description": "Bonus danger yakuhai tiles for atodzuke yakuhai hands",
}
###############
# The following constants don't follow the logic of other constants, so they are not dictionaries
##############
# count of possible forms
FORM_BONUS_DESCRIPTION = "Forms bonus"
FORM_BONUS_KANCHAN = 3
FORM_BONUS_PENCHAN = 3
FORM_BONUS_SYANPON = 12
FORM_BONUS_TANKI = 12
FORM_BONUS_RYANMEN = 8
# suji counting, (SUJI_COUNT_BOUNDARY - n) * SUJI_COUNT_MODIFIER
# We count how many ryanmen waits are still possible. Maximum n is 18, minimum is 1.
    # If there are many possible ryanmens left, we consider the situation less
    # dangerous than if there are few possible ryanmens left.
    # If n is 0, we don't consider this as a factor at all, because that means the wait is not ryanmen.
    # Actually, that should mean that non-ryanmen waits are now much more dangerous than before.
SUJI_COUNT_BOUNDARY = 10
SUJI_COUNT_MODIFIER = 20
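    # Worked example (illustrative, not from the original source): with the
    # defaults above, n = 3 remaining possible ryanmen waits would add
    # (10 - 3) * 20 = 140 danger, while n = 9 would add only (10 - 9) * 20 = 20.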
# borders indicating late round
ALMOST_LATE_ROUND = 10
LATE_ROUND = 12
VERY_LATE_ROUND = 15
@staticmethod
def make_unverified_suji_coeff(value):
return {"value": value, "description": "Additional bonus for number of unverified suji"}
@staticmethod
def is_safe(danger):
return danger == TileDanger.IMPOSSIBLE_WAIT or danger == TileDanger.SAFE_AGAINST_THREATENING_HAND
class DangerBorder:
IGNORE = 1000000
EXTREME = 1200
VERY_HIGH = 1000
HIGH = 800
UPPER_MEDIUM = 700
MEDIUM = 600
LOWER_MEDIUM = 500
UPPER_LOW = 400
LOW = 300
VERY_LOW = 200
EXTREMELY_LOW = 120
LOWEST = 80
BETAORI = 0
one_step_down_dict = dict(
{
IGNORE: EXTREME,
EXTREME: VERY_HIGH,
VERY_HIGH: HIGH,
HIGH: UPPER_MEDIUM,
UPPER_MEDIUM: MEDIUM,
MEDIUM: LOWER_MEDIUM,
LOWER_MEDIUM: UPPER_LOW,
UPPER_LOW: LOW,
LOW: VERY_LOW,
VERY_LOW: EXTREMELY_LOW,
EXTREMELY_LOW: LOWEST,
LOWEST: BETAORI,
BETAORI: BETAORI,
}
)
one_step_up_dict = dict(
{
IGNORE: IGNORE,
EXTREME: IGNORE,
VERY_HIGH: EXTREME,
HIGH: VERY_HIGH,
UPPER_MEDIUM: HIGH,
MEDIUM: UPPER_MEDIUM,
LOWER_MEDIUM: MEDIUM,
UPPER_LOW: LOWER_MEDIUM,
LOW: UPPER_LOW,
VERY_LOW: LOW,
EXTREMELY_LOW: VERY_LOW,
LOWEST: EXTREMELY_LOW,
# betaori means betaori, don't tune it up
BETAORI: BETAORI,
}
)
late_danger_dict = dict(
{
IGNORE: IGNORE,
EXTREME: VERY_HIGH,
VERY_HIGH: HIGH,
HIGH: UPPER_MEDIUM,
UPPER_MEDIUM: MEDIUM,
MEDIUM: LOWER_MEDIUM,
LOWER_MEDIUM: UPPER_LOW,
UPPER_LOW: LOW,
LOW: VERY_LOW,
VERY_LOW: EXTREMELY_LOW,
EXTREMELY_LOW: LOWEST,
LOWEST: BETAORI,
BETAORI: BETAORI,
}
)
very_late_danger_dict = dict(
{
IGNORE: VERY_HIGH,
EXTREME: HIGH,
VERY_HIGH: UPPER_MEDIUM,
HIGH: MEDIUM,
UPPER_MEDIUM: LOWER_MEDIUM,
MEDIUM: UPPER_LOW,
LOWER_MEDIUM: LOW,
UPPER_LOW: VERY_LOW,
LOW: EXTREMELY_LOW,
VERY_LOW: LOWEST,
EXTREMELY_LOW: BETAORI,
LOWEST: BETAORI,
BETAORI: BETAORI,
}
)
@staticmethod
def tune_down(danger_border, steps):
assert steps >= 0
for _ in range(steps):
danger_border = DangerBorder.one_step_down_dict[danger_border]
return danger_border
@staticmethod
def tune_up(danger_border, steps):
assert steps >= 0
for _ in range(steps):
danger_border = DangerBorder.one_step_up_dict[danger_border]
return danger_border
@staticmethod
def tune(danger_border, value):
if value > 0:
return DangerBorder.tune_up(danger_border, value)
elif value < 0:
return DangerBorder.tune_down(danger_border, abs(value))
return danger_border
@staticmethod
def tune_for_round(player, danger_border, shanten):
danger_border_dict = None
if shanten == 0:
if len(player.discards) > TileDanger.LATE_ROUND:
danger_border_dict = DangerBorder.late_danger_dict
if len(player.discards) > TileDanger.VERY_LATE_ROUND:
danger_border_dict = DangerBorder.very_late_danger_dict
elif shanten == 1:
if len(player.discards) > TileDanger.LATE_ROUND:
danger_border_dict = DangerBorder.very_late_danger_dict
elif shanten == 2:
if len(player.discards) > TileDanger.ALMOST_LATE_ROUND:
danger_border_dict = DangerBorder.late_danger_dict
if len(player.discards) > TileDanger.LATE_ROUND:
return DangerBorder.BETAORI
if not danger_border_dict:
return danger_border
return danger_border_dict[danger_border]
class EnemyDanger:
THREAT_RIICHI = {
"id": "threatening_riichi",
"description": "Enemy called riichi",
}
    THREAT_OPEN_HAND_AND_MULTIPLE_DORA = {
        "id": "threatening_open_hand_dora",
        "description": "Enemy has an open hand with 3+ dora and it is now step 6+",
    }
THREAT_EXPENSIVE_OPEN_HAND = {
"id": "threatening_3_han_meld",
"description": "Enemy opened hand has 3+ han",
}
THREAT_OPEN_HAND_UNKNOWN_COST = {
"id": "threatening_melds",
"description": "Enemy opened hand and we are not sure if it's expensive",
}
class TileDangerHandler:
"""
Place to keep information of tile danger level for each player
"""
values: dict
weighted_cost: Optional[int]
danger_border: dict
can_be_used_for_ryanmen: bool
    # if we estimate that one threat's cost is less than COST_PERCENT_THRESHOLD
    # percent of another's, we ignore it when choosing a tile to fold with
COST_PERCENT_THRESHOLD = 40
def __init__(self):
"""
1, 2, 3 is our opponents seats
"""
self.values = {1: [], 2: [], 3: []}
self.weighted_cost = 0
self.danger_border = {1: {}, 2: {}, 3: {}}
self.can_be_used_for_ryanmen: bool = False
def set_danger(self, player_seat, danger):
self.values[player_seat].append(danger)
def set_danger_border(self, player_seat, danger_border: int, our_hand_cost: int, enemy_hand_cost: int):
self.danger_border[player_seat] = {
"border": danger_border,
"our_hand_cost": our_hand_cost,
"enemy_hand_cost": enemy_hand_cost,
}
def get_danger_reasons(self, player_seat):
return self.values[player_seat]
def get_danger_border(self, player_seat):
return self.danger_border[player_seat]
def get_total_danger_for_player(self, player_seat):
total = sum([x["value"] for x in self.values[player_seat]])
assert total >= 0
return total
def get_max_danger(self):
return max(self._danger_array)
def get_sum_danger(self):
return sum(self._danger_array)
def get_weighted_danger(self):
costs = [
self.get_danger_border(1).get("enemy_hand_cost") or 0,
self.get_danger_border(2).get("enemy_hand_cost") or 0,
self.get_danger_border(3).get("enemy_hand_cost") or 0,
]
max_cost = max(costs)
if max_cost == 0:
return 0
dangers = self._danger_array
weighted = 0
num_dangers = 0
for cost, danger in zip(costs, dangers):
if cost * 100 / max_cost >= self.COST_PERCENT_THRESHOLD:
# divide by 8000 so it's more human-readable
weighted += cost * danger / 8000
num_dangers += 1
assert num_dangers > 0
# this way we balance out tiles that are kinda safe against all the threats
        # and tiles that are genbutsu against one threat and are dangerous against the other
if num_dangers == 1:
danger_multiplier = 1
else:
danger_multiplier = 0.8
weighted *= danger_multiplier
return weighted
def get_min_danger_border(self):
return min(self._borders_array)
def clear_danger(self, player_seat):
self.values[player_seat] = []
self.danger_border[player_seat] = {}
def is_danger_acceptable(self):
for border, danger in zip(self._borders_array, self._danger_array):
if border < danger:
return False
return True
@property
def _danger_array(self):
return [
self.get_total_danger_for_player(1),
self.get_total_danger_for_player(2),
self.get_total_danger_for_player(3),
]
@property
def _borders_array(self):
return [
self.get_danger_border(1).get("border") or 0,
self.get_danger_border(2).get("border") or 0,
self.get_danger_border(3).get("border") or 0,
]
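# Illustrative sketch (assumed usage, not part of the original module):
# accumulate danger for one tile against a riichi from the player in seat 1
# and check whether discarding it stays within the allowed border.
#
#   handler = TileDangerHandler()
#   handler.set_danger(1, TileDanger.SUJI)
#   handler.set_danger(1, TileDanger.DORA_BONUS)
#   handler.set_danger_border(1, DangerBorder.HIGH,
#                             our_hand_cost=3900, enemy_hand_cost=8000)
#   ok_to_discard = handler.is_danger_acceptable()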
|
mit
| 5,193,180,897,741,196,000
| 29.261649
| 107
| 0.574973
| false
| 3.204783
| false
| false
| false
|
iamsteadman/bambu-urlshortener
|
bambu_urlshortener/migrations/0001_initial.py
|
1
|
1644
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ShortURL'
db.create_table('urlshortener_shorturl', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.URLField')(unique=True, max_length=255)),
('slug', self.gf('django.db.models.fields.CharField')(unique=True, max_length=7)),
('visits', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('last_visited', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('bambu_urlshortener', ['ShortURL'])
def backwards(self, orm):
# Deleting model 'ShortURL'
db.delete_table('urlshortener_shorturl')
models = {
'bambu_urlshortener.shorturl': {
'Meta': {'object_name': 'ShortURL', 'db_table': "'urlshortener_shorturl'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_visited': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '7'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'visits': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
}
}
complete_apps = ['bambu_urlshortener']
|
apache-2.0
| -556,862,470,174,253,000
| 42.289474
| 109
| 0.600365
| false
| 3.573913
| false
| false
| false
|
YannThorimbert/ThorPy-1.4.3
|
thorpy/elements/slidersetter.py
|
1
|
4792
|
from thorpy.elements.ghost import Ghost
from thorpy.elements.slider import _SliderXSetter
from thorpy.elements.element import Element
from thorpy.miscgui import functions, style, painterstyle
from thorpy.miscgui import storage
class SliderXSetter(Ghost):
"""Set of text, slider and value"""
def __init__(self,
length,
limvals=None,
text="",
elements=None,
normal_params=None,
namestyle=None,
valuestyle=None,
type_=float,
initial_value=None):
"""Slider for choosing a value.
<length>: single int value specifying the length of slider in pixels.
<limvals>: 2-tuple specifying the min and max values.
        <text>: text preceding the element.
        <type_>: the type of the number to be chosen (e.g. int or float)
<initial_value>: the initial value. If None, set to minimum value.
"""
namestyle = style.STYLE_SLIDER_NAME if namestyle is None else namestyle
valuestyle=style.STYLE_SLIDER_VALUE if valuestyle is None else valuestyle
Ghost.__init__(self, elements, normal_params)
self._slider_el=_SliderXSetter(length, limvals, "", initial_value=initial_value)
self._slider_el.finish()
self.add_elements([self._slider_el])
self._value_type = type_
self._round_decimals = 2
        self._name_element = self._get_name_element(text, namestyle)  # inherited from setter
self._value_element = self._get_value_element(valuestyle)
self.add_elements([self._name_element, self._value_element])
self._name_element.rank = 1
self._slider_el.rank = 2
self._value_element.rank = 3
self.sort_children_by_rank()
self._storer_rect = None
self._refresh_pos()
def finish(self):
Ghost.finish(self)
self._refresh_pos()
self._slider_el._drag_element.set_setter()
value = str(self._slider_el.get_value())
self._value_element.set_text(value)
def set_value(self, value):
self._slider_el.get_dragger().place_at(value)
self.refresh_value()
def show_value(self, show_value):
self._value_element.visible = show_value
def _get_name_element(self, name, namestyle):
painter = functions.obtain_valid_painter(
painterstyle.CHECKER_NAME_PAINTER,
size=style.SIZE)
el = Element(name)
el.set_painter(painter)
if namestyle:
el.set_style(namestyle)
el.finish()
return el
def _get_value_element(self, valuestyle):
painter = functions.obtain_valid_painter(
painterstyle.CHECKER_VALUE_PAINTER,
size=style.CHECK_SIZE)
el = Element(str(self.get_value()))
el.set_painter(painter)
if valuestyle:
el.set_style(valuestyle)
el.finish()
return el
def _refresh_pos(self):
storage.store(self, mode="h")
self.fit_children()
def refresh_value(self):
self._value_element.unblit()
self._value_element.update()
value = str(self.get_value())
self._value_element.set_text(value)
self._value_element.blit()
self._value_element.update()
def get_value(self):
value = self._slider_el.get_value()
return self._value_type(value)
    def get_storer_rect(self):  # !!! normally nothing needed here
tmp = self.get_value()
self._value_element.set_text(str(self._slider_el._limvals[1]))
rect = self.get_family_rect()
self._value_element.set_text(str(tmp))
return rect
## def set_font_color(self, color, state=None, center_title=True):
## """set font color for a given state"""
## self._name_element.set_font_color(color, state, center_title)
##
## def set_font_size(self, size, state=None, center_title=True):
## """set font size for a given state"""
## SliderX.set_font_size(self, size, state, center_title)
## self._name_element.set_font_size(size, state, center_title)
##
## def set_font_effects(self, biu, state=None, center=True, preserve=False):
## """biu = tuple : (bold, italic, underline)"""
## SliderX.set_font_effects(self, bio, state, center, preserve)
## self._name_element.set_font_effects(biu, state, center, preserve)
## def pix_to_val(self, pix, x0): #!!!!!
## value = SliderX.pix_to_val(self, pix, x0)
## if self._value_type is float:
## return round(value, self._round_decimals)
## elif self._value_type is int:
## return int(round(value))
def get_help_rect(self):
return self._name_element.get_help_rect()
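## Illustrative usage (assumption, not part of the original module):
##   slider = SliderXSetter(length=150, limvals=(0, 100), text="Volume:",
##                          type_=int, initial_value=50)
##   slider.finish()
##   current_value = slider.get_value()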
|
mit
| -7,415,068,993,223,257,000
| 36.732283
| 88
| 0.603923
| false
| 3.568131
| false
| false
| false
|
AunShiLord/sympy
|
sympy/physics/tests/test_pring.py
|
4
|
1072
|
from sympy.physics.pring import wavefunction, energy
from sympy import pi, integrate, sqrt, exp, simplify, I
from sympy.abc import m, x, r
from sympy.physics.quantum.constants import hbar
def test_wavefunction():
Psi = {
0: (1/sqrt(2 * pi)),
1: (1/sqrt(2 * pi)) * exp(I * x),
2: (1/sqrt(2 * pi)) * exp(2 * I * x),
3: (1/sqrt(2 * pi)) * exp(3 * I * x)
}
for n in Psi:
assert simplify(wavefunction(n, x) - Psi[n]) == 0
def test_norm(n=1):
# Maximum "n" which is tested:
for i in range(n + 1):
assert integrate(
wavefunction(i, x) * wavefunction(-i, x), (x, 0, 2 * pi)) == 1
def test_orthogonality(n=1):
# Maximum "n" which is tested:
for i in range(n + 1):
for j in range(i+1, n+1):
assert integrate(
wavefunction(i, x) * wavefunction(j, x), (x, 0, 2 * pi)) == 0
def test_energy(n=1):
# Maximum "n" which is tested:
for i in range(n+1):
assert simplify(
energy(i, m, r) - ((i**2 * hbar**2) / (2 * m * r**2))) == 0
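# Illustrative check (not part of the original tests): substituting n = 2 in
# the particle-on-a-ring energy E_n = n**2 * hbar**2 / (2 * m * r**2) gives
#   energy(2, m, r) == 2 * hbar**2 / (m * r**2)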
|
bsd-3-clause
| -7,457,749,085,143,069,000
| 27.972973
| 77
| 0.528918
| false
| 2.873995
| true
| false
| false
|
olebole/astrometry.net
|
net/sdss_image.py
|
1
|
4000
|
from __future__ import print_function
import math
import os
import urllib
if __name__ == '__main__':
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'astrometry.net.settings'
from astrometry.net.log import *
from astrometry.net.tmpfile import *
from astrometry.net import settings
def plot_sdss_image(wcsfn, plotfn, image_scale=1.0, debug_ps=None):
from astrometry.util import util as anutil
from astrometry.blind import plotstuff as ps
# Parse the wcs.fits file
wcs = anutil.Tan(wcsfn, 0)
# grab SDSS tiles with about the same resolution as this image.
pixscale = wcs.pixel_scale()
pixscale = pixscale / image_scale
logmsg('Original image scale is', wcs.pixel_scale(), 'arcsec/pix; scaled', image_scale, '->', pixscale)
# size of SDSS image tiles to request, in pixels
sdsssize = 512
scale = sdsssize * pixscale / 60.
# healpix-vs-north-up rotation
nside = anutil.healpix_nside_for_side_length_arcmin(scale / math.sqrt(2.))
nside = 2 ** int(math.ceil(math.log(nside)/math.log(2.)))
logmsg('Next power-of-2 nside:', nside)
ra,dec = wcs.radec_center()
logmsg('Image center is RA,Dec', ra, dec)
dirnm = os.path.join(settings.SDSS_TILE_DIR, 'nside%i'%nside)
if not os.path.exists(dirnm):
os.makedirs(dirnm)
#hp = anutil.radecdegtohealpix(ra, dec, nside)
#logmsg('Healpix of center:', hp)
radius = wcs.radius()
hps = anutil.healpix_rangesearch_radec(ra, dec, radius, nside)
logmsg('Healpixes in range:', len(hps), ': ', hps)
scale = math.sqrt(2.) * anutil.healpix_side_length_arcmin(nside) * 60. / float(sdsssize)
logmsg('Grabbing SDSS tile with scale', scale, 'arcsec/pix')
size = [int(image_scale*wcs.imagew),int(image_scale*wcs.imageh)]
plot = ps.Plotstuff(outformat='png', wcsfn=wcsfn, size=size)
plot.scale_wcs(image_scale)
img = plot.image
img.format = ps.PLOTSTUFF_FORMAT_JPG
img.resample = 1
for hp in hps:
fn = os.path.join(dirnm, '%i.jpg'%hp)
logmsg('Checking for filename', fn)
if not os.path.exists(fn):
ra,dec = anutil.healpix_to_radecdeg(hp, nside, 0.5, 0.5)
logmsg('Healpix center is RA,Dec', ra, dec)
url = ('http://skyservice.pha.jhu.edu/DR8/ImgCutout/getjpeg.aspx?' +
'ra=%f&dec=%f&scale=%f&opt=&width=%i&height=%i' %
(ra, dec, scale, sdsssize, sdsssize))
urllib.urlretrieve(url, fn)
logmsg('Wrote', fn)
swcsfn = os.path.join(dirnm, '%i.wcs'%hp)
logmsg('Checking for WCS', swcsfn)
if not os.path.exists(swcsfn):
# Create WCS header
cd = scale / 3600.
swcs = anutil.Tan(ra, dec, sdsssize/2 + 0.5, sdsssize/2 + 0.5,
-cd, 0, 0, -cd, sdsssize, sdsssize)
swcs.write_to(swcsfn)
logmsg('Wrote WCS to', swcsfn)
img.set_wcs_file(swcsfn, 0)
img.set_file(fn)
plot.plot('image')
if debug_ps is not None:
fn = debug_ps.getnext()
plot.write(fn)
print('Wrote', fn)
if debug_ps is not None:
out = plot.outline
plot.color = 'white'
plot.alpha = 0.25
for hp in hps:
swcsfn = os.path.join(dirnm, '%i.wcs'%hp)
ps.plot_outline_set_wcs_file(out, swcsfn, 0)
plot.plot('outline')
plot.write(fn)
print('Wrote', fn)
plot.write(plotfn)
if __name__ == '__main__':
import logging
from astrometry.util import util as anutil
logging.basicConfig(format='%(message)s',
level=logging.DEBUG)
wcsfn = 'wcs.fits'
outfn = 'sdss.png'
if True:
wcs = anutil.Tan(wcsfn)
scale = 640. / wcs.get_width()
print('Scale', scale)
from astrometry.util.plotutils import *
ps = PlotSequence('sdss')
plot_sdss_image(wcsfn, outfn, image_scale=scale, debug_ps=ps)
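# Usage sketch (illustrative, not part of the module): given a solved field's
# WCS header, render an SDSS backdrop matching the image, e.g.
#
#   plot_sdss_image('wcs.fits', 'sdss.png', image_scale=0.5)
#
# image_scale only rescales the requested pixel scale and the output size; the
# SDSS tile size and healpix nside are derived from the WCS as computed above.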
|
bsd-3-clause
| -633,198,197,810,560,600
| 32.333333
| 107
| 0.59475
| false
| 3.051106
| false
| false
| false
|
galaxor/Nodewatcher
|
nodewatcher/web/nodes/management/commands/preparedb.py
|
1
|
6030
|
import subprocess
import time
import optparse
import os.path
import traceback
from django.conf import settings
from django.core import management
from django.core import serializers
from django.core.management import base as management_base
from django.core.management import color as management_color
from django.db import connection, transaction
# TODO: Change all prints to self.stdout.write for Django 1.3
class Command(management_base.BaseCommand):
"""
This class defines a command for manage.py which prepares
and initializes the database.
"""
args = "[dump_file]"
help = "Prepare and initialize the database. If optional dump_file is specified it is used to populate the database."
requires_model_validation = False
option_list = management_base.BaseCommand.option_list + (
optparse.make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
)
def handle(self, *args, **options):
"""
Prepares and initializes the database.
"""
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive', True)
show_traceback = options.get('traceback', False)
def ensure_success(errcode):
if errcode != 0:
raise management_base.CommandError('Command failed to execute, aborting!')
if len(args) > 1:
raise management_base.CommandError('Too many arguments!')
elif len(args) > 0 and not os.path.exists(args[0]):
raise management_base.CommandError("Given dump file '%s' does not exist!" % args[0])
# Determine the database backend
db_backend = settings.DATABASES['default']['ENGINE']
if db_backend.find('postgresql') != -1:
db_backend = 'postgresql'
elif db_backend.find('sqlite') != -1:
db_backend = 'sqlite'
elif db_backend.find('mysql') != -1:
db_backend = 'mysql'
# Close the connection before continuing since the setup script will
# recreate databases
connection.close()
# TODO: manage.py script could be run somewhere else, with some other working directory
if os.path.isfile('scripts/%s_init.sh' % db_backend):
print "!!! NOTE: A setup script exists for your database. Be sure that it"
print "!!! does what you want! You may have to edit the script and YOU"
print "!!! MUST REVIEW IT! Otherwise the script may bork your installation."
if interactive:
print "Press CTRL + C to abort now."
try:
time.sleep(5)
except KeyboardInterrupt:
raise management_base.CommandError('Aborted by user.')
if verbosity >= 1:
print ">>> Executing database setup script 'scripts/%s_init.sh'..." % db_backend
ensure_success(subprocess.call(["scripts/%s_init.sh" % db_backend, settings.DATABASES['default']['NAME']]))
else:
print "!!! NOTE: This command assumes that you have created and configured"
print "!!! a proper database via settings.py! The database MUST be completely"
print "!!! empty (no tables or sequences should be present). If this is not"
print "!!! the case, this operation WILL FAIL!"
if db_backend == 'postgresql':
print "!!!"
print "!!! You are using a PostgreSQL database. Be sure that you have"
print "!!! installed the IP4R extension or schema sync WILL FAIL!"
print "!!! "
print "!!! More information: http://ip4r.projects.postgresql.org"
print "!!!"
if interactive:
print "Press CTRL + C to abort now."
try:
time.sleep(5)
except KeyboardInterrupt:
raise management_base.CommandError('Aborted by user.')
if len(args) > 0:
options['interactive'] = False # We will populate with our data so no need for asking about admin user
if verbosity >= 1:
print ">>> Performing initial database sync..."
management.call_command("syncdb", **options)
if len(args) < 1:
if verbosity >= 1:
print ">>> Initialization completed."
return
if verbosity >= 1:
print ">>> Performing data cleanup..."
try:
cursor = connection.cursor()
cursor.execute("DELETE FROM auth_group_permissions")
cursor.execute("DELETE FROM auth_group")
cursor.execute("DELETE FROM auth_permission")
cursor.execute("DELETE FROM auth_user")
cursor.execute("DELETE FROM django_content_type")
cursor.execute("DELETE FROM django_site")
cursor.execute("DELETE FROM policy_trafficcontrolclass")
transaction.commit_unless_managed()
except:
raise management_base.CommandError('Data cleanup operation failed, aborting!')
if db_backend == 'mysql':
connection.cursor().execute("SET FOREIGN_KEY_CHECKS = 0")
elif db_backend == 'sqlite':
connection.cursor().execute("PRAGMA foreign_keys = 0")
transaction.commit_unless_managed()
if verbosity >= 1:
print ">>> Importing data from '%s'..." % args[0]
transaction.enter_transaction_management()
transaction.managed(True)
models = set()
try:
count = 0
for holder in serializers.deserialize('json', open(args[0], 'r')):
models.add(holder.object.__class__)
holder.save()
count += 1
if verbosity >= 1:
print "Installed %d object(s)" % count
except:
transaction.rollback()
transaction.leave_transaction_management()
if show_traceback:
traceback.print_exc()
raise management_base.CommandError('Data import operation failed, aborting!')
# Reset sequences
for line in connection.ops.sequence_reset_sql(management_color.no_style(), models):
cursor.execute(line)
transaction.commit()
transaction.leave_transaction_management()
connection.close()
# Additional syncdb for fixture overrides
management.call_command("syncdb", **options)
if verbosity >= 1:
print ">>> Import completed."
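# Invocation sketch (illustrative, not part of the command): the command is run
# through manage.py, optionally with a JSON dump file to import, e.g.
#
#   python manage.py preparedb                      # initialize an empty database
#   python manage.py preparedb dump.json            # initialize and import the dump
#   python manage.py preparedb --noinput dump.json  # same, without interactive prompts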
|
agpl-3.0
| -1,226,115,389,493,172,000
| 34.680473
| 119
| 0.656053
| false
| 4.276596
| false
| false
| false
|
rmst/chi
|
examples/experimental/bdpg_chains2.py
|
1
|
3857
|
""" Bayesian Determinisitc Policy Gradient evaluated on th
didactic "chain" environment
"""
import tensorflow as tf
from gym import Wrapper
from tensorflow.python.layers.utils import smart_cond
from tensorflow.python.ops.variable_scope import get_local_variable
import chi
from chi import Experiment
from chi import experiment, model
from chi.rl import ReplayMemory
# chi.chi.tf_debug = True
from chi.rl.bdpg import BdpgAgent
from chi.rl.ddpg import DdpgAgent
from chi.rl.util import print_env
@experiment
def bdpg_chains2(self: Experiment, logdir=None, env=1, heads=3, n=50, bootstrap=False, sr=50000):
from tensorflow.contrib import layers
import gym
from gym import spaces
from gym import wrappers
import numpy as np
from tensorflow.contrib.framework import arg_scope
def gym_make(id) -> gym.Env:
return gym.make(id)
chi.set_loglevel('debug')
if env == 0:
import gym_mix
from chi.rl.wrappers import PenalizeAction
env = gym_mix.envs.ChainEnv(n)
env = PenalizeAction(env, .001, 1)
elif env == 1:
# env = gym.make('Pendulum-v0')
env = gym.make('MountainCarContinuous-v0')
if bootstrap:
class Noise(Wrapper):
def __init__(self, env):
super().__init__(env)
self.n = 3
self.observation_space = gym.spaces.Box(
np.concatenate((self.observation_space.low, np.full([self.n], -1))),
np.concatenate((self.observation_space.high, np.full([self.n], 1))))
def _reset(self):
s = super()._reset()
self.noise = np.random.uniform(-1, 1, [self.n])
s = np.concatenate([s, self.noise])
return s
def _step(self, action):
s, r, d, i = super()._step(action)
s = np.concatenate([s, self.noise])
return s, r, d, i
env = Noise(env)
print_env(env)
def pp(x):
# v = get_local_variable('noise', [x.shape[0], 100], initializer=tf.random_normal_initializer)
# y = tf.concat(x, v)
return x
def ac(x):
with tf.name_scope('actor_head'):
x = layers.fully_connected(x, 50, biases_initializer=layers.xavier_initializer())
x = layers.fully_connected(x, 50, biases_initializer=layers.xavier_initializer())
# a = layers.fully_connected(x, env.action_space.shape[0], None, weights_initializer=tf.random_normal_initializer(0, 1e-4))
a = layers.fully_connected(x, env.action_space.shape[0], None)
return a
def cr(x, a):
with tf.name_scope('critic_head'):
x = layers.fully_connected(x, 50, biases_initializer=layers.xavier_initializer())
x = tf.concat([x, a], axis=1)
x = layers.fully_connected(x, 50, biases_initializer=layers.xavier_initializer())
# q = layers.fully_connected(x, 1, None, weights_initializer=tf.random_normal_initializer(0, 1e-4))
q = layers.fully_connected(x, 1, None)
return tf.squeeze(q, 1)
if bootstrap:
agent = DdpgAgent(env, ac, cr, replay_start=sr, noise=lambda a: a)
else:
agent = DdpgAgent(env, ac, cr, replay_start=sr)
threshold = getattr(getattr(env, 'spec', None), 'reward_threshold', None)
for ep in range(100000):
R, info = agent.play_episode()
if ep % 20 == 0:
head = info.get('head')
print(f'Return of episode {ep} after timestep {agent.t}: {R} (head = {head}, threshold = {threshold})')
if ep % 100 == 0 and bootstrap:
pass
#
# @chi.function(logging_policy=lambda _: True)
# def plot():
# # obsp = env.observation_space
# # h = obsp.high
# # l = obsp.low
# # x, y = tf.meshgrid(tf.linspace(l[0], h[0], 100), tf.linspace(l[1], h[1], 100))
# # x = tf.reshape(x, [-1])
# # y = tf.reshape(y, [-1])
# # inp = tf.stack(x, y, axis=1)
#
# x = tf.linspace(0, 30, 100)
# x = tf.py_func(env.batch_features, x, tf.float32, stateful=False)
# s = pp(x)
# a0 = actor(s)
# tf.image
|
mit
| -7,825,289,444,006,958,000
| 30.104839
| 129
| 0.635468
| false
| 3.068417
| false
| false
| false
|
phrack/ShootOFF-legacy
|
training_protocols/shoot_dont_shoot/__init__.py
|
1
|
6290
|
# Copyright (c) 2015 phrack. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
import threading
from training_protocols.ITrainingProtocol import ITrainingProtocol
class ShootDontShoot(ITrainingProtocol):
def __init__(self, main_window, protocol_operations, targets):
self._operations = protocol_operations
self._operations.clear_shots()
self._continue_protocol = True
self._arena_dimensions = self._operations.get_projector_arena_dimensions()
self._missed_targets = 0
self._bad_hits = 0
self._current_shoot_targets = []
self._current_dont_shoot_targets = []
self._wait_event = threading.Event()
self._operations.add_shot_list_columns(("Target",), [60])
if not self._operations.projector_arena_visible():
self._operations.say("This protocol only works on the projector arena.")
else:
self._add_targets(self._current_shoot_targets, "training_protocols/shoot_dont_shoot/shoot.target")
self._add_targets(self._current_dont_shoot_targets, "training_protocols/shoot_dont_shoot/dont_shoot.target")
self._operations.show_text_on_feed("missed targets: 0\nbad hits: 0")
            self._new_round_thread = threading.Thread(target=self._new_round,
name="new_round_thread")
self._new_round_thread.start()
def _add_targets(self, target_list, name):
# Put up between zero and three targets
target_count = random.randrange(0, 4)
for i in range(0, target_count):
x = random.randrange(0, self._arena_dimensions[0] - 100)
y = random.randrange(0, self._arena_dimensions[1] - 100)
target_list.append(self._operations.add_projector_target(name, x, y))
def shot_listener(self, shot, shot_list_item, is_hit):
return
def hit_listener(self, region, tags, shot, shot_list_item):
if "subtarget" in tags:
target_name = self._operations.get_target_name(region)
if tags["subtarget"] == "shoot":
self._remove_target(target_name)
self._current_shoot_targets.remove(target_name)
self._operations.append_shot_item_values(shot_list_item,
(tags["subtarget"],))
elif tags["subtarget"] == "dont_shoot":
self._remove_target(target_name)
self._current_dont_shoot_targets.remove(target_name)
self._bad_hits += 1
self._operations.append_shot_item_values(shot_list_item,
(tags["subtarget"],))
self._operations.say("Bad shoot!")
def _new_round(self):
# Wait ten seconds before starting another round
self._wait_event.wait(10)
if self._continue_protocol:
missed = len(self._current_shoot_targets)
self._missed_targets += missed
if missed > 0:
self._operations.say("You missed " + str(missed) + " shoot targets.")
self._operations.clear_shots()
message = "missed targets: %d\nbad hits: %d" % (self._missed_targets, self._bad_hits)
self._operations.show_text_on_feed(message)
self._remove_old_targets(self._current_shoot_targets)
self._current_shoot_targets = []
self._remove_old_targets(self._current_dont_shoot_targets)
self._current_dont_shoot_targets = []
self._add_targets(self._current_shoot_targets, "training_protocols/shoot_dont_shoot/shoot.target")
self._add_targets(self._current_dont_shoot_targets, "training_protocols/shoot_dont_shoot/dont_shoot.target")
if self._continue_protocol:
self._new_round()
def _remove_target(self, target_name):
self._operations.delete_projector_target(target_name)
def _remove_old_targets(self, target_list):
for target in target_list:
self._remove_target(target)
def reset(self, targets):
self._missed_targets = 0
self._bad_hits = 0
if not self._operations.projector_arena_visible():
self._operations.say("This protocol only works on the projector arena.")
else:
self._remove_old_targets(self._current_shoot_targets)
self._current_shoot_targets = []
self._remove_old_targets(self._current_dont_shoot_targets)
self._current_dont_shoot_targets = []
self._add_targets(self._current_shoot_targets, "training_protocols/shoot_dont_shoot/shoot.target")
self._add_targets(self._current_dont_shoot_targets, "training_protocols/shoot_dont_shoot/dont_shoot.target")
message = "missed targets: %d\nbad hits: %d" % (self._missed_targets, self._bad_hits)
self._operations.show_text_on_feed(message)
            self._new_round_thread = threading.Thread(target=self._new_round,
name="new_round_thread")
self._new_round_thread.start()
def destroy(self):
self._continue_protocol = False
self._wait_event.set()
self._remove_old_targets(self._current_shoot_targets)
self._remove_old_targets(self._current_dont_shoot_targets)
def get_info():
protocol_info = {}
protocol_info["name"] = "Shoot Don't Shoot"
protocol_info["version"] = "1.0"
protocol_info["creator"] = "phrack"
desc = "This protocol randomly puts up targets and gives you 10 seconds"
desc += "to decide which ones to shoot and which ones to ignore. If "
desc += "you do not shoot a target you are supposed to shoot, it gets "
desc += "added to your missed targets counter and the protocol says "
desc += "how many targets you missed. If you hit a target you were not "
desc += "supposed to hit, the protocol says 'bad shoot!'. Shoot the targets "
desc += "with the red ring, don't shoot the other targets."
protocol_info["description"] = desc
return protocol_info
def load(main_window, protocol_operations, targets):
return ShootDontShoot(main_window, protocol_operations, targets)
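# Plugin-interface sketch (illustrative): ShootOFF drives this protocol through
# the module-level hooks above, roughly:
#
#   info = get_info()                                  # metadata shown in the UI
#   protocol = load(main_window, operations, targets)  # returns a ShootDontShoot
#   protocol.hit_listener(region, tags, shot, item)    # called for each detected hit
#   protocol.destroy()                                 # called when the protocol stops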
|
bsd-3-clause
| 5,913,113,314,758,436,000
| 42.082192
| 122
| 0.619237
| false
| 3.755224
| false
| false
| false
|
yiannist/ganeti
|
lib/server/noded.py
|
1
|
42846
|
#
#
# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Ganeti node daemon"""
# pylint: disable=C0103
# C0103: Functions in this module need to have a given name structure,
# and the name of the daemon doesn't match
import os
import sys
import logging
import signal
import codecs
from optparse import OptionParser
from ganeti import backend
from ganeti import constants
from ganeti import objects
from ganeti import errors
from ganeti import jstore
from ganeti import daemon
from ganeti import http
from ganeti import utils
from ganeti.storage import container
from ganeti import serializer
from ganeti import netutils
from ganeti import pathutils
from ganeti import ssconf
import ganeti.http.server # pylint: disable=W0611
queue_lock = None
def _extendReasonTrail(trail, source, reason=""):
"""Extend the reason trail with noded information
The trail is extended by appending the name of the noded functionality
"""
assert trail is not None
trail_source = "%s:%s" % (constants.OPCODE_REASON_SRC_NODED, source)
trail.append((trail_source, reason, utils.EpochNano()))
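# Illustrative example: _extendReasonTrail(trail, "shutdown", "user request")
# appends ("%s:shutdown" % constants.OPCODE_REASON_SRC_NODED, "user request",
# utils.EpochNano()) to the trail, i.e. the noded source, the free-form reason
# and a nanosecond timestamp.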
def _PrepareQueueLock():
"""Try to prepare the queue lock.
@return: None for success, otherwise an exception object
"""
global queue_lock # pylint: disable=W0603
if queue_lock is not None:
return None
# Prepare job queue
try:
queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
return None
except EnvironmentError, err:
return err
def _RequireJobQueueLock(fn):
"""Decorator for job queue manipulating functions.
"""
QUEUE_LOCK_TIMEOUT = 10
def wrapper(*args, **kwargs):
# Locking in exclusive, blocking mode because there could be several
# children running at the same time. Waiting up to 10 seconds.
if _PrepareQueueLock() is not None:
raise errors.JobQueueError("Job queue failed initialization,"
" cannot update jobs")
queue_lock.Exclusive(blocking=True, timeout=QUEUE_LOCK_TIMEOUT)
try:
return fn(*args, **kwargs)
finally:
queue_lock.Unlock()
return wrapper
def _DecodeImportExportIO(ieio, ieioargs):
"""Decodes import/export I/O information.
"""
if ieio == constants.IEIO_RAW_DISK:
assert len(ieioargs) == 1
return (objects.Disk.FromDict(ieioargs[0]), )
if ieio == constants.IEIO_SCRIPT:
assert len(ieioargs) == 2
return (objects.Disk.FromDict(ieioargs[0]), ieioargs[1])
return ieioargs
def _DefaultAlternative(value, default):
"""Returns value or, if evaluating to False, a default value.
Returns the given value, unless it evaluates to False. In the latter case the
default value is returned.
@param value: Value to return if it doesn't evaluate to False
@param default: Default value
@return: Given value or the default
"""
if value:
return value
return default
class MlockallRequestExecutor(http.server.HttpServerRequestExecutor):
"""Subclass ensuring request handlers are locked in RAM.
"""
def __init__(self, *args, **kwargs):
utils.Mlockall()
http.server.HttpServerRequestExecutor.__init__(self, *args, **kwargs)
class NodeRequestHandler(http.server.HttpServerHandler):
"""The server implementation.
This class holds all methods exposed over the RPC interface.
"""
# too many public methods, and unused args - all methods get params
# due to the API
# pylint: disable=R0904,W0613
def __init__(self):
http.server.HttpServerHandler.__init__(self)
self.noded_pid = os.getpid()
def HandleRequest(self, req):
"""Handle a request.
"""
if req.request_method.upper() != http.HTTP_POST:
raise http.HttpBadRequest("Only the POST method is supported")
path = req.request_path
if path.startswith("/"):
path = path[1:]
method = getattr(self, "perspective_%s" % path, None)
if method is None:
raise http.HttpNotFound()
try:
result = (True, method(serializer.LoadJson(req.request_body)))
except backend.RPCFail, err:
# our custom failure exception; str(err) works fine if the
# exception was constructed with a single argument, and in
# this case, err.message == err.args[0] == str(err)
result = (False, str(err))
except errors.QuitGanetiException, err:
# Tell parent to quit
logging.info("Shutting down the node daemon, arguments: %s",
str(err.args))
os.kill(self.noded_pid, signal.SIGTERM)
# And return the error's arguments, which must be already in
# correct tuple format
result = err.args
except Exception, err: # pylint: disable=W0703
logging.exception("Error in RPC call")
result = (False, "Error while executing backend function: %s" % str(err))
return serializer.DumpJson(result)
# the new block devices --------------------------
@staticmethod
def perspective_blockdev_create(params):
"""Create a block device.
"""
(bdev_s, size, owner, on_primary, info, excl_stor) = params
bdev = objects.Disk.FromDict(bdev_s)
if bdev is None:
raise ValueError("can't unserialize data!")
return backend.BlockdevCreate(bdev, size, owner, on_primary, info,
excl_stor)
@staticmethod
def perspective_blockdev_convert(params):
"""Copy data from source block device to target.
"""
disk_src, disk_dest = params
bdev_src = objects.Disk.FromDict(disk_src)
bdev_dest = objects.Disk.FromDict(disk_dest)
return backend.BlockdevConvert(bdev_src, bdev_dest)
@staticmethod
def perspective_blockdev_pause_resume_sync(params):
"""Pause/resume sync of a block device.
"""
disks_s, pause = params
disks = [objects.Disk.FromDict(bdev_s) for bdev_s in disks_s]
return backend.BlockdevPauseResumeSync(disks, pause)
@staticmethod
def perspective_blockdev_image(params):
"""Image a block device.
"""
bdev_s, image, size = params
bdev = objects.Disk.FromDict(bdev_s)
return backend.BlockdevImage(bdev, image, size)
@staticmethod
def perspective_blockdev_wipe(params):
"""Wipe a block device.
"""
bdev_s, offset, size = params
bdev = objects.Disk.FromDict(bdev_s)
return backend.BlockdevWipe(bdev, offset, size)
@staticmethod
def perspective_blockdev_remove(params):
"""Remove a block device.
"""
bdev_s = params[0]
bdev = objects.Disk.FromDict(bdev_s)
return backend.BlockdevRemove(bdev)
@staticmethod
def perspective_blockdev_rename(params):
"""Remove a block device.
"""
devlist = [(objects.Disk.FromDict(ds), uid) for ds, uid in params[0]]
return backend.BlockdevRename(devlist)
@staticmethod
def perspective_blockdev_assemble(params):
"""Assemble a block device.
"""
bdev_s, idict, on_primary, idx = params
bdev = objects.Disk.FromDict(bdev_s)
instance = objects.Instance.FromDict(idict)
if bdev is None:
raise ValueError("can't unserialize data!")
return backend.BlockdevAssemble(bdev, instance, on_primary, idx)
@staticmethod
def perspective_blockdev_shutdown(params):
"""Shutdown a block device.
"""
bdev_s = params[0]
bdev = objects.Disk.FromDict(bdev_s)
if bdev is None:
raise ValueError("can't unserialize data!")
return backend.BlockdevShutdown(bdev)
@staticmethod
def perspective_blockdev_addchildren(params):
"""Add a child to a mirror device.
Note: this is only valid for mirror devices. It's the caller's duty
to send a correct disk, otherwise we raise an error.
"""
bdev_s, ndev_s = params
bdev = objects.Disk.FromDict(bdev_s)
ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
if bdev is None or ndevs.count(None) > 0:
raise ValueError("can't unserialize data!")
return backend.BlockdevAddchildren(bdev, ndevs)
@staticmethod
def perspective_blockdev_removechildren(params):
"""Remove a child from a mirror device.
    This is only valid for mirror devices, of course. It's the caller's
duty to send a correct disk, otherwise we raise an error.
"""
bdev_s, ndev_s = params
bdev = objects.Disk.FromDict(bdev_s)
ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
if bdev is None or ndevs.count(None) > 0:
raise ValueError("can't unserialize data!")
return backend.BlockdevRemovechildren(bdev, ndevs)
@staticmethod
def perspective_blockdev_getmirrorstatus(params):
"""Return the mirror status for a list of disks.
"""
disks = [objects.Disk.FromDict(dsk_s)
for dsk_s in params[0]]
return [status.ToDict()
for status in backend.BlockdevGetmirrorstatus(disks)]
@staticmethod
def perspective_blockdev_getmirrorstatus_multi(params):
"""Return the mirror status for a list of disks.
"""
(node_disks, ) = params
disks = [objects.Disk.FromDict(dsk_s) for dsk_s in node_disks]
result = []
for (success, status) in backend.BlockdevGetmirrorstatusMulti(disks):
if success:
result.append((success, status.ToDict()))
else:
result.append((success, status))
return result
@staticmethod
def perspective_blockdev_find(params):
"""Expose the FindBlockDevice functionality for a disk.
This will try to find but not activate a disk.
"""
disk = objects.Disk.FromDict(params[0])
result = backend.BlockdevFind(disk)
if result is None:
return None
return result.ToDict()
@staticmethod
def perspective_blockdev_snapshot(params):
"""Create a snapshot device.
    Note that this is only valid for LVM and ExtStorage disks; if we get passed
    something else we raise an exception. The snapshot device can be
    removed by calling the generic block device remove call.
"""
(disk, snap_name, snap_size) = params
cfbd = objects.Disk.FromDict(disk)
return backend.BlockdevSnapshot(cfbd, snap_name, snap_size)
@staticmethod
def perspective_blockdev_grow(params):
"""Grow a stack of devices.
"""
if len(params) < 5:
raise ValueError("Received only %s parameters in blockdev_grow,"
" old master?" % len(params))
cfbd = objects.Disk.FromDict(params[0])
amount = params[1]
dryrun = params[2]
backingstore = params[3]
excl_stor = params[4]
return backend.BlockdevGrow(cfbd, amount, dryrun, backingstore, excl_stor)
@staticmethod
def perspective_blockdev_close(params):
"""Closes the given block devices.
"""
disks = [objects.Disk.FromDict(cf) for cf in params[1]]
return backend.BlockdevClose(params[0], disks)
@staticmethod
def perspective_blockdev_open(params):
"""Opens the given block devices.
"""
disks = [objects.Disk.FromDict(cf) for cf in params[1]]
exclusive = params[2]
return backend.BlockdevOpen(params[0], disks, exclusive)
@staticmethod
def perspective_blockdev_getdimensions(params):
"""Compute the sizes of the given block devices.
"""
disks = [objects.Disk.FromDict(cf) for cf in params[0]]
return backend.BlockdevGetdimensions(disks)
@staticmethod
def perspective_blockdev_setinfo(params):
"""Sets metadata information on the given block device.
"""
(disk, info) = params
disk = objects.Disk.FromDict(disk)
return backend.BlockdevSetInfo(disk, info)
# blockdev/drbd specific methods ----------
@staticmethod
def perspective_drbd_disconnect_net(params):
"""Disconnects the network connection of drbd disks.
Note that this is only valid for drbd disks, so the members of the
disk list must all be drbd devices.
"""
(disks,) = params
disks = [objects.Disk.FromDict(disk) for disk in disks]
return backend.DrbdDisconnectNet(disks)
@staticmethod
def perspective_drbd_attach_net(params):
"""Attaches the network connection of drbd disks.
Note that this is only valid for drbd disks, so the members of the
disk list must all be drbd devices.
"""
disks, multimaster = params
disks = [objects.Disk.FromDict(disk) for disk in disks]
return backend.DrbdAttachNet(disks, multimaster)
@staticmethod
def perspective_drbd_wait_sync(params):
"""Wait until DRBD disks are synched.
Note that this is only valid for drbd disks, so the members of the
disk list must all be drbd devices.
"""
(disks,) = params
disks = [objects.Disk.FromDict(disk) for disk in disks]
return backend.DrbdWaitSync(disks)
@staticmethod
def perspective_drbd_needs_activation(params):
"""Checks if the drbd devices need activation
Note that this is only valid for drbd disks, so the members of the
disk list must all be drbd devices.
"""
(disks,) = params
disks = [objects.Disk.FromDict(disk) for disk in disks]
return backend.DrbdNeedsActivation(disks)
@staticmethod
def perspective_drbd_helper(_):
"""Query drbd helper.
"""
return backend.GetDrbdUsermodeHelper()
# export/import --------------------------
@staticmethod
def perspective_finalize_export(params):
"""Expose the finalize export functionality.
"""
instance = objects.Instance.FromDict(params[0])
snap_disks = []
for disk in params[1]:
if isinstance(disk, bool):
snap_disks.append(disk)
else:
snap_disks.append(objects.Disk.FromDict(disk))
return backend.FinalizeExport(instance, snap_disks)
@staticmethod
def perspective_export_info(params):
"""Query information about an existing export on this node.
The given path may not contain an export, in which case we return
None.
"""
path = params[0]
return backend.ExportInfo(path)
@staticmethod
def perspective_export_list(params):
"""List the available exports on this node.
Note that as opposed to export_info, which may query data about an
export in any path, this only queries the standard Ganeti path
(pathutils.EXPORT_DIR).
"""
return backend.ListExports()
@staticmethod
def perspective_export_remove(params):
"""Remove an export.
"""
export = params[0]
return backend.RemoveExport(export)
# block device ---------------------
@staticmethod
def perspective_bdev_sizes(params):
"""Query the list of block devices
"""
devices = params[0]
return backend.GetBlockDevSizes(devices)
# volume --------------------------
@staticmethod
def perspective_lv_list(params):
"""Query the list of logical volumes in a given volume group.
"""
vgname = params[0]
return backend.GetVolumeList(vgname)
@staticmethod
def perspective_vg_list(params):
"""Query the list of volume groups.
"""
return backend.ListVolumeGroups()
# Storage --------------------------
@staticmethod
def perspective_storage_list(params):
"""Get list of storage units.
"""
(su_name, su_args, name, fields) = params
return container.GetStorage(su_name, *su_args).List(name, fields)
@staticmethod
def perspective_storage_modify(params):
"""Modify a storage unit.
"""
(su_name, su_args, name, changes) = params
return container.GetStorage(su_name, *su_args).Modify(name, changes)
@staticmethod
def perspective_storage_execute(params):
"""Execute an operation on a storage unit.
"""
(su_name, su_args, name, op) = params
return container.GetStorage(su_name, *su_args).Execute(name, op)
# bridge --------------------------
@staticmethod
def perspective_bridges_exist(params):
"""Check if all bridges given exist on this node.
"""
bridges_list = params[0]
return backend.BridgesExist(bridges_list)
# instance --------------------------
@staticmethod
def perspective_instance_os_add(params):
"""Install an OS on a given instance.
"""
inst_s = params[0]
inst = objects.Instance.FromDict(inst_s)
reinstall = params[1]
debug = params[2]
return backend.InstanceOsAdd(inst, reinstall, debug)
@staticmethod
def perspective_instance_run_rename(params):
"""Runs the OS rename script for an instance.
"""
inst_s, old_name, debug = params
inst = objects.Instance.FromDict(inst_s)
return backend.RunRenameInstance(inst, old_name, debug)
@staticmethod
def perspective_instance_shutdown(params):
"""Shutdown an instance.
"""
instance = objects.Instance.FromDict(params[0])
timeout = params[1]
trail = params[2]
_extendReasonTrail(trail, "shutdown")
return backend.InstanceShutdown(instance, timeout, trail)
@staticmethod
def perspective_instance_start(params):
"""Start an instance.
"""
(instance_name, startup_paused, trail) = params
instance = objects.Instance.FromDict(instance_name)
_extendReasonTrail(trail, "start")
return backend.StartInstance(instance, startup_paused, trail)
@staticmethod
def perspective_hotplug_device(params):
"""Hotplugs device to a running instance.
"""
(idict, action, dev_type, ddict, extra, seq) = params
instance = objects.Instance.FromDict(idict)
if dev_type == constants.HOTPLUG_TARGET_DISK:
device = objects.Disk.FromDict(ddict)
elif dev_type == constants.HOTPLUG_TARGET_NIC:
device = objects.NIC.FromDict(ddict)
else:
assert dev_type in constants.HOTPLUG_ALL_TARGETS
return backend.HotplugDevice(instance, action, dev_type, device, extra, seq)
@staticmethod
def perspective_hotplug_supported(params):
"""Checks if hotplug is supported.
"""
instance = objects.Instance.FromDict(params[0])
return backend.HotplugSupported(instance)
@staticmethod
def perspective_instance_metadata_modify(params):
"""Modify instance metadata.
"""
instance = params[0]
return backend.ModifyInstanceMetadata(instance)
@staticmethod
def perspective_migration_info(params):
"""Gather information about an instance to be migrated.
"""
instance = objects.Instance.FromDict(params[0])
return backend.MigrationInfo(instance)
@staticmethod
def perspective_accept_instance(params):
"""Prepare the node to accept an instance.
"""
instance, info, target = params
instance = objects.Instance.FromDict(instance)
return backend.AcceptInstance(instance, info, target)
@staticmethod
def perspective_instance_finalize_migration_dst(params):
"""Finalize the instance migration on the destination node.
"""
instance, info, success = params
instance = objects.Instance.FromDict(instance)
return backend.FinalizeMigrationDst(instance, info, success)
@staticmethod
def perspective_instance_migrate(params):
"""Migrates an instance.
"""
cluster_name, instance, target, live = params
instance = objects.Instance.FromDict(instance)
return backend.MigrateInstance(cluster_name, instance, target, live)
@staticmethod
def perspective_instance_start_postcopy(params):
""" Switches a migrating instance from precopy to postcopy mode
"""
instance, = params
instance = objects.Instance.FromDict(instance)
return backend.StartPostcopy(instance)
@staticmethod
def perspective_instance_finalize_migration_src(params):
"""Finalize the instance migration on the source node.
"""
instance, success, live = params
instance = objects.Instance.FromDict(instance)
return backend.FinalizeMigrationSource(instance, success, live)
@staticmethod
def perspective_instance_get_migration_status(params):
"""Reports migration status.
"""
instance = objects.Instance.FromDict(params[0])
return backend.GetMigrationStatus(instance).ToDict()
@staticmethod
def perspective_instance_reboot(params):
"""Reboot an instance.
"""
instance = objects.Instance.FromDict(params[0])
reboot_type = params[1]
shutdown_timeout = params[2]
trail = params[3]
_extendReasonTrail(trail, "reboot")
return backend.InstanceReboot(instance, reboot_type, shutdown_timeout,
trail)
@staticmethod
def perspective_instance_balloon_memory(params):
"""Modify instance runtime memory.
"""
instance_dict, memory = params
instance = objects.Instance.FromDict(instance_dict)
return backend.InstanceBalloonMemory(instance, memory)
@staticmethod
def perspective_instance_info(params):
"""Query instance information.
"""
(instance_name, hypervisor_name, hvparams) = params
return backend.GetInstanceInfo(instance_name, hypervisor_name, hvparams)
@staticmethod
def perspective_instance_migratable(params):
"""Query whether the specified instance can be migrated.
"""
instance = objects.Instance.FromDict(params[0])
return backend.GetInstanceMigratable(instance)
@staticmethod
def perspective_all_instances_info(params):
"""Query information about all instances.
"""
(hypervisor_list, all_hvparams) = params
return backend.GetAllInstancesInfo(hypervisor_list, all_hvparams)
@staticmethod
def perspective_instance_console_info(params):
"""Query information on how to get console access to instances
"""
return backend.GetInstanceConsoleInfo(params)
@staticmethod
def perspective_instance_list(params):
"""Query the list of running instances.
"""
(hypervisor_list, hvparams) = params
return backend.GetInstanceList(hypervisor_list, hvparams)
# node --------------------------
@staticmethod
def perspective_node_has_ip_address(params):
"""Checks if a node has the given ip address.
"""
return netutils.IPAddress.Own(params[0])
@staticmethod
def perspective_node_info(params):
"""Query node information.
"""
(storage_units, hv_specs) = params
return backend.GetNodeInfo(storage_units, hv_specs)
@staticmethod
def perspective_etc_hosts_modify(params):
"""Modify a node entry in /etc/hosts.
"""
backend.EtcHostsModify(params[0], params[1], params[2])
return True
@staticmethod
def perspective_node_verify(params):
"""Run a verify sequence on this node.
"""
(what, cluster_name, hvparams) = params
return backend.VerifyNode(what, cluster_name, hvparams)
@classmethod
def perspective_node_verify_light(cls, params):
"""Run a light verify sequence on this node.
This call is meant to perform a less strict verification of the node in
certain situations. Right now, it is invoked only when a node is just about
to be added to a cluster, and even then, it performs the same checks as
L{perspective_node_verify}.
"""
return cls.perspective_node_verify(params)
@staticmethod
def perspective_node_start_master_daemons(params):
"""Start the master daemons on this node.
"""
return backend.StartMasterDaemons(params[0])
@staticmethod
def perspective_node_activate_master_ip(params):
"""Activate the master IP on this node.
"""
master_params = objects.MasterNetworkParameters.FromDict(params[0])
return backend.ActivateMasterIp(master_params, params[1])
@staticmethod
def perspective_node_deactivate_master_ip(params):
"""Deactivate the master IP on this node.
"""
master_params = objects.MasterNetworkParameters.FromDict(params[0])
return backend.DeactivateMasterIp(master_params, params[1])
@staticmethod
def perspective_node_stop_master(params):
"""Stops master daemons on this node.
"""
return backend.StopMasterDaemons()
@staticmethod
def perspective_node_change_master_netmask(params):
"""Change the master IP netmask.
"""
return backend.ChangeMasterNetmask(params[0], params[1], params[2],
params[3])
@staticmethod
def perspective_node_leave_cluster(params):
"""Cleanup after leaving a cluster.
"""
return backend.LeaveCluster(params[0])
@staticmethod
def perspective_node_volumes(params):
"""Query the list of all logical volume groups.
"""
return backend.NodeVolumes()
@staticmethod
def perspective_node_demote_from_mc(params):
"""Demote a node from the master candidate role.
"""
return backend.DemoteFromMC()
@staticmethod
def perspective_node_powercycle(params):
"""Tries to powercycle the node.
"""
(hypervisor_type, hvparams) = params
return backend.PowercycleNode(hypervisor_type, hvparams)
@staticmethod
def perspective_node_configure_ovs(params):
"""Sets up OpenvSwitch on the node.
"""
(ovs_name, ovs_link) = params
return backend.ConfigureOVS(ovs_name, ovs_link)
@staticmethod
def perspective_node_crypto_tokens(params):
"""Gets the node's public crypto tokens.
"""
token_requests = params[0]
return backend.GetCryptoTokens(token_requests)
@staticmethod
def perspective_node_ensure_daemon(params):
"""Ensure daemon is running.
"""
(daemon_name, run) = params
return backend.EnsureDaemon(daemon_name, run)
@staticmethod
def perspective_node_ssh_key_add(params):
"""Distributes a new node's SSH key if authorized.
"""
(node_uuid, node_name, potential_master_candidates,
to_authorized_keys, to_public_keys, get_public_keys) = params
return backend.AddNodeSshKey(node_uuid, node_name,
potential_master_candidates,
to_authorized_keys=to_authorized_keys,
to_public_keys=to_public_keys,
get_public_keys=get_public_keys)
@staticmethod
def perspective_node_ssh_keys_renew(params):
"""Generates a new root SSH key pair on the node.
"""
(node_uuids, node_names, master_candidate_uuids,
potential_master_candidates, old_key_type, new_key_type,
new_key_bits) = params
return backend.RenewSshKeys(node_uuids, node_names, master_candidate_uuids,
potential_master_candidates, old_key_type,
new_key_type, new_key_bits)
@staticmethod
def perspective_node_ssh_key_remove(params):
"""Removes a node's SSH key from the other nodes' SSH files.
"""
(node_uuid, node_name,
master_candidate_uuids, potential_master_candidates,
from_authorized_keys, from_public_keys, clear_authorized_keys,
clear_public_keys, readd) = params
return backend.RemoveNodeSshKey(node_uuid, node_name,
master_candidate_uuids,
potential_master_candidates,
from_authorized_keys=from_authorized_keys,
from_public_keys=from_public_keys,
clear_authorized_keys=clear_authorized_keys,
clear_public_keys=clear_public_keys,
readd=readd)
# cluster --------------------------
@staticmethod
def perspective_version(params):
"""Query version information.
"""
return constants.PROTOCOL_VERSION
@staticmethod
def perspective_upload_file(params):
"""Upload a file.
Note that the backend implementation imposes strict rules on which
files are accepted.
"""
return backend.UploadFile(*(params[0]))
@staticmethod
def perspective_upload_file_single(params):
"""Upload a file.
Note that the backend implementation imposes strict rules on which
files are accepted.
"""
return backend.UploadFile(*params)
@staticmethod
def perspective_master_node_name(params):
"""Returns the master node name.
"""
return backend.GetMasterNodeName()
@staticmethod
def perspective_run_oob(params):
"""Runs oob on node.
"""
output = backend.RunOob(params[0], params[1], params[2], params[3])
if output:
result = serializer.LoadJson(output)
else:
result = None
return result
@staticmethod
def perspective_restricted_command(params):
"""Runs a restricted command.
"""
(cmd, ) = params
return backend.RunRestrictedCmd(cmd)
@staticmethod
def perspective_write_ssconf_files(params):
"""Write ssconf files.
"""
(values,) = params
return ssconf.WriteSsconfFiles(values)
@staticmethod
def perspective_get_watcher_pause(params):
"""Get watcher pause end.
"""
return utils.ReadWatcherPauseFile(pathutils.WATCHER_PAUSEFILE)
@staticmethod
def perspective_set_watcher_pause(params):
"""Set watcher pause.
"""
(until, ) = params
return backend.SetWatcherPause(until)
@staticmethod
def perspective_get_file_info(params):
"""Get info on whether a file exists and its properties.
"""
(path, ) = params
return backend.GetFileInfo(path)
# os -----------------------
@staticmethod
def perspective_os_diagnose(params):
"""Query detailed information about existing OSes.
"""
return backend.DiagnoseOS()
@staticmethod
def perspective_os_validate(params):
"""Run a given OS' validation routine.
"""
required, name, checks, params, force_variant = params
return backend.ValidateOS(required, name, checks, params, force_variant)
@staticmethod
def perspective_os_export(params):
"""Export an OS definition into an instance specific package.
"""
instance = objects.Instance.FromDict(params[0])
override_env = params[1]
return backend.ExportOS(instance, override_env)
# extstorage -----------------------
@staticmethod
def perspective_extstorage_diagnose(params):
"""Query detailed information about existing extstorage providers.
"""
return backend.DiagnoseExtStorage()
# hooks -----------------------
@staticmethod
def perspective_hooks_runner(params):
"""Run hook scripts.
"""
hpath, phase, env = params
hr = backend.HooksRunner()
return hr.RunHooks(hpath, phase, env)
# iallocator -----------------
@staticmethod
def perspective_iallocator_runner(params):
"""Run an iallocator script.
"""
name, idata, ial_params_dict = params
ial_params = []
for ial_param in ial_params_dict.items():
if ial_param[1] is not None:
ial_params.append("--" + ial_param[0] + "=" + ial_param[1])
else:
ial_params.append("--" + ial_param[0])
iar = backend.IAllocatorRunner()
return iar.Run(name, idata, ial_params)
# test -----------------------
@staticmethod
def perspective_test_delay(params):
"""Run test delay.
"""
duration = params[0]
status, rval = utils.TestDelay(duration)
if not status:
raise backend.RPCFail(rval)
return rval
# file storage ---------------
@staticmethod
def perspective_file_storage_dir_create(params):
"""Create the file storage directory.
"""
file_storage_dir = params[0]
return backend.CreateFileStorageDir(file_storage_dir)
@staticmethod
def perspective_file_storage_dir_remove(params):
"""Remove the file storage directory.
"""
file_storage_dir = params[0]
return backend.RemoveFileStorageDir(file_storage_dir)
@staticmethod
def perspective_file_storage_dir_rename(params):
"""Rename the file storage directory.
"""
old_file_storage_dir = params[0]
new_file_storage_dir = params[1]
return backend.RenameFileStorageDir(old_file_storage_dir,
new_file_storage_dir)
# jobs ------------------------
@staticmethod
@_RequireJobQueueLock
def perspective_jobqueue_update(params):
"""Update job queue.
"""
(file_name, content) = params
return backend.JobQueueUpdate(file_name, content)
@staticmethod
@_RequireJobQueueLock
def perspective_jobqueue_purge(params):
"""Purge job queue.
"""
return backend.JobQueuePurge()
@staticmethod
@_RequireJobQueueLock
def perspective_jobqueue_rename(params):
"""Rename a job queue file.
"""
# TODO: What if a file fails to rename?
return [backend.JobQueueRename(old, new) for old, new in params[0]]
@staticmethod
@_RequireJobQueueLock
def perspective_jobqueue_set_drain_flag(params):
"""Set job queue's drain flag.
"""
(flag, ) = params
return jstore.SetDrainFlag(flag)
# hypervisor ---------------
@staticmethod
def perspective_hypervisor_validate_params(params):
"""Validate the hypervisor parameters.
"""
(hvname, hvparams) = params
return backend.ValidateHVParams(hvname, hvparams)
# Crypto
@staticmethod
def perspective_x509_cert_create(params):
"""Creates a new X509 certificate for SSL/TLS.
"""
(validity, ) = params
return backend.CreateX509Certificate(validity)
@staticmethod
def perspective_x509_cert_remove(params):
"""Removes a X509 certificate.
"""
(name, ) = params
return backend.RemoveX509Certificate(name)
# Import and export
@staticmethod
def perspective_import_start(params):
"""Starts an import daemon.
"""
(opts_s, instance, component, (dest, dest_args)) = params
opts = objects.ImportExportOptions.FromDict(opts_s)
return backend.StartImportExportDaemon(constants.IEM_IMPORT, opts,
None, None,
objects.Instance.FromDict(instance),
component, dest,
_DecodeImportExportIO(dest,
dest_args))
@staticmethod
def perspective_export_start(params):
"""Starts an export daemon.
"""
(opts_s, host, port, instance, component, (source, source_args)) = params
opts = objects.ImportExportOptions.FromDict(opts_s)
return backend.StartImportExportDaemon(constants.IEM_EXPORT, opts,
host, port,
objects.Instance.FromDict(instance),
component, source,
_DecodeImportExportIO(source,
source_args))
@staticmethod
def perspective_impexp_status(params):
"""Retrieves the status of an import or export daemon.
"""
return backend.GetImportExportStatus(params[0])
@staticmethod
def perspective_impexp_abort(params):
"""Aborts an import or export.
"""
return backend.AbortImportExport(params[0])
@staticmethod
def perspective_impexp_cleanup(params):
"""Cleans up after an import or export.
"""
return backend.CleanupImportExport(params[0])
def CheckNoded(options, args):
"""Initial checks whether to run or exit with a failure.
"""
if args: # noded doesn't take any arguments
print >> sys.stderr, ("Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" %
sys.argv[0])
sys.exit(constants.EXIT_FAILURE)
if options.max_clients < 1:
print >> sys.stderr, ("%s --max-clients argument must be >= 1" %
sys.argv[0])
sys.exit(constants.EXIT_FAILURE)
try:
codecs.lookup("string-escape")
except LookupError:
print >> sys.stderr, ("Can't load the string-escape code which is part"
" of the Python installation. Is your installation"
" complete/correct? Aborting.")
sys.exit(constants.EXIT_FAILURE)
def SSLVerifyPeer(conn, cert, errnum, errdepth, ok):
"""Callback function to verify a peer against the candidate cert map.
Note that we have a chicken-and-egg problem during cluster init and upgrade.
This method checks whether the incoming connection comes from a master
candidate by comparing it to the master certificate map in the cluster
configuration. However, during cluster init and cluster upgrade there
are various RPC calls done to the master node itself, before the candidate
certificate list is established and the cluster configuration is written.
In this case, we cannot check against the master candidate map.
This problem is solved by checking whether the candidate map is empty. An
initialized 2.11 or higher cluster has at least one entry for the master
node in the candidate map. If the map is empty, we know that we are still
in the bootstrap/upgrade phase. In this case, we read the server certificate
digest and compare it to the incoming request.
This means that after an upgrade of Ganeti, the system continues to operate
like before, using server certificates only. After the client certificates
are generated with ``gnt-cluster renew-crypto --new-node-certificates``,
RPC communication is switched to using client certificates and the trick of
using server certificates does not work anymore.
@type conn: C{OpenSSL.SSL.Connection}
@param conn: the OpenSSL connection object
@type cert: C{OpenSSL.X509}
@param cert: the peer's SSL certificate
@type errdepth: integer
@param errdepth: number of the step in the certificate chain starting at 0
for the actual client certificate.
"""
# some parameters are unused, but this is the API
# pylint: disable=W0613
# If we receive a certificate from the certificate chain that is higher
# than the lowest element of the chain, we have to check it against the
# server certificate.
if errdepth > 0:
server_digest = utils.GetCertificateDigest(
cert_filename=pathutils.NODED_CERT_FILE)
match = cert.digest("sha1") == server_digest
if not match:
logging.debug("Received certificate from the certificate chain, which"
" does not match the server certficate. Digest of the"
" received certificate: %s. Digest of the server"
" certificate: %s.", cert.digest("sha1"), server_digest)
return match
elif errdepth == 0:
sstore = ssconf.SimpleStore()
try:
candidate_certs = sstore.GetMasterCandidatesCertMap()
except errors.ConfigurationError:
logging.info("No candidate certificates found. Switching to "
"bootstrap/update mode.")
candidate_certs = None
if not candidate_certs:
candidate_certs = {
constants.CRYPTO_BOOTSTRAP: utils.GetCertificateDigest(
cert_filename=pathutils.NODED_CERT_FILE)}
match = cert.digest("sha1") in candidate_certs.values()
if not match:
logging.debug("Received certificate which is not a certificate of a"
" master candidate. Certificate digest: %s. List of master"
" candidate certificate digests: %s.", cert.digest("sha1"),
str(candidate_certs))
return match
else:
logging.error("Invalid errdepth value: %s.", errdepth)
return False
def PrepNoded(options, _):
"""Preparation node daemon function, executed with the PID file held.
"""
if options.mlock:
request_executor_class = MlockallRequestExecutor
try:
utils.Mlockall()
except errors.NoCtypesError:
logging.warning("Cannot set memory lock, ctypes module not found")
request_executor_class = http.server.HttpServerRequestExecutor
else:
request_executor_class = http.server.HttpServerRequestExecutor
# Read SSL certificate
if options.ssl:
ssl_params = http.HttpSslParams(ssl_key_path=options.ssl_key,
ssl_cert_path=options.ssl_cert)
else:
ssl_params = None
err = _PrepareQueueLock()
if err is not None:
# this might be some kind of file-system/permission error; while
# this breaks the job queue functionality, we shouldn't prevent
# startup of the whole node daemon because of this
logging.critical("Can't init/verify the queue, proceeding anyway: %s", err)
handler = NodeRequestHandler()
mainloop = daemon.Mainloop()
server = http.server.HttpServer(
mainloop, options.bind_address, options.port, options.max_clients,
handler, ssl_params=ssl_params, ssl_verify_peer=True,
request_executor_class=request_executor_class,
ssl_verify_callback=SSLVerifyPeer)
server.Start()
return (mainloop, server)
def ExecNoded(options, args, prep_data): # pylint: disable=W0613
"""Main node daemon function, executed with the PID file held.
"""
(mainloop, server) = prep_data
try:
mainloop.Run()
finally:
server.Stop()
def Main():
"""Main function for the node daemon.
"""
parser = OptionParser(description="Ganeti node daemon",
usage=("%prog [-f] [-d] [-p port] [-b ADDRESS]"
" [-i INTERFACE]"),
version="%%prog (ganeti) %s" %
constants.RELEASE_VERSION)
parser.add_option("--no-mlock", dest="mlock",
help="Do not mlock the node memory in ram",
default=True, action="store_false")
parser.add_option("--max-clients", dest="max_clients",
default=20, type="int",
help="Number of simultaneous connections accepted"
" by noded")
daemon.GenericMain(constants.NODED, parser, CheckNoded, PrepNoded, ExecNoded,
default_ssl_cert=pathutils.NODED_CERT_FILE,
default_ssl_key=pathutils.NODED_CERT_FILE,
console_logging=True,
warn_breach=True)
|
bsd-2-clause
| -8,683,240,034,905,567,000
| 28.346575
| 80
| 0.66697
| false
| 4.023099
| false
| false
| false
|
prefetchnta/questlab
|
bin/x64bin/python/37/Lib/xmlrpc/server.py
|
1
|
37658
|
r"""XML-RPC Servers.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
The Doc* classes can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the sys functions available through sys.func_name
import sys
self.sys = sys
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the sys methods
return list_public_methods(self) + \
['sys.' + method for method in list_public_methods(self.sys)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise ValueError('bad method')
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
from xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode
from http.server import BaseHTTPRequestHandler
from functools import partial
from inspect import signature
import html
import http.server
import socketserver
import sys
import os
import re
import pydoc
import traceback
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
"""resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d
Resolves a dotted attribute name to an object. Raises
an AttributeError if any attribute in the chain starts with a '_'.
If the optional allow_dotted_names argument is false, dots are not
supported and this function operates similar to getattr(obj, attr).
"""
if allow_dotted_names:
attrs = attr.split('.')
else:
attrs = [attr]
for i in attrs:
if i.startswith('_'):
raise AttributeError(
'attempt to access private attribute "%s"' % i
)
else:
obj = getattr(obj,i)
return obj
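# --- Illustrative sketch (not part of the original module) -------------------
# Shows how a dotted name is walked attribute by attribute and how any segment
# starting with '_' is refused; the Outer/Inner classes are invented purely
# for this example.
def _example_resolve_dotted_attribute():
    class Inner:
        def add(self, x, y):
            return x + y
    class Outer:
        def __init__(self):
            self.inner = Inner()
    add = resolve_dotted_attribute(Outer(), 'inner.add')  # Outer -> inner -> add
    assert add(2, 3) == 5
    try:
        resolve_dotted_attribute(Outer(), 'inner._private')
    except AttributeError:
        pass  # private segments are rejected before lookup
    return add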
def list_public_methods(obj):
"""Returns a list of attribute strings, found in the specified
object, which represent callable attributes"""
return [member for member in dir(obj)
if not member.startswith('_') and
callable(getattr(obj, member))]
class SimpleXMLRPCDispatcher:
"""Mix-in class that dispatches XML-RPC requests.
This class is used to register XML-RPC method handlers
and then to dispatch them. This class doesn't need to be
instanced directly when used by SimpleXMLRPCServer but it
can be instanced when used by the MultiPathXMLRPCServer
"""
def __init__(self, allow_none=False, encoding=None,
use_builtin_types=False):
self.funcs = {}
self.instance = None
self.allow_none = allow_none
self.encoding = encoding or 'utf-8'
self.use_builtin_types = use_builtin_types
def register_instance(self, instance, allow_dotted_names=False):
"""Registers an instance to respond to XML-RPC requests.
Only one instance can be installed at a time.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called. Methods beginning with an '_'
are considered private and will not be called by
SimpleXMLRPCServer.
If a registered function matches an XML-RPC request, then it
will be called instead of the registered instance.
If the optional allow_dotted_names argument is true and the
instance does not have a _dispatch method, method names
containing dots are supported and resolved, as long as none of
the name segments start with an '_'.
*** SECURITY WARNING: ***
Enabling the allow_dotted_names options allows intruders
to access your module's global variables and may allow
intruders to execute arbitrary code on your machine. Only
use this option on a secure, closed network.
"""
self.instance = instance
self.allow_dotted_names = allow_dotted_names
def register_function(self, function=None, name=None):
"""Registers a function to respond to XML-RPC requests.
The optional name argument can be used to set a Unicode name
for the function.
"""
# decorator factory
if function is None:
return partial(self.register_function, name=name)
if name is None:
name = function.__name__
self.funcs[name] = function
return function
def register_introspection_functions(self):
"""Registers the XML-RPC introspection methods in the system
namespace.
see http://xmlrpc.usefulinc.com/doc/reserved.html
"""
self.funcs.update({'system.listMethods' : self.system_listMethods,
'system.methodSignature' : self.system_methodSignature,
'system.methodHelp' : self.system_methodHelp})
def register_multicall_functions(self):
"""Registers the XML-RPC multicall method in the system
namespace.
see http://www.xmlrpc.com/discuss/msgReader$1208"""
self.funcs.update({'system.multicall' : self.system_multicall})
def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
"""Dispatches an XML-RPC method from marshalled (XML) data.
XML-RPC methods are dispatched from the marshalled (XML) data
using the _dispatch method and the result is returned as
marshalled data. For backwards compatibility, a dispatch
function can be provided as an argument (see comment in
SimpleXMLRPCRequestHandler.do_POST) but overriding the
existing method through subclassing is the preferred means
of changing method dispatch behavior.
"""
try:
params, method = loads(data, use_builtin_types=self.use_builtin_types)
# generate response
if dispatch_method is not None:
response = dispatch_method(method, params)
else:
response = self._dispatch(method, params)
# wrap response in a singleton tuple
response = (response,)
response = dumps(response, methodresponse=1,
allow_none=self.allow_none, encoding=self.encoding)
except Fault as fault:
response = dumps(fault, allow_none=self.allow_none,
encoding=self.encoding)
except:
# report exception back to server
exc_type, exc_value, exc_tb = sys.exc_info()
try:
response = dumps(
Fault(1, "%s:%s" % (exc_type, exc_value)),
encoding=self.encoding, allow_none=self.allow_none,
)
finally:
# Break reference cycle
exc_type = exc_value = exc_tb = None
return response.encode(self.encoding, 'xmlcharrefreplace')
def system_listMethods(self):
"""system.listMethods() => ['add', 'subtract', 'multiple']
Returns a list of the methods supported by the server."""
methods = set(self.funcs.keys())
if self.instance is not None:
# Instance can implement _listMethod to return a list of
# methods
if hasattr(self.instance, '_listMethods'):
methods |= set(self.instance._listMethods())
# if the instance has a _dispatch method then we
# don't have enough information to provide a list
# of methods
elif not hasattr(self.instance, '_dispatch'):
methods |= set(list_public_methods(self.instance))
return sorted(methods)
def system_methodSignature(self, method_name):
"""system.methodSignature('add') => [double, int, int]
Returns a list describing the signature of the method. In the
above example, the add method takes two integers as arguments
and returns a double result.
This server does NOT support system.methodSignature."""
# See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
return 'signatures not supported'
def system_methodHelp(self, method_name):
"""system.methodHelp('add') => "Adds two integers together"
Returns a string containing documentation for the specified method."""
method = None
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
# Instance can implement _methodHelp to return help for a method
if hasattr(self.instance, '_methodHelp'):
return self.instance._methodHelp(method_name)
# if the instance has a _dispatch method then we
# don't have enough information to provide help
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name,
self.allow_dotted_names
)
except AttributeError:
pass
# Note that we aren't checking that the method actually
# be a callable object of some kind
if method is None:
return ""
else:
return pydoc.getdoc(method)
def system_multicall(self, call_list):
"""system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]
Allows the caller to package multiple XML-RPC calls into a single
request.
See http://www.xmlrpc.com/discuss/msgReader$1208
"""
results = []
for call in call_list:
method_name = call['methodName']
params = call['params']
try:
# XXX A marshalling error in any response will fail the entire
# multicall. If someone cares they should fix this.
results.append([self._dispatch(method_name, params)])
except Fault as fault:
results.append(
{'faultCode' : fault.faultCode,
'faultString' : fault.faultString}
)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
try:
results.append(
{'faultCode' : 1,
'faultString' : "%s:%s" % (exc_type, exc_value)}
)
finally:
# Break reference cycle
exc_type = exc_value = exc_tb = None
return results
def _dispatch(self, method, params):
"""Dispatches the XML-RPC method.
XML-RPC calls are forwarded to a registered function that
matches the called XML-RPC method name. If no such function
exists then the call is forwarded to the registered instance,
if available.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called.
Methods beginning with an '_' are considered private and will
not be called.
"""
try:
# call the matching registered function
func = self.funcs[method]
except KeyError:
pass
else:
if func is not None:
return func(*params)
raise Exception('method "%s" is not supported' % method)
if self.instance is not None:
if hasattr(self.instance, '_dispatch'):
# call the `_dispatch` method on the instance
return self.instance._dispatch(method, params)
# call the instance's method directly
try:
func = resolve_dotted_attribute(
self.instance,
method,
self.allow_dotted_names
)
except AttributeError:
pass
else:
if func is not None:
return func(*params)
raise Exception('method "%s" is not supported' % method)
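# --- Illustrative sketch (not part of the original module) -------------------
# A SimpleXMLRPCDispatcher can be exercised without an HTTP server: functions
# are registered plainly or through the decorator form of register_function,
# a boxcarred system.multicall is dispatched, and a marshalled request
# round-trips through _marshaled_dispatch. The name 'mul' and the payloads
# below are made up for the example.
def _example_dispatcher_roundtrip():
    dispatcher = SimpleXMLRPCDispatcher()
    dispatcher.register_function(pow)                 # exposed as 'pow'
    @dispatcher.register_function(name='mul')         # decorator form
    def multiply(x, y):
        return x * y
    dispatcher.register_multicall_functions()
    boxcar = dispatcher.system_multicall(
        [{'methodName': 'pow', 'params': [2, 3]},
         {'methodName': 'mul', 'params': [4, 5]}])    # -> [[8], [20]]
    request = dumps((2, 5), methodname='pow').encode('utf-8')
    response = dispatcher._marshaled_dispatch(request)
    answer = loads(response)[0][0]                    # -> 32
    return boxcar, answer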
class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler):
"""Simple XML-RPC request handler class.
Handles all HTTP POST requests and attempts to decode them as
XML-RPC requests.
"""
# Class attribute listing the accessible path components;
# paths not on this list will result in a 404 error.
rpc_paths = ('/', '/RPC2')
#if not None, encode responses larger than this, if possible
encode_threshold = 1400 #a common MTU
    #Override from StreamRequestHandler: full buffering of output
#and no Nagle.
wbufsize = -1
disable_nagle_algorithm = True
# a re to match a gzip Accept-Encoding
aepattern = re.compile(r"""
\s* ([^\s;]+) \s* #content-coding
(;\s* q \s*=\s* ([0-9\.]+))? #q
""", re.VERBOSE | re.IGNORECASE)
def accept_encodings(self):
r = {}
ae = self.headers.get("Accept-Encoding", "")
for e in ae.split(","):
match = self.aepattern.match(e)
if match:
v = match.group(3)
v = float(v) if v else 1.0
r[match.group(1)] = v
return r
def is_rpc_path_valid(self):
if self.rpc_paths:
return self.path in self.rpc_paths
else:
# If .rpc_paths is empty, just assume all paths are legal
return True
def do_POST(self):
"""Handles the HTTP POST request.
Attempts to interpret all HTTP POST requests as XML-RPC calls,
which are forwarded to the server's _dispatch method for handling.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
try:
# Get arguments by reading body of request.
# We read this in chunks to avoid straining
# socket.read(); around the 10 or 15Mb mark, some platforms
# begin to have problems (bug #792570).
max_chunk_size = 10*1024*1024
size_remaining = int(self.headers["content-length"])
L = []
while size_remaining:
chunk_size = min(size_remaining, max_chunk_size)
chunk = self.rfile.read(chunk_size)
if not chunk:
break
L.append(chunk)
size_remaining -= len(L[-1])
data = b''.join(L)
data = self.decode_request_content(data)
if data is None:
return #response has been sent
# In previous versions of SimpleXMLRPCServer, _dispatch
# could be overridden in this class, instead of in
# SimpleXMLRPCDispatcher. To maintain backwards compatibility,
# check to see if a subclass implements _dispatch and dispatch
# using that method if present.
response = self.server._marshaled_dispatch(
data, getattr(self, '_dispatch', None), self.path
)
except Exception as e: # This should only happen if the module is buggy
# internal error, report as HTTP server error
self.send_response(500)
# Send information about the exception if requested
if hasattr(self.server, '_send_traceback_header') and \
self.server._send_traceback_header:
self.send_header("X-exception", str(e))
trace = traceback.format_exc()
trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII')
self.send_header("X-traceback", trace)
self.send_header("Content-length", "0")
self.end_headers()
else:
self.send_response(200)
self.send_header("Content-type", "text/xml")
if self.encode_threshold is not None:
if len(response) > self.encode_threshold:
q = self.accept_encodings().get("gzip", 0)
if q:
try:
response = gzip_encode(response)
self.send_header("Content-Encoding", "gzip")
except NotImplementedError:
pass
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
def decode_request_content(self, data):
#support gzip encoding of request
encoding = self.headers.get("content-encoding", "identity").lower()
if encoding == "identity":
return data
if encoding == "gzip":
try:
return gzip_decode(data)
except NotImplementedError:
self.send_response(501, "encoding %r not supported" % encoding)
except ValueError:
self.send_response(400, "error decoding gzip content")
else:
self.send_response(501, "encoding %r not supported" % encoding)
self.send_header("Content-length", "0")
self.end_headers()
def report_404 (self):
# Report a 404 error
self.send_response(404)
response = b'No such page'
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
def log_request(self, code='-', size='-'):
"""Selectively log an accepted request."""
if self.server.logRequests:
BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(socketserver.TCPServer,
SimpleXMLRPCDispatcher):
"""Simple XML-RPC server.
Simple XML-RPC server that allows functions and a single instance
to be installed to handle requests. The default implementation
attempts to dispatch XML-RPC calls to the functions or instance
installed in the server. Override the _dispatch method inherited
from SimpleXMLRPCDispatcher to change this behavior.
"""
allow_reuse_address = True
# Warning: this is for debugging purposes only! Never set this to True in
    # production code, as it will send out sensitive information (exception
# and stack trace details) when exceptions are raised inside
# SimpleXMLRPCRequestHandler.do_POST
_send_traceback_header = False
def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
logRequests=True, allow_none=False, encoding=None,
bind_and_activate=True, use_builtin_types=False):
self.logRequests = logRequests
SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
"""Multipath XML-RPC Server
This specialization of SimpleXMLRPCServer allows the user to create
multiple Dispatcher instances and assign them to different
HTTP request paths. This makes it possible to run two or more
'virtual XML-RPC servers' at the same port.
Make sure that the requestHandler accepts the paths in question.
"""
def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
logRequests=True, allow_none=False, encoding=None,
bind_and_activate=True, use_builtin_types=False):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none,
encoding, bind_and_activate, use_builtin_types)
self.dispatchers = {}
self.allow_none = allow_none
self.encoding = encoding or 'utf-8'
def add_dispatcher(self, path, dispatcher):
self.dispatchers[path] = dispatcher
return dispatcher
def get_dispatcher(self, path):
return self.dispatchers[path]
def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
try:
response = self.dispatchers[path]._marshaled_dispatch(
data, dispatch_method, path)
except:
# report low level exception back to server
# (each dispatcher should have handled their own
# exceptions)
exc_type, exc_value = sys.exc_info()[:2]
try:
response = dumps(
Fault(1, "%s:%s" % (exc_type, exc_value)),
encoding=self.encoding, allow_none=self.allow_none)
response = response.encode(self.encoding, 'xmlcharrefreplace')
finally:
# Break reference cycle
exc_type = exc_value = None
return response
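# --- Illustrative sketch (not part of the original module) -------------------
# Two independent dispatchers behind one port, keyed by request path. The
# handler subclass is needed because the default rpc_paths only accept '/'
# and '/RPC2'; the port, paths and registered functions are arbitrary
# example values.
def _example_multipath_server():
    class PathAwareHandler(SimpleXMLRPCRequestHandler):
        rpc_paths = ('/math', '/echo')
    server = MultiPathXMLRPCServer(('localhost', 8000),
                                   requestHandler=PathAwareHandler)
    math_dispatcher = SimpleXMLRPCDispatcher()
    math_dispatcher.register_function(pow)
    echo_dispatcher = SimpleXMLRPCDispatcher()
    echo_dispatcher.register_function(lambda s: s, 'echo')
    server.add_dispatcher('/math', math_dispatcher)
    server.add_dispatcher('/echo', echo_dispatcher)
    return server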
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
"""Simple handler for XML-RPC data passed through CGI."""
def __init__(self, allow_none=False, encoding=None, use_builtin_types=False):
SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
def handle_xmlrpc(self, request_text):
"""Handle a single XML-RPC request"""
response = self._marshaled_dispatch(request_text)
print('Content-Type: text/xml')
print('Content-Length: %d' % len(response))
print()
sys.stdout.flush()
sys.stdout.buffer.write(response)
sys.stdout.buffer.flush()
def handle_get(self):
"""Handle a single HTTP GET request.
Default implementation indicates an error because
XML-RPC uses the POST method.
"""
code = 400
message, explain = BaseHTTPRequestHandler.responses[code]
response = http.server.DEFAULT_ERROR_MESSAGE % \
{
'code' : code,
'message' : message,
'explain' : explain
}
response = response.encode('utf-8')
print('Status: %d %s' % (code, message))
print('Content-Type: %s' % http.server.DEFAULT_ERROR_CONTENT_TYPE)
print('Content-Length: %d' % len(response))
print()
sys.stdout.flush()
sys.stdout.buffer.write(response)
sys.stdout.buffer.flush()
def handle_request(self, request_text=None):
"""Handle a single XML-RPC request passed through a CGI post method.
If no XML data is given then it is read from stdin. The resulting
XML-RPC response is printed to stdout along with the correct HTTP
headers.
"""
if request_text is None and \
os.environ.get('REQUEST_METHOD', None) == 'GET':
self.handle_get()
else:
# POST data is normally available through stdin
try:
length = int(os.environ.get('CONTENT_LENGTH', None))
except (ValueError, TypeError):
length = -1
if request_text is None:
request_text = sys.stdin.read(length)
self.handle_xmlrpc(request_text)
# -----------------------------------------------------------------------------
# Self documenting XML-RPC Server.
class ServerHTMLDoc(pydoc.HTMLDoc):
"""Class used to generate pydoc HTML document for a server"""
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
# XXX Note that this regular expression does not allow for the
# hyperlinking of arbitrary strings being used as method
# names. Only methods with names consisting of word characters
# and '.'s are hyperlinked.
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?((?:\w|\.)+))\b')
while 1:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
def docroutine(self, object, name, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
title = '<a name="%s"><strong>%s</strong></a>' % (
self.escape(anchor), self.escape(name))
if callable(object):
argspec = str(signature(object))
else:
argspec = '(...)'
if isinstance(object, tuple):
argspec = object[0] or argspec
docstring = object[1] or ""
else:
docstring = pydoc.getdoc(object)
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
doc = self.markup(
docstring, self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def docserver(self, server_name, package_documentation, methods):
"""Produce HTML documentation for an XML-RPC server."""
fdict = {}
for key, value in methods.items():
fdict[key] = '#-' + key
fdict[value] = fdict[key]
server_name = self.escape(server_name)
head = '<big><big><strong>%s</strong></big></big>' % server_name
result = self.heading(head, '#ffffff', '#7799ee')
doc = self.markup(package_documentation, self.preformat, fdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
contents = []
method_items = sorted(methods.items())
for key, value in method_items:
contents.append(self.docroutine(value, key, funcs=fdict))
result = result + self.bigsection(
'Methods', '#ffffff', '#eeaa77', ''.join(contents))
return result
class XMLRPCDocGenerator:
"""Generates documentation for an XML-RPC server.
This class is designed as mix-in and should not
be constructed directly.
"""
def __init__(self):
# setup variables used for HTML documentation
self.server_name = 'XML-RPC Server Documentation'
self.server_documentation = \
"This server exports the following methods through the XML-RPC "\
"protocol."
self.server_title = 'XML-RPC Server Documentation'
def set_server_title(self, server_title):
"""Set the HTML title of the generated server documentation"""
self.server_title = server_title
def set_server_name(self, server_name):
"""Set the name of the generated HTML server documentation"""
self.server_name = server_name
def set_server_documentation(self, server_documentation):
"""Set the documentation string for the entire server."""
self.server_documentation = server_documentation
def generate_html_documentation(self):
"""generate_html_documentation() => html documentation for the server
Generates HTML documentation for the server using introspection for
installed functions and instances that do not implement the
_dispatch method. Alternatively, instances can choose to implement
the _get_method_argstring(method_name) method to provide the
argument string used in the documentation and the
_methodHelp(method_name) method to provide the help text used
in the documentation."""
methods = {}
for method_name in self.system_listMethods():
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
method_info = [None, None] # argspec, documentation
if hasattr(self.instance, '_get_method_argstring'):
method_info[0] = self.instance._get_method_argstring(method_name)
if hasattr(self.instance, '_methodHelp'):
method_info[1] = self.instance._methodHelp(method_name)
method_info = tuple(method_info)
if method_info != (None, None):
method = method_info
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name
)
except AttributeError:
method = method_info
else:
method = method_info
else:
assert 0, "Could not find method in self.functions and no "\
"instance installed"
methods[method_name] = method
documenter = ServerHTMLDoc()
documentation = documenter.docserver(
self.server_name,
self.server_documentation,
methods
)
return documenter.page(html.escape(self.server_title), documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
"""XML-RPC and documentation request handler class.
Handles all HTTP POST requests and attempts to decode them as
XML-RPC requests.
Handles all HTTP GET requests and interprets them as requests
for documentation.
"""
def do_GET(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
# Check that the path is legal
if not self.is_rpc_path_valid():
self.report_404()
return
response = self.server.generate_html_documentation().encode('utf-8')
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
class DocXMLRPCServer( SimpleXMLRPCServer,
XMLRPCDocGenerator):
"""XML-RPC and HTML documentation server.
Adds the ability to serve server documentation to the capabilities
of SimpleXMLRPCServer.
"""
def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
logRequests=True, allow_none=False, encoding=None,
bind_and_activate=True, use_builtin_types=False):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
allow_none, encoding, bind_and_activate,
use_builtin_types)
XMLRPCDocGenerator.__init__(self)
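# --- Illustrative sketch (not part of the original module) -------------------
# Serving generated documentation next to the RPC endpoint; the port, titles
# and the registered function are arbitrary example values.
def _example_doc_server():
    server = DocXMLRPCServer(('localhost', 8001))
    server.set_server_title('Example XML-RPC service')
    server.set_server_name('example')
    server.set_server_documentation('Functions exported for the example.')
    server.register_function(pow)
    return server  # GET requests on /RPC2 now return pydoc-style HTML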
class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
XMLRPCDocGenerator):
"""Handler for XML-RPC data and documentation requests passed through
CGI"""
def handle_get(self):
"""Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.
"""
response = self.generate_html_documentation().encode('utf-8')
print('Content-Type: text/html')
print('Content-Length: %d' % len(response))
print()
sys.stdout.flush()
sys.stdout.buffer.write(response)
sys.stdout.buffer.flush()
def __init__(self):
CGIXMLRPCRequestHandler.__init__(self)
XMLRPCDocGenerator.__init__(self)
if __name__ == '__main__':
import datetime
class ExampleService:
def getData(self):
return '42'
class currentTime:
@staticmethod
def getCurrentTime():
return datetime.datetime.now()
with SimpleXMLRPCServer(("localhost", 8000)) as server:
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.register_instance(ExampleService(), allow_dotted_names=True)
server.register_multicall_functions()
print('Serving XML-RPC on localhost port 8000')
print('It is advisable to run this example server within a secure, closed network.')
try:
server.serve_forever()
except KeyboardInterrupt:
print("\nKeyboard interrupt received, exiting.")
sys.exit(0)
|
lgpl-2.1
| -9,123,508,251,613,869,000
| 35.923464
| 92
| 0.571831
| false
| 4.644549
| false
| false
| false
|
ZoomerAnalytics/catalyst
|
catalyst/json_dbapi/__init__.py
|
1
|
2214
|
import json, datetime, os
from .sqlparser import parse
def Date(year, month, day):
raise NotImplementedError()
def Time(hour, minute, second):
raise NotImplementedError()
def Timestamp(year, month, day, hour, minute, second):
raise NotImplementedError()
def DateFromTicks(ticks):
raise NotImplementedError()
def TimeFromTicks(ticks):
raise NotImplementedError()
def TimeStampFromTicks(ticks):
raise NotImplementedError()
def Binary(value):
return bytes(value)
STRING = str
BINARY = bytes
NUMBER = float
DATETIME = datetime.datetime
ROWID = int
class JsonDBAPICursor(object):
def __init__(self, owner):
self.owner = owner
self.arraysize = 1
self._results = None
@property
def description(self):
raise NotImplementedError()
@property
def rowcount(self):
raise NotImplementedError()
def close(self):
pass
def execute(self, operation, parameters=None):
stmt = parse(operation)
ret, self._results = stmt.execute(parameters)
raise Exception("Operation '%s' not supported" % operation)
def executemany(self, operation, parameter_seq):
raise NotImplementedError()
def fetchone(self):
raise NotImplementedError()
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
raise NotImplementedError()
def fetchall(self):
raise NotImplementedError()
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
class JsonDBAPIConnection(object):
def __init__(self, filename):
self.filename = filename
if os.path.isfile(filename):
with open(filename, "r") as f:
self.j = json.load(f)
else:
self.j = {}
def close(self):
pass
def commit(self):
raise NotImplementedError()
def cursor(self):
return JsonDBAPICursor(self)
def rollback(self):
pass
apilevel = "1.0"
threadsafety = 0
paramstyle = "format"
def connect(filename):
return JsonDBAPIConnection(filename)
Error = Exception
DatabaseError = Exception
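# --- Illustrative sketch (not part of the original module) -------------------
# The intended PEP 249 call pattern; 'data.json' and the SELECT statement are
# made up, and any statement the backend cannot handle surfaces as an
# exception from execute().
def _example_usage():
    conn = connect('data.json')   # loads the JSON file if it already exists
    cur = conn.cursor()
    try:
        cur.execute("SELECT * FROM items")
    except Exception as exc:
        print("query failed: %s" % exc)
    finally:
        cur.close()
        conn.close()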
|
mit
| -2,250,958,578,212,698,600
| 17.771186
| 67
| 0.64318
| false
| 4.257692
| false
| false
| false
|
semitki/semitki
|
api/sonetworks/migrations/0006_auto_20170216_2125.py
|
1
|
1630
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-16 21:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sonetworks', '0005_auto_20170202_2319'),
]
operations = [
migrations.CreateModel(
name='GroupedSocialAccounts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.RenameModel(
old_name='AccountsGroup',
new_name='SocialAccount',
),
migrations.RenameModel(
old_name='UserAccount',
new_name='SocialAccountsGroup',
),
migrations.RemoveField(
model_name='useraccountsgroup',
name='account_group_id',
),
migrations.RemoveField(
model_name='useraccountsgroup',
name='user_account_id',
),
migrations.DeleteModel(
name='UserAccountsGroup',
),
migrations.AddField(
model_name='groupedsocialaccounts',
name='social_account_group_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sonetworks.SocialAccountsGroup'),
),
migrations.AddField(
model_name='groupedsocialaccounts',
name='social_account_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sonetworks.SocialAccount'),
),
]
|
mit
| 490,810,210,341,913,500
| 30.960784
| 118
| 0.586503
| false
| 4.289474
| false
| false
| false
|
btaylor66/SREAchievements
|
sreachievementswebapp/models/person.py
|
1
|
1194
|
"""Database model for a person
"""
import collections
import operator
import json
from sreachievementswebapp.dbmodels import db
from sqlalchemy.ext.hybrid import hybrid_property
m2m_person_achievement = db.Table(
'm2m_person_achievement',
db.Column('achievement_id', db.Integer, db.ForeignKey('achievement.id')),
db.Column('person_id', db.Integer, db.ForeignKey('person.id')),
db.PrimaryKeyConstraint('achievement_id', 'person_id')
)
class Person(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(50), unique=True)
fullname = db.Column(db.String(50))
# team_id = db.Column(db.Integer, db.ForeignKey('teams.team_id'), nullable=True)
achievements = db.relationship('Achievement', secondary=m2m_person_achievement, backref='Person')
# team = db.relationship("Teams", back_populates="users")
# team = db.relationship("Teams")
known_achievements = []
# def __init__(self, username, fullname, team_id, team):
def __init__(self, username, fullname):
self.username = username
self.fullname = fullname
# self.team_id = team_id
# self.team = team
|
gpl-3.0
| -3,030,146,035,369,877,000
| 28.85
| 101
| 0.685092
| false
| 3.316667
| false
| false
| false
|
Ensembles/ert
|
python/python/ert/job_queue/job_status_type_enum.py
|
1
|
2741
|
# Copyright (C) 2013 Statoil ASA, Norway.
#
# The file 'job_status_type_enum.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from cwrap import BaseCEnum
from ert.job_queue import JOB_QUEUE_LIB
class JobStatusType(BaseCEnum):
TYPE_NAME = "job_status_type_enum"
JOB_QUEUE_NOT_ACTIVE = None # This value is used in external query routines - for jobs which are (currently) not active. */
JOB_QUEUE_WAITING = None # A node which is waiting in the internal queue.
    JOB_QUEUE_SUBMITTED = None # Internal status: It has been submitted - the next status update will (should) place it as pending or running.
JOB_QUEUE_PENDING = None # A node which is pending - a status returned by the external system. I.e LSF
JOB_QUEUE_RUNNING = None # The job is running
JOB_QUEUE_DONE = None # The job is done - but we have not yet checked if the target file is produced */
JOB_QUEUE_EXIT = None # The job has exited - check attempts to determine if we retry or go to complete_fail */
JOB_QUEUE_IS_KILLED = None # The job has been killed, following a JOB_QUEUE_DO_KILL - can restart. */
    JOB_QUEUE_DO_KILL = None # The job should be killed, either due to user request, or automated measures - the job can NOT be restarted. */
JOB_QUEUE_SUCCESS = None
JOB_QUEUE_RUNNING_CALLBACK = None
JOB_QUEUE_FAILED = None
JOB_QUEUE_DO_KILL_NODE_FAILURE = None
JOB_QUEUE_STATUS_FAILURE = None
JobStatusType.addEnum("JOB_QUEUE_NOT_ACTIVE", 1)
JobStatusType.addEnum("JOB_QUEUE_WAITING", 4)
JobStatusType.addEnum("JOB_QUEUE_SUBMITTED", 8)
JobStatusType.addEnum("JOB_QUEUE_PENDING", 16)
JobStatusType.addEnum("JOB_QUEUE_RUNNING", 32)
JobStatusType.addEnum("JOB_QUEUE_DONE", 64)
JobStatusType.addEnum("JOB_QUEUE_EXIT", 128)
JobStatusType.addEnum("JOB_QUEUE_IS_KILLED", 4096)
JobStatusType.addEnum("JOB_QUEUE_DO_KILL", 8192)
JobStatusType.addEnum("JOB_QUEUE_SUCCESS", 16384)
JobStatusType.addEnum("JOB_QUEUE_RUNNING_CALLBACK", 32768)
JobStatusType.addEnum("JOB_QUEUE_FAILED", 65536)
JobStatusType.addEnum("JOB_QUEUE_DO_KILL_NODE_FAILURE", 131072)
JobStatusType.addEnum("JOB_QUEUE_STATUS_FAILURE", 262144)
|
gpl-3.0
| 5,301,143,350,003,913,000
| 52.745098
| 152
| 0.721634
| false
| 3.33455
| false
| false
| false
|
ollej/shoutbridge
|
src/plugins/GoldQuest.py
|
1
|
17185
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The MIT License
Copyright (c) 2011 Olle Johansson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from sqlalchemy import Table, Column, Integer, Boolean, String, MetaData, ForeignKey, Sequence, create_engine
from sqlalchemy.orm import mapper, sessionmaker
import random
import cmd
import yaml
from utils.Conf import *
class Hero(object):
id = 0
name = ''
health = None
strength = None
hurt = None
kills = None
gold = None
level = None
alive = None
def __init__(self):
self.hurt = 0
self.kills = 0
self.gold = 0
self.level = 1
self.alive = True
def reroll(self, name=None):
self.health = self.roll(20, 5)
self.strength = self.roll(20, 5)
self.hurt = 0
self.kills = 0
self.gold = 0
self.level = 1
self.alive = True
if name:
self.name = name
else:
self.name = self.random_name()
def search_treasure(self):
luck = self.roll(100)
if luck > 50:
found_gold = self.roll(self.level)
self.gold = self.gold + found_gold
return found_gold
return 0
def injure(self, hurt):
self.hurt = self.hurt + hurt
if self.hurt > self.health:
self.alive = False
def fight(self, monster):
#print("Monster:", monster.health, monster.strength)
while monster.health >= 0 and self.hurt < self.health:
hit = self.roll(self.strength)
killed = monster.injure(hit)
#print("Hit:", hit, "Monster Health:", monster.health)
if not killed:
monster_hit = self.roll(monster.strength)
self.injure(monster_hit)
#print("Monster Hits:", monster_hit, "Hero Hurt:", self.hurt)
if self.hurt > self.health:
self.alive = False
else:
self.kills = self.kills + 1
return self.alive
def rest(self):
if self.hurt > 0:
heal = self.roll(10)
if heal > self.hurt:
heal = self.hurt
self.hurt = self.hurt - heal
return heal
return 0
def go_deeper(self, depth=None):
if not depth:
depth = 1
self.level = self.level + depth
return self.level
def roll(self, sides, times=1):
total = 0
for i in range(times):
total = total + random.randint(1, sides)
return total
def random_name(self):
name = random.choice(['Conan', 'Canon', 'Hercules', 'Robin', 'Dante', 'Legolas', 'Buffy', 'Xena'])
epithet = random.choice(['Barbarian', 'Invincible', 'Mighty', 'Hairy', 'Bastard', 'Slayer'])
return '%s the %s' % (name, epithet)
def get_attributes(self):
attribs = self.__dict__
attribs['status'] = ""
if not self.alive:
attribs['status'] = " (Deceased)"
#for k, v in attribs.items():
# print k, v
return attribs
#return self.__dict__
def get_charsheet(self):
msg = "%(name)s%(status)s - Strength: %(strength)d Health: %(health)d Hurt: %(hurt)d Kills: %(kills)d Gold: %(gold)d Level: %(level)d"
msg = msg % self.get_attributes()
return msg
class Monster(object):
name = None
strength = None
health = None
level = None
def __init__(self, level=None, name=None, boss=False):
if not level:
level = 1
self.strength = random.randint(1, level)
self.health = random.randint(1, level)
if boss:
self.strength = self.strength + level
self.health = self.health + level
if name:
self.name = name
else:
self.name = self.random_name()
def injure(self, hurt):
"""
Injure the monster with hurt points. Returns True if the monster died.
"""
self.health = self.health - hurt
if self.health <= 0:
self.level.kill_monster()
return True
else:
return False
def random_name(self):
return random.choice([
"an orc", "an ogre", "a bunch of goblins", "a giant spider",
"a cyclops", "a minotaur", "a horde of kobolds",
"a rattling skeleton", "a large troll", "a moaning zombie",
"a swarm of vampire bats", "a baby hydra", "a giant monster ant",
"a slithering lizard", "an angry lion", "three hungry bears",
"a hell hound", "a pack of rabid dogs", "a werewolf",
"an ice demon", "a fire wraith", "a groaning ghoul",
"two goblins", "a three-headed hyena", "a giant monster worm",
"a slobbering were-pig"
])
class Level(object):
depth = None
killed = None
looted = None
boss = None
text = None
def __init__(self, depth=None):
self.killed = 0
self.looted = 0
if depth:
self.depth = depth
else:
self.depth = 1
def get_monster(self, name):
if self.killed == self.depth - 1:
boss = True
if self.boss:
name = self.boss
else:
boss = False
if self.has_monsters():
monster = Monster(self.depth, name, boss)
monster.level = self
return monster
def get_loot(self):
loot = 0
if self.can_loot():
self.looted = self.looted + 1
luck = random.randint(1, 100)
if luck > 20:
loot = random.randint(1, self.depth)
elif luck < 5:
loot = 0 - luck
return loot
def kill_monster(self):
if self.has_monsters():
self.killed = self.killed + 1
return True
return False
def has_monsters(self):
if self.killed < self.depth:
return True
return False
def can_loot(self):
if self.looted < self.killed:
return True
return False
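# --- Illustrative sketch (not part of the original file) ---------------------
# Exercising Hero, Level and Monster directly, without the database layer or
# the chat plugin; the hero and monster names are made up.
def _example_skirmish():
    hero = Hero()
    hero.reroll('Olle the Tester')
    level = Level(3)
    monster = level.get_monster('a test goblin')
    survived = hero.fight(monster)
    gold = hero.search_treasure()
    return survived, gold, hero.get_charsheet()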
class GoldQuest(BridgeClass):
_gamedata = None
cfg = None
hero = None
level = None
def __init__(self, cfg):
"""
Setup Sqlite SQL tables and start a db session.
The database will be saved in C{extras/goldquest.db}
Calls L{setup_tables} to setup table metadata and L{setup_session}
to instantiate the db session.
"""
self.cfg = cfg
try:
debug = self.cfg.get_bool('debug')
except AttributeError:
debug = False
self.read_texts()
self.engine = create_engine('sqlite:///extras/quest.db', echo=debug)
self.setup_tables()
self.setup_session()
self.hero = self.get_alive_hero()
if self.hero and not self.level:
self.level = Level(self.hero.level)
def setup_session(self):
"""
Start a SQLAlchemy db session.
Saves the session instance in C{self.session}
"""
Session = sessionmaker(bind=self.engine)
self.session = Session()
def setup_tables(self):
"""
Defines the tables to use for L{Hero}
The Metadata instance is saved to C{self.metadata}
"""
self.metadata = MetaData()
hero_table = Table('hero', self.metadata,
Column('id', Integer, Sequence('hero_id_seq'), primary_key=True),
Column('name', String(100)),
Column('health', Integer),
Column('strength', Integer),
Column('hurt', Integer),
Column('kills', Integer),
Column('gold', Integer),
Column('level', Integer),
Column('alive', Boolean),
)
mapper(Hero, hero_table)
level_table = Table('level', self.metadata,
Column('id', Integer, Sequence('hero_id_seq'), primary_key=True),
Column('depth', Integer),
Column('killed', Integer),
Column('looted', Integer),
)
mapper(Level, level_table)
self.metadata.create_all(self.engine)
def read_texts(self):
f = open('extras/goldquest.dat')
self._gamedata = yaml.load(f)
f.close()
def get_text(self, text):
texts = self._gamedata['texts'][text]
if not texts:
return None
elif isinstance(texts, basestring):
return texts
else:
return random.choice(texts)
def get_level_texts(self, depth):
for lvl in self._gamedata['level']:
if lvl['level'] == depth:
return lvl
def get_monster(self, lvl=None):
if not lvl:
lvl = self.level.depth or 1
monsters = []
for monster in self._gamedata['monster']:
if lvl >= monster['lowlevel'] and monster['highlevel'] == 0 or lvl <= monster['highlevel']:
monsters.append(monster['name'])
if monsters:
name = random.choice(monsters)
else:
name = None
return self.level.get_monster(name)
def play(self, command):
msg = ""
command = command.strip().lower()
try:
(command, rest) = command.split(' ')
except ValueError:
rest = ""
rest = rest.strip()
if command in ['reroll']:
return self.reroll()
if not self.hero or not self.hero.alive:
return self.get_text('nochampion')
if command in ['rest', 'vila']:
msg = self.rest()
elif command in ['fight', 'kill', 'slay', u'slåss']:
msg = self.fight()
elif command in ['deeper', 'descend', 'vidare']:
msg = self.go_deeper(rest)
elif command in ['loot', 'search', u'sök', 'finna']:
msg = self.search_treasure()
elif command in ['charsheet', 'stats', u'formulär']:
msg = self.show_charsheet()
else:
return None
self.save_data()
return msg
def save_data(self):
self.session.add(self.hero)
self.session.add(self.level)
self.session.commit()
def get_alive_hero(self):
hero = self.session.query(Hero).filter_by(alive=True).first()
return hero
def get_level(self, lvl):
level = self.session.query(Level).filter_by(depth=lvl).first()
if not level:
level = Level(lvl)
texts = self.get_level_texts(lvl)
if texts:
for k, v in texts.items():
if v:
setattr(level, k , v)
if not level.boss:
level.boss = random.choice(self._gamedata['boss'])
return level
def reroll(self):
if self.hero and self.hero.alive:
msg = self.get_text('noreroll') % self.hero.get_attributes()
return msg
else:
# Delete all old Level data.
self.session.query(Level).delete()
# Reroll new hero.
self.hero = Hero()
self.hero.reroll()
self.level = self.get_level(self.hero.level)
self.save_data()
msg = self.get_text('newhero')
msg = msg % self.hero.get_attributes()
msg = msg + " " + self.level.text
return msg
def search_treasure(self):
#loot = self.hero.search_treasure()
attribs = self.hero.get_attributes()
if self.level.can_loot():
loot = self.level.get_loot()
attribs['loot'] = loot
if loot > 0:
msg = self.get_text('foundloot')
# Should be a method on Hero
self.hero.gold = self.hero.gold + loot
elif loot < 0:
attribs['trap_hurt'] = abs(loot)
self.hero.injure(attribs['trap_hurt'])
msg = self.get_text('foundtrap')
else:
msg = self.get_text('nogold')
else:
msg = self.get_text('noloot')
msg = msg % attribs
return msg
def sneak_attack(self):
if self.level.has_monsters():
#self.logprint("Monsters are available to sneak attack.")
unlucky = self.roll(100)
#self.logprint("unlucky:", unlucky)
if unlucky < 20:
#self.logprint("Sneak attack!")
monster = self.get_monster(self.level.depth)
won = self.hero.fight(monster)
if won:
msg = self.get_text('rest_attack_won')
else:
msg = self.get_text('rest_attack_lost')
attribs = self.hero.get_attributes()
attribs['monster_name'] = monster.name
msg = msg % attribs
return msg
def rest(self):
# If there are monsters alive on the level, there is a
# risk of a sneak attack while resting.
msg = self.sneak_attack()
if msg:
return msg
rested = self.hero.rest()
if rested:
if self.hero.hurt:
restmsg = self.get_text('rests')
else:
restmsg = self.get_text('healed')
else:
restmsg = self.get_text('alreadyhealed')
attribs = self.hero.get_attributes()
attribs['rested'] = rested
msg = restmsg % attribs
return msg
def go_deeper(self, levels=1):
try:
levels = int(levels)
except ValueError:
levels = 1
if levels > 10:
levels = 10
depth = self.hero.go_deeper(levels)
self.level = self.get_level(depth)
msg = self.level.text or self.get_text('deeper')
msg = msg % self.hero.get_attributes()
return msg
def fight(self):
monster = self.get_monster(self.level.depth)
attribs = self.hero.get_attributes()
if not monster:
msg = self.get_text('nomonsters')
return msg % attribs
won = self.hero.fight(monster)
if won:
msg = self.get_text('killed')
attribs['slayed'] = self.get_text('slayed')
else:
msg = self.get_text('died')
attribs['monster'] = monster.name
msg = msg % attribs
msg = self.firstupper(msg)
return msg
def roll(self, sides, times=1):
total = 0
for i in range(times):
total = total + random.randint(1, sides)
return total
def show_charsheet(self):
msg = self.get_text('charsheet')
return msg % self.hero.get_attributes()
def firstupper(self, text):
first = text[0].upper()
return first + text[1:]
class Game(cmd.Cmd):
prompt = 'GoldQuest> '
intro = "Welcome to GoldQuest!"
game = None
def preloop(self):
cfg = Conf('../config.ini', 'LOCAL')
self.game = GoldQuest(cfg)
def default(self, line):
ret = self.game.play(line)
if ret:
print ret
def do_fight(self, line):
"Find a new monster and fight it to the death!"
print self.game.play('fight')
def do_charsheet(self, line):
"Show the character sheet for the current hero."
print self.game.play('charsheet')
def do_reroll(self, line):
"Reroll a new hero if the village doesn't have one already."
print self.game.play('reroll')
def do_rest(self, line):
"Makes the hero rest for a while to regain hurt."
print self.game.play('rest')
def do_loot(self, line):
"The hero will search for loot in the hope to find gold."
print self.game.play('loot')
def do_deeper(self, line):
"Tells the hero to go deeper into the dungeon."
if line:
cmd = 'deeper %s' % line
else:
cmd = 'deeper'
print self.game.play(cmd)
def do_quit(self, line):
"Quit Game"
print "A strange game. The only winning move is not to play."
return True
if __name__ == '__main__':
Game().cmdloop()
|
mit
| -5,522,686,286,455,691,000
| 30.354015
| 142
| 0.545396
| false
| 3.76468
| false
| false
| false
|
merriam/techtree
|
bin/check_version.py
|
1
|
7249
|
#!/usr/bin/env python3
"""
A little program to check if there are later versions of packages I rely upon.
This may grow into a full fledged service, but not today.
"""
import subprocess
import re
import sys
class Program:
"""A known program that can be checked to see the installed version
number matches the published version.
A note on security: checking requires running shell programs. If
a recipe runs something bad, like 'rm foo', then that will run.
"""
def __init__(self, name, installed_command, installed_regex,
published_command, published_regex):
self.name = name
self.installed_command = installed_command
        # command can have shell characters, e.g., "cat * | grep -i version"
# must return 0 (Unix all ok code)
self.installed_regex = installed_regex
# run this regex on the output, match version as capture group 1
self.published_command = published_command
self.published_regex = published_regex
def _e_get_version_number(self, for_error_message, command, regex):
# pylint: disable=no-self-use
""" returns (err, version_number). Just internal repeated code. """
# TODO: This just doesn't cleanly grab stderr.
try:
out = subprocess.check_output(command, shell=True)
except subprocess.CalledProcessError:
return "Could not cleanly execute command to check {} version.".format(
for_error_message), None
if type(regex) == str:
out = str(out, "utf-8") # if regex was not bytes, treat output as unicode
try:
version = re.search(regex, out).group(1)
except AttributeError:
return "Could not match version number in {} command output.", None
except IndexError:
return "{} regex matched but did not have a group (parenthesis)", None
return None, version
def err_check(self):
"""return None if this program is up to date with known programs,
else returns a string with the error.
"""
err, installed_version = self._e_get_version_number(
"installed", self.installed_command, self.installed_regex)
if err:
return err
err, published_version = self._e_get_version_number(
"published", self.published_command, self.published_regex)
if err:
return err
if published_version != installed_version:
return "Versions do not match. Installed {}, but published {}".format(
installed_version, published_version)
return None
class KnownPrograms:
""".known_programs{name} is a Program that could be checked. Only
need to create a single instance."""
def __init__(self):
self.known_programs = {}
def add(self, name, installed_command, installed_regex,
published_command, published_regex):
""" Add this to list of known programs """
program = Program(name, installed_command, installed_regex,
published_command, published_regex)
self.known_programs[name] = program
@classmethod
def usual_suspects(cls):
""" return a set of the usual known programs """
known = cls()
known.add('VirtualBox',
'VirtualBox --help',
r'Oracle VM VirtualBox Manager (\d.\d.\d)',
'curl --silent https://www.virtualbox.org/wiki/Downloads',
r'OS X.*/virtualbox/(\d\.\d\.\d)/')
known.add('Docker',
'docker --version',
r'Docker version (\d.\d.\d)',
'curl --silent https://raw.github.com/dotcloud/docker/release/VERSION',
r'(\d.\d.\d)')
return known
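# --- Illustrative sketch (not part of the original file) ---------------------
# Adding a recipe for another tool; the git command and regexes are
# assumptions about its output format and the "published" command is only a
# stub, so this is a shape demonstration rather than a tested recipe.
def _example_custom_recipe():
    known = KnownPrograms.usual_suspects()
    known.add('Git',
              'git --version',
              r'git version (\d+\.\d+\.\d+)',
              'echo "latest: 2.39.0"',
              r'latest: (\d+\.\d+\.\d+)')
    return ProgramSuite('Git').check(known)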
def _add_pass(known):
known.add("_pass", "echo True", "(True)", "echo True", "(True)")
def test_simple_pass():
known = KnownPrograms()
_add_pass(known)
assert "_pass" in known.known_programs
assert "_mystery" not in known.known_programs
assert known.known_programs["_pass"].err_check() is None
def _add_failures(known):
# hate to repeat code
known.add("_version_mismatch", "echo True", "(True)", "echo False", "(False)")
known.add("_installed_will_not_run", "//bad_command", "True", "echo False",
"(False)")
known.add("_no_group_in_installed_regex", "echo True", "True", "echo True",
"(True)")
known.add("_no_group_in_publshed_regex", "echo True", "(True)", "echo True",
"True")
known.add("_installed_will_not_match", "echo True", "(bad_regex)", "echo True",
"(True)")
known.add("_published_will_not_run", "echo True", "(True)", "//bad_command",
"(True)")
known.add("_published_will_not_match", "echo True", "(True)", "echo True",
"(bad_regex)")
def test_failures():
known = KnownPrograms()
_add_failures(known)
for program in known.known_programs.values():
assert program.err_check() is not None
class ProgramSuite:
"""A set of installed programs to check.
Each program is identified by a name, which should correspond to a
list of known programs that can be checked.
There are really only a few possible errors: don't know how to
check, failed to run installed programs, failed to run published
    programs, version numbers don't match. Failing to run might be in
the exec or matching the version number. These can be strings for now.
"""
def __init__(self, program_list=None):
if program_list == None:
self.programs = []
else:
self.programs = program_list.split()
def check(self, known):
""" return True if everything up to date, else false.
Print status to terminal.
"""
print("Checking versions...")
all_OK = True
for name in self.programs:
if name not in known.known_programs:
print("{}: FAIL Could not match program in list of "
"known programs".format(name))
all_OK = False
else:
err = known.known_programs[name].err_check()
if err:
print("{}: FAIL {}".format(name, err))
all_OK = False
else:
print("{}: PASS".format(name))
if all_OK:
print("Versions are all up to date.")
else:
print("Failure while checking versions.")
return all_OK
def test_suite_passes():
known = KnownPrograms()
_add_pass(known)
_add_failures(known)
assert ProgramSuite("_pass _pass _pass").check(known)
assert ProgramSuite("").check(known)
assert not ProgramSuite("_pass _version_mismatch _pass").check(known)
assert not ProgramSuite("_pass _unknown _pass").check(known)
def test_usual_suspects():
known = KnownPrograms.usual_suspects()
assert "Docker" in known.known_programs.keys()
if __name__ == "__main__":
usual = KnownPrograms.usual_suspects()
is_ok = ProgramSuite("Docker VirtualBox").check(usual)
if is_ok: # Unix has 0 as success, 1 for fail.
sys.exit(0)
else:
sys.exit(1)
|
mit
| -293,875,068,873,349,600
| 36.559585
| 86
| 0.599117
| false
| 4.130484
| false
| false
| false
|
hanx11/psmonitor
|
bottle_example.py
|
1
|
1289
|
from bottle import Bottle
import pymongo
load = Bottle()
conn = pymongo.MongoReplicaSetClient(
'example01.com, example02.com',
replicaSet='rs1',
)
db = conn.reports
@load.get('/<server>')
def get_loaddata(server):
cpu_user = list()
cpu_nice = list()
cpu_system = list()
cpu_idle = list()
cpu_irq = list()
disk_root_free = list()
phymem_free = list()
data_cursor = list()
if server == 'example02':
data_cursor = db.example02.find()
elif server == 'example01':
data_cursor = db.example01.find()
for data in data_cursor:
date = '%s' % data['date']
cpu_user.append([date, data['cpu']['user']])
cpu_nice.append([date, data['cpu']['nice']])
cpu_system.append([date, data['cpu']['system']])
cpu_idle.append([date, data['cpu']['idle']])
cpu_irq.append([date, data['cpu']['irq']])
disk_root_free.append([date, data['disk_root']])
phymem_free.append([date, data['phymem']])
return {
'cpu_user': cpu_user,
'cpu_irq': cpu_irq,
'cpu_system': cpu_system,
'cpu_nice': cpu_nice,
'cpu_idle': cpu_idle,
'disk_root_free': disk_root_free,
'phymem_free': phymem_free,
}
|
mit
| -1,367,374,833,513,415,200
| 25.854167
| 56
| 0.547711
| false
| 3.392105
| false
| false
| false
|
ichika/yoineko
|
core.py
|
1
|
3654
|
#!/usr/bin/env python3
import sys
import socket
import json
import db
class NodeBase:
"""base class of node"""
def call(self, addr, msg, wait=True):
"""do request to other node and return result"""
request = bytes(json.dumps(msg), 'utf-8')
print('request', request)
self.socket.sendto(request, addr)
if wait:
response, addr = self.socket.recvfrom(1024)
print('response', response)
return json.loads(response.decode())
class Watashi(NodeBase):
"""my node"""
host = 'localhost'
port = 2000
port_xmpp = 2012
def __init__(self, port=None, db_name='data'):
"""run my node"""
db.init(db_name)
if port:
self.port = port
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.bind((self.host, self.port))
for node in db.Node.select():
print('exist', node, node.addr, node.port)
self.call(node.addr, 'hello')
#self.listen()
self.listen_xmpp()
def listen(self):
"""listen for events"""
print('listen', self.host, self.port)
while True:
response, addr = self.socket.recvfrom(1024)
print('receive', response, addr)
self.call(addr, 'asd', wait=False)
def listen_xmpp(self):
"""listen for jabber connections"""
connection_data = b'''<?xml version='1.0'?>
            <stream:stream xmlns:stream='http://etherx.jabber.org/streams' id='1'
xmlns='jabber:client' from='localhost'>'''
auth1_data = b'''<iq type='result' from='localhost' id='auth_1'>
<query xmlns='jabber:iq:auth'>
<username/>
<password/>
<resource/>
</query>
</iq>'''
auth2_data = b'''<iq type='result' from='localhost' id='auth_2'/>'''
roster_data = b'''<iq id='aab2a' type='result' from='localhost'>
<query xmlns='jabber:iq:roster'>
<item jid='sabine@yak' name='sabine' subscription='both'>
<group>Family</group>
</item>
</query>
</iq>'''
list_data = b'''<iq id='aab3a' type='result'/><iq id='aab5a' type='result'/>'''
print('listen xmpp', self.host, self.port_xmpp)
self.socket_xmpp = socket.socket()
self.socket_xmpp.bind((self.host, self.port_xmpp))
self.socket_xmpp.listen(5)
connect, addr = self.socket_xmpp.accept()
print('connect xmpp', connect, addr)
# connection
data = connect.recv(1024)
print('receive', data)
connect.send(connection_data)
print('send ', connection_data)
data = connect.recv(1024)
print('receive', data)
connect.send(auth1_data)
print('send ', auth1_data)
data = connect.recv(1024)
print('receive', data)
connect.send(auth2_data)
print('send ', auth2_data)
data = connect.recv(1024)
print('receive', data)
connect.send(roster_data)
print('send ', roster_data)
data = connect.recv(1024)
print('receive', data)
connect.send(list_data)
print('send ', list_data)
data = connect.recv(1024)
print('receive', data)
data = connect.recv(1024)
print('receive', data)
class Node(NodeBase):
"""known node"""
if __name__ == '__main__':
opts = {}
if len(sys.argv) == 3:
opts['port'] = int(sys.argv[1])
opts['db_name'] = sys.argv[2]
Watashi(**opts)
|
mit
| 278,101,519,890,364,600
| 27.325581
| 87
| 0.536946
| false
| 3.687185
| false
| false
| false
|
HaraldWeber/client
|
src/fa/path.py
|
1
|
6571
|
# -------------------------------------------------------------------------------
# Copyright (c) 2012 Gael Honorez.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#-------------------------------------------------------------------------------
import os
import sys
from PyQt4 import QtCore
import logging
import util
logger = logging.getLogger(__name__)
__author__ = 'Thygrrr, Dragonfire'
def steamPath():
try:
import _winreg
steam_key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\Valve\\Steam", 0, (_winreg.KEY_WOW64_64KEY + _winreg.KEY_ALL_ACCESS))
return _winreg.QueryValueEx(steam_key, "SteamPath")[0].replace("/", "\\")
except StandardError, e:
return None
def getGameFolderFA():
settings = QtCore.QSettings("ForgedAllianceForever", "FA Lobby")
settings.beginGroup("ForgedAlliance")
gameFolderFA = unicode(settings.value("app/path"))
settings.endGroup()
return fixFolderPathFA(gameFolderFA)
def setGameFolderFA(newGameFolderFA):
logger.info("Setting game path to: %s" % newGameFolderFA)
settings = QtCore.QSettings("ForgedAllianceForever", "FA Lobby")
settings.beginGroup("ForgedAlliance")
settings.setValue("app/path", newGameFolderFA)
settings.endGroup()
settings.sync()
def getGameFolderSC():
settings = QtCore.QSettings("ForgedAllianceForever", "FA Lobby")
settings.beginGroup("SupremeCommanderVanilla")
gameFolderSC = unicode(settings.value("app/path"))
settings.endGroup()
return gameFolderSC
def setGameFolderSC(newGameFolderSC):
settings = QtCore.QSettings("ForgedAllianceForever", "FA Lobby")
settings.beginGroup("SupremeCommanderVanilla")
settings.setValue("app/path", newGameFolderSC)
settings.endGroup()
settings.sync()
def fixFolderPathFA(gameFolderFA):
"""
Correct the game folder, e.g. if you selected the bin folder or exe.
"""
normPath = os.path.normpath(gameFolderFA)
notAllowed = [u'\\bin', u'\\bin\\SupremeCommander.exe']
for check in notAllowed:
if normPath.endswith(check):
newPath = normPath[:-len(check)]
# check if the new folder is valid
if validatePath(newPath):
setGameFolderFA(newPath)
return newPath
return gameFolderFA
def writeFAPathLua():
"""
Writes a small lua file to disk that helps the new SupComDataPath.lua find the actual install of the game
"""
name = os.path.join(util.APPDATA_DIR, u"fa_path.lua")
code = u"fa_path = '" + getGameFolderFA().replace(u"\\", u"\\\\") + u"'\n"
if getGameFolderSC():
code = code + u"sc_path = '" + getGameFolderSC().replace(u"\\", u"\\\\") + u"'\n"
gamepath_sc = util.settings.value("SupremeCommander/app/path", type=str)
if gamepath_sc:
code = code + u"sc_path = '" + gamepath_sc.replace(u"\\", u"\\\\") + u"'\n"
with open(name, "w+") as lua:
lua.write(code.encode("utf-8"))
lua.flush()
os.fsync(lua.fileno()) # Ensuring the file is absolutely, positively on disk.
def typicalForgedAlliancePaths():
"""
Returns a list of the most probable paths where Supreme Commander: Forged Alliance might be installed
"""
pathlist = [
getGameFolderFA(),
#Retail path
os.path.expandvars("%ProgramFiles%\\THQ\\Gas Powered Games\\Supreme Commander - Forged Alliance"),
#Direct2Drive Paths
#... allegedly identical to impulse paths - need to confirm this
#Impulse/GameStop Paths - might need confirmation yet
os.path.expandvars("%ProgramFiles%\\Supreme Commander - Forged Alliance"),
#Guessed Steam path
os.path.expandvars("%ProgramFiles%\\Steam\\steamapps\\common\\supreme commander forged alliance")
]
#Registry Steam path
steam_path = steamPath()
if steam_path:
pathlist.append(os.path.join(steam_path, "SteamApps", "common", "Supreme Commander Forged Alliance"))
return filter(validatePath, pathlist)
def typicalSupComPaths():
"""
Returns a list of the most probable paths where Supreme Commander might be installed
"""
pathlist = [
getGameFolderSC(),
#Retail path
os.path.expandvars("%ProgramFiles%\\THQ\\Gas Powered Games\\Supreme Commander"),
#Direct2Drive Paths
#... allegedly identical to impulse paths - need to confirm this
#Impulse/GameStop Paths - might need confirmation yet
os.path.expandvars("%ProgramFiles%\\Supreme Commander"),
#Guessed Steam path
os.path.expandvars("%ProgramFiles%\\Steam\\steamapps\\common\\supreme commander")
]
#Registry Steam path
steam_path = steamPath()
if steam_path:
pathlist.append(os.path.join(steam_path, "SteamApps", "common", "Supreme Commander"))
return filter(validatePath, pathlist)
def validatePath(path):
try:
# Supcom only supports Ascii Paths
if not path.decode("ascii"): return False
        #We check whether the base path and a gamedata/lua.scd file exist. This is a mildly naive check, but should suffice
if not os.path.isdir(path): return False
if not os.path.isfile(os.path.join(path, r'gamedata', r'lua.scd')): return False
#Reject or fix paths that end with a slash.
#LATER: this can have all sorts of intelligent logic added
#Suggested: Check if the files are actually the right ones, if not, tell the user what's wrong with them.
if path.endswith("/"): return False
if path.endswith("\\"): return False
return True
except:
_, value, _ = sys.exc_info()
logger.error(u"Path validation failed: " + unicode(value))
return False
def autoDetectPath():
for path in typicalForgedAlliancePaths():
if validatePath(path):
return path
return None
|
gpl-3.0
| 1,149,006,220,164,790,800
| 34.139037
| 143
| 0.656521
| false
| 3.714528
| false
| false
| false
|
russellb/nova
|
nova/vnc/xvp_proxy.py
|
1
|
6374
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Eventlet WSGI Services to proxy VNC for XCP protocol."""
import socket
import webob
import eventlet
import eventlet.green
import eventlet.greenio
import eventlet.wsgi
from nova import context
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import rpc
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
xvp_proxy_opts = [
cfg.IntOpt('xvpvncproxy_port',
default=6081,
help='Port that the XCP VNC proxy should bind to'),
cfg.StrOpt('xvpvncproxy_host',
default='0.0.0.0',
help='Address that the XCP VNC proxy should bind to'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(xvp_proxy_opts)
flags.DECLARE('consoleauth_topic', 'nova.consoleauth')
class XCPVNCProxy(object):
"""Class to use the xvp auth protocol to proxy instance vnc consoles."""
def one_way_proxy(self, source, dest):
"""Proxy tcp connection from source to dest."""
while True:
try:
d = source.recv(32384)
except Exception as e:
d = None
# If recv fails, send a write shutdown the other direction
if d is None or len(d) == 0:
dest.shutdown(socket.SHUT_WR)
break
# If send fails, terminate proxy in both directions
try:
# sendall raises an exception on write error, unlike send
dest.sendall(d)
except Exception as e:
source.close()
dest.close()
break
def handshake(self, req, connect_info, sockets):
"""Execute hypervisor-specific vnc auth handshaking (if needed)."""
host = connect_info['host']
port = int(connect_info['port'])
server = eventlet.connect((host, port))
# Handshake as necessary
if connect_info.get('internal_access_path'):
server.sendall("CONNECT %s HTTP/1.1\r\n\r\n" %
connect_info['internal_access_path'])
data = ""
while True:
b = server.recv(1)
if b:
data += b
if data.find("\r\n\r\n") != -1:
                        if data.split("\r\n")[0].find("200") == -1:
LOG.audit(_("Error in handshake: %s"), data)
return
break
if not b or len(data) > 4096:
LOG.audit(_("Error in handshake: %s"), data)
return
client = req.environ['eventlet.input'].get_socket()
client.sendall("HTTP/1.1 200 OK\r\n\r\n")
socketsserver = None
sockets['client'] = client
sockets['server'] = server
def proxy_connection(self, req, connect_info, start_response):
"""Spawn bi-directional vnc proxy."""
sockets = {}
t0 = eventlet.spawn(self.handshake, req, connect_info, sockets)
t0.wait()
if not sockets.get('client') or not sockets.get('server'):
LOG.audit(_("Invalid request: %s"), req)
start_response('400 Invalid Request',
[('content-type', 'text/html')])
return "Invalid Request"
client = sockets['client']
server = sockets['server']
t1 = eventlet.spawn(self.one_way_proxy, client, server)
t2 = eventlet.spawn(self.one_way_proxy, server, client)
t1.wait()
t2.wait()
# Make sure our sockets are closed
server.close()
client.close()
def __call__(self, environ, start_response):
try:
req = webob.Request(environ)
LOG.audit(_("Request: %s"), req)
token = req.params.get('token')
if not token:
LOG.audit(_("Request made with missing token: %s"), req)
start_response('400 Invalid Request',
[('content-type', 'text/html')])
return "Invalid Request"
ctxt = context.get_admin_context()
connect_info = rpc.call(ctxt, FLAGS.consoleauth_topic,
{'method': 'check_token',
'args': {'token': token}})
if not connect_info:
LOG.audit(_("Request made with invalid token: %s"), req)
start_response('401 Not Authorized',
[('content-type', 'text/html')])
return "Not Authorized"
return self.proxy_connection(req, connect_info, start_response)
except Exception as e:
LOG.audit(_("Unexpected error: %s"), e)
class SafeHttpProtocol(eventlet.wsgi.HttpProtocol):
"""HttpProtocol wrapper to suppress IOErrors.
The proxy code above always shuts down client connections, so we catch
the IOError that raises when the SocketServer tries to flush the
connection.
"""
def finish(self):
try:
eventlet.green.BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
except IOError:
pass
eventlet.greenio.shutdown_safe(self.connection)
self.connection.close()
def get_wsgi_server():
LOG.audit(_("Starting nova-xvpvncproxy node (version %s)"),
version.version_string_with_vcs())
return wsgi.Server("XCP VNC Proxy",
XCPVNCProxy(),
protocol=SafeHttpProtocol,
host=FLAGS.xvpvncproxy_host,
port=FLAGS.xvpvncproxy_port)
|
apache-2.0
| 2,121,944,898,252,933,000
| 33.085561
| 77
| 0.564167
| false
| 4.330163
| false
| false
| false
|
peragro/peragro-index
|
damn_index/cli.py
|
1
|
3711
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import argparse
import json
import copy
from elasticsearch import Elasticsearch
from damn_at.utilities import unique_asset_id_reference_from_fields
'''
pt a ../peragro-test-files/mesh/blender/cube1.blend -f json-pretty\
| pt index elastic\
| pt index stats
'''
def index(asset):
es = Elasticsearch()
ret = es.index(index='damn', doc_type='asset',
id=asset['id'], body=asset)
print(ret)
def create_argparse(parser, subparsers):
subparse = subparsers.add_parser(
"index", # aliases=("i",),
help="Anything to do with indexing",
)
subsubparsers = subparse.add_subparsers(
title='subcommands',
description='valid subcommands',
help='additional help',
)
create_argparse_elastic(subparse, subsubparsers)
create_argparse_generate_search(subparse, subsubparsers)
create_argparse_stats(subparse, subsubparsers)
def create_argparse_elastic(parser, subparsers):
subparse = subparsers.add_parser(
"elastic", # aliases=("transform",),
help="index the given file description to elasticsearch",
)
subparse.add_argument(
'infile', nargs='?',
type=argparse.FileType('r'),
default=sys.stdin)
def transform(args):
data = args.infile.read()
data = json.loads(data)
file_hash = data["file"]["hash"]
assets = []
file_copy = copy.deepcopy(data)
file_copy['metadata'] = file_copy.get('metadata', {})
del file_copy['assets']
del file_copy['metadata']
for asset in data['assets']:
subname = asset['asset']['subname']
mimetype = asset['asset']['mimetype']
id = unique_asset_id_reference_from_fields(file_hash, subname, mimetype)
a = {'id': id, 'file': file_copy}
asset['metadata'] = asset.get('metadata', {})
a.update(asset)
assets.append(a)
for asset in assets:
index(asset)
subparse.set_defaults(
func=lambda args:
transform(args),
)
def create_argparse_generate_search(parser, subparsers):
subparse = subparsers.add_parser(
"generate-search", # aliases=("transform",),
help="Generate a faceted search",
)
def search(args):
from damn_at import Analyzer
from damn_at.utilities import get_metadatavalue_fieldname
m = Analyzer().get_supported_metadata()
ret = {'aggs': {},
'query': {'match_all': {}},
'from': 3, 'size': 1, }
for mime, metas in list(m.items()):
for meta, type in metas:
field_name = get_metadatavalue_fieldname(type)
ret['aggs'][meta] = {'terms': {'field': 'metadata.'+meta+'.'+field_name}}
print(json.dumps(ret, indent=2))
subparse.set_defaults(
func=lambda args:
search(args),
)
def create_argparse_stats(parser, subparsers):
subparse = subparsers.add_parser(
"stats", # aliases=("transform",),
help="Generate stats from an ES bulk upload",
)
subparse.add_argument(
'infile', nargs='?',
type=argparse.FileType('r'),
default=sys.stdin)
def stats(args):
data = args.infile.read()
data = json.loads(data)
print('Uploaded: {0:>6}'.format(len(data['items'])))
print('Errors: {0:>6}'.format(data['errors']))
print('took: {:>6} ms'.format(data['took']))
if data['errors']:
sys.exit(1)
subparse.set_defaults(
func=lambda args:
stats(args),
)
|
bsd-3-clause
| -1,764,579,313,117,547,800
| 28.927419
| 89
| 0.586904
| false
| 3.845596
| false
| false
| false
|
Daniel-Brosnan-Blazquez/DIT-100
|
lib/bitOps.py
|
1
|
1763
|
# -*- coding: utf-8 -*-
def CheckBit(value, position):
mask = 1 << position
return value & mask == mask
def SetBit(value, position):
return value | (1 << position)
def ClearBit(value, position):
return value & ~(1 << position)
def FlipBit(value, position):
return value ^ (1 << position)
def CheckBits(value, mask):
return value & mask == mask
def SetBits(value, mask):
return value | mask
def ClearBits(value, mask):
return value & (~mask)
def FlipBits(value, mask):
return value ^ mask
def SetValueUnderMask(valueToSetUnderMask, currentValue, mask):
currentValueCleared = ClearBits(currentValue, mask) # clear bits under mask
i = 0
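    # Shift the mask right until its lowest set bit reaches position 0, counting how far the new value must be shifted up.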
while (mask % 2 == 0 and mask != 0x00):
mask = mask >> 1
i += 1
return SetBits(valueToSetUnderMask << i, currentValueCleared)
def GetValueUnderMask(currentValue, mask):
currentValueCleared = ClearBits(currentValue, ~mask) # clear bits not under mask
i = 0
while (mask % 2 == 0 and mask != 0x00):
mask = mask >> 1
i += 1
return currentValueCleared >> i
def GetValueUnderMaskDictMatch(currentValue, mask, dictionary):
value = GetValueUnderMask (currentValue, mask)
# Return name that maps the current value
for key in dictionary.keys():
if dictionary[key] == value:
return key
return None
def TwosComplementToByte(value):
if value >= 0 and value <= 127:
return value
else:
return value - 256
def TwosComplementToCustom(value, signBitPosition):
if value >= 0 and value <= (1<<signBitPosition)-1:
return value
else:
return value - (2 << signBitPosition)
|
gpl-3.0
| -3,122,407,504,018,430,000
| 26.435484
| 84
| 0.617697
| false
| 3.783262
| false
| false
| false
|
ilmir-k/website-addons
|
website_sales_team/website_sales_team_models.py
|
1
|
2489
|
# -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp import fields
from openerp import models
class ProductTemplate(models.Model):
_inherit = 'product.template'
def _get_default_section_id(self):
return self.env.user.default_section_id
section_id = fields.Many2one('crm.case.section', 'Sales Team', default=_get_default_section_id)
section_member_ids = fields.Many2many('res.users', 'Sales Team members', related='section_id.member_ids')
section_public_categ_ids = fields.Many2many('product.public.category', related='section_id.public_categ_ids')
class CrmCaseSection(models.Model):
_inherit = "crm.case.section"
product_ids = fields.One2many('product.template', 'section_id', string='Products')
website_description = fields.Html('Description for the website', translate=True)
public_categ_ids = fields.Many2many('product.public.category', 'section_public_categ_rel', 'section_id', 'category_id', string='Allowed public categories', help='All child categories are also allowed automatically')
sale_description = fields.Char('Sale description', help='This text is added to email for customer')
class ResUsers(models.Model):
_inherit = 'res.users'
section_ids = fields.Many2many('crm.case.section', 'sale_member_rel', 'member_id', 'section_id', 'Sales Team')
def _get_group(self, cr, uid, context=None):
dataobj = self.pool.get('ir.model.data')
result = []
try:
dummy, group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_user')
result.append(group_id)
# dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_partner_manager')
# result.append(group_id)
except ValueError:
# If these groups does not exists anymore
pass
return result
_defaults = {
'groups_id': _get_group,
}
class ProductPublicCategory(models.Model):
_inherit = "product.public.category"
section_ids = fields.Many2many('crm.case.section', 'section_public_categ_rel', 'category_id', 'section_id', string='Sales teams')
class SaleOrder(models.Model):
_inherit = 'sale.order'
parent_id = fields.Many2one('sale.order', 'Parent')
child_ids = fields.One2many('sale.order', 'parent_id', 'Child orders')
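    # Post the 'website_sales_team.mt_order_created' message subtype whenever an order's state becomes 'draft'.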
_track = {
'state': {'website_sales_team.mt_order_created': lambda self, cr, uid, obj, ctx=None: obj.state in ['draft']}
}
|
lgpl-3.0
| 750,377,855,924,190,700
| 37.890625
| 219
| 0.672961
| false
| 3.591631
| false
| false
| false
|
R2pChyou/starcheat
|
starcheat/gui/ship.py
|
1
|
2414
|
"""
Qt ship management dialog
"""
import logging
from gui.common import ListEdit
import qt_ship
from PyQt5.QtWidgets import QDialog
class Ship():
def __init__(self, main_window):
self.dialog = QDialog(main_window.window)
self.ui = qt_ship.Ui_Dialog()
self.ui.setupUi(self.dialog)
self.main_window = main_window
self.assets = main_window.assets
self.player = main_window.player
self.ship_upgrades = self.player.get_ship_upgrades()
self.ai = self.player.get_ai()
self.ui.capabilities_button.clicked.connect(self.edit_capabilities)
self.ui.available_missions_button.clicked.connect(self.edit_available)
self.ui.completed_missions_button.clicked.connect(self.edit_completed)
self.update()
def update(self):
self.ui.crew_size.setValue(self.ship_upgrades["crewSize"])
self.ui.upgrade_level.setValue(self.ship_upgrades["shipLevel"])
self.ui.max_fuel.setValue(self.ship_upgrades["maxFuel"])
self.ui.capabilities.setText(", ".join(self.ship_upgrades["capabilities"]))
self.ui.available_missions.setText(", ".join(self.ai["availableMissions"]))
self.ui.completed_missions.setText(", ".join(self.ai["completedMissions"]))
def edit_capabilities(self):
edit = ListEdit(self.dialog, self.ship_upgrades["capabilities"])
ok = edit.dialog.exec()
if ok == 1:
self.ship_upgrades["capabilities"] = edit.get_list()
self.update()
def edit_available(self):
edit = ListEdit(self.dialog, self.ai["availableMissions"])
ok = edit.dialog.exec()
if ok == 1:
self.ai["availableMissions"] = edit.get_list()
self.update()
def edit_completed(self):
edit = ListEdit(self.dialog, self.ai["completedMissions"])
ok = edit.dialog.exec()
if ok == 1:
self.ai["completedMissions"] = edit.get_list()
self.update()
def write_ship(self):
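        # Push the edited spin-box values back into the player save and mark the main window as modified.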
self.ship_upgrades["crewSize"] = self.ui.crew_size.value()
self.ship_upgrades["maxFuel"] = self.ui.max_fuel.value()
self.ship_upgrades["shipLevel"] = self.ui.upgrade_level.value()
self.player.set_ship_upgrades(self.ship_upgrades)
self.player.set_ai(self.ai)
logging.debug("Wrote ship/ai")
self.main_window.window.setWindowModified(True)
|
mit
| -6,686,477,986,343,633,000
| 33.985507
| 83
| 0.637531
| false
| 3.534407
| false
| false
| false
|
genesi/cardapio
|
src/plugins/tracker_fts.py
|
1
|
4314
|
#
# Copyright (C) 2010 Cardapio Team (tvst@hotmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
class CardapioPlugin(CardapioPluginInterface):
author = _('Cardapio Team')
name = _('Full-text file search')
description = _('Search <b>inside</b> local files and folders indexed with Tracker')
url = ''
help_text = ''
version = '1.43'
plugin_api_version = 1.40
search_delay_type = 'local'
default_keyword = 'ftstracker'
category_name = _('Results within files')
category_icon = 'system-search'
icon = 'system-search'
category_tooltip = _('Results found inside the files in your computer')
hide_from_sidebar = True
def __init__(self, cardapio_proxy, category):
self.c = cardapio_proxy
try:
from os.path import split
from urllib2 import quote, splittype
except Exception, exception:
self.c.write_to_log(self, 'Could not import certain modules', is_error = True)
self.c.write_to_log(self, exception, is_error = True)
self.loaded = False
return
self.split = split
self.quote = quote
self.splittype = splittype
self.tracker = None
bus = dbus.SessionBus()
if bus.request_name('org.freedesktop.Tracker1') == dbus.bus.REQUEST_NAME_REPLY_IN_QUEUE:
tracker_object = bus.get_object('org.freedesktop.Tracker1', '/org/freedesktop/Tracker1/Resources')
self.tracker = dbus.Interface(tracker_object, 'org.freedesktop.Tracker1.Resources')
else:
self.c.write_to_log(self, 'Could not connect to Tracker', is_error = True)
self.loaded = False
bus.release_name('org.freedesktop.Tracker1')
return
if (which("tracker-needle") is not None):
self.action_command = r"tracker-needle '%s'"
else:
self.action_command = r"tracker-search-tool '%s'"
self.action = {
'name' : _('Show additional results'),
'tooltip' : _('Show additional search results in the Tracker search tool'),
'icon name' : 'system-search',
'type' : 'callback',
'command' : self.more_results_action,
'context menu' : None,
}
self.loaded = True
def search(self, text, result_limit):
self.current_query = text
text = self.quote(text).lower()
self.tracker.SparqlQuery(
"""
SELECT ?uri ?mime
WHERE {
?item a nie:InformationElement;
fts:match "%s";
nie:url ?uri;
nie:mimeType ?mime;
tracker:available true.
}
LIMIT %d
"""
% (text, result_limit),
dbus_interface='org.freedesktop.Tracker1.Resources',
reply_handler=self.prepare_and_handle_search_result,
error_handler=self.handle_search_error
)
# not using: ORDER BY DESC(fts:rank(?item))
def handle_search_error(self, error):
self.c.handle_search_error(self, error)
def prepare_and_handle_search_result(self, results):
formatted_results = []
for result in results:
dummy, canonical_path = self.splittype(result[0])
parent_name, child_name = self.split(canonical_path)
icon_name = result[1]
formatted_result = {
'name' : child_name,
'icon name' : icon_name,
'tooltip' : result[0],
'command' : canonical_path,
'type' : 'xdg',
'context menu' : None,
}
formatted_results.append(formatted_result)
if results:
formatted_results.append(self.action)
self.c.handle_search_result(self, formatted_results, self.current_query)
def more_results_action(self, text):
try:
subprocess.Popen(self.action_command % text, shell = True)
except OSError, e:
self.c.write_to_log(self, 'Error launching plugin action.', is_error = True)
self.c.write_to_log(self, e, is_error = True)
|
gpl-3.0
| 8,854,574,007,254,268,000
| 27.012987
| 101
| 0.659017
| false
| 3.214605
| false
| false
| false
|
LaiTash/OEUO-python
|
profiles/default/scripts/journal_event.py
|
1
|
1099
|
from uo.serpent.script import ScriptBase
from uo import manager
import gevent
import re
class BindObj(object):
def __init__(self, regexp, callback):
self.regexp = re.compile(regexp)
self.callback = callback
class JournalScannerScript(ScriptBase):
script_name = "Journal scanner"
def load(self):
"""
:type manager manager
"""
global UO
UO = manager.UO
self.binds = set()
self.old_ref = 0
def bind(self, regexp, callback):
bobj = BindObj(regexp, callback)
self.binds.add(bobj)
def scan(self):
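        # Fetch journal lines added since the last scan and fire every callback whose regexp matches a line.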
newRef, nCont = UO.ScanJournal(self.old_ref)
for line_i in xrange(nCont):
line, col = UO.GetJournal(line_i)
for bind in self.binds:
if bind.regexp.match(line):
bind.callback(line)
self.old_ref, nCont = UO.ScanJournal(newRef)
def main(self):
self.old_ref, nCont = UO.ScanJournal(self.old_ref)
while True:
self.scan()
gevent.sleep(.1)
|
gpl-3.0
| 8,903,835,215,289,260,000
| 23.422222
| 58
| 0.5596
| false
| 3.712838
| false
| false
| false
|
madarivi/PianoSimulation
|
Parameters/parametersAd5.py
|
1
|
1862
|
import numpy as np
# in this file the parameters used in the simulation are set
# string parameters
f1 = 934.60 # fundamental string frequency
l = 0.200 # string length
d = 0.898e-3
rhoV = 7850.
A = np.pi * (d/2.)**2
m_s = A * l * rhoV # total string mass
print m_s
b1 = 1.1 # air damping coefficient
b2 = 2.7e-4 # string internal friction coefficient
rho = m_s/l # linear string density
t_e = rho * 4. * l**2 * f1**2
print t_e
c = (t_e/rho)**.5 # wave velocity
E = 2.02e11
S = np.pi * (d/2.)**2
I = np.pi * d**4 / 64.
epsilon = (I/A) * (E*S) / (t_e*l**2)
print epsilon
kappa = epsilon*(c**2)*(l**2) # string stiffness coefficient
# sampling parameters
t = 3. # simulation time
f_s = 32*44.1e3 # sampling frequency
m = 140 # number of string segments
dx = l/m # spatial grid distance
dt = 1/f_s # time step
n_t = int(t/dt) # number of time steps
labda = c*dt/dx # cfl number
n = m+1 # number of gridpoints
# hammer parameters
m_h = 7.33e-3 # hammer mass
p = 2.793 # hammer felt stiffness exponent
b_h = 1.e-4 # fluid damping coefficient
k = 8.600e10 # hammer felt stiffness
a = 0.12 # relative striking position
v_h = 5. # initial hammer velocity
x0 = a*l # hammer impact point
n0 = int(a*n) # hammer impact index
# boundary parameters
zeta_l = 1.e20 # left end normalized impedance
zeta_b = 1000. # bridge normalized impedance
x = np.linspace(0, l, n) # spatial grid points
g = np.cos(50*np.pi*(x-x0))*(np.abs(x-x0) < .005) # hammer impact window
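# Report whether the CFL number satisfies the stability threshold used here (labda < 0.8).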
print "stable?", labda < 0.8, "=> labda:", labda
print f1
print c / (2*l)
|
mit
| 6,467,938,529,171,412,000
| 33.481481
| 75
| 0.542427
| false
| 2.9792
| false
| true
| false
|
AllYarnsAreBeautiful/knittingpattern
|
knittingpattern/__init__.py
|
1
|
4042
|
"""The knitting pattern module.
Load and convert knitting patterns using the convenience functions listed
below.
"""
# there should be no imports
#: the version of the knitting pattern library
__version__ = '0.1.19'
#: an empty knitting pattern set as specification
EMPTY_KNITTING_PATTERN_SET = {"version": "0.1", "type": "knitting pattern",
"patterns": []}
def load_from():
"""Create a loader to load knitting patterns with.
:return: the loader to load objects with
:rtype: knittingpattern.Loader.JSONLoader
Example:
.. code:: python
import knittingpattern, webbrowser
k = knittingpattern.load_from().example("Cafe.json")
webbrowser.open(k.to_svg(25).temporary_path(".svg"))
"""
from .ParsingSpecification import new_knitting_pattern_set_loader
return new_knitting_pattern_set_loader()
def load_from_object(object_):
"""Load a knitting pattern from an object.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().object(object_)
def load_from_string(string):
"""Load a knitting pattern from a string.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().string(string)
def load_from_file(file):
"""Load a knitting pattern from a file-like object.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().file(file)
def load_from_path(path):
"""Load a knitting pattern from a file behind located at `path`.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().path(path)
def load_from_url(url):
"""Load a knitting pattern from a url.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().url(url)
def load_from_relative_file(module, path_relative_to):
"""Load a knitting pattern from a path relative to a module.
:param str module: can be a module's file, a module's name or
a module's path.
:param str path_relative_to: is the path relative to the modules location.
The result is loaded from this.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
"""
return load_from().relative_file(module, path_relative_to)
def convert_from_image(colors=("white", "black")):
"""Convert and image to a knitting pattern.
:return: a loader
:rtype: knittingpattern.Loader.PathLoader
:param tuple colors: the colors to convert to
.. code:: python
convert_from_image().path("pattern.png").path("pattern.json")
convert_from_image().path("pattern.png").knitting_pattern()
    .. seealso:: :mod:`knittingpattern.convert.image_to_knittingpattern`
"""
from .convert.image_to_knittingpattern import \
convert_image_to_knitting_pattern
return convert_image_to_knitting_pattern(colors=colors)
def new_knitting_pattern(id_, name=None):
"""Create a new knitting pattern.
:return: a new empty knitting pattern.
:param id_: the id of the knitting pattern
:param name: the name of the knitting pattern or :obj:`None` if the
:paramref:`id_` should be used
:rtype: knittingpattern.KnittingPattern.KnittingPattern
.. seealso:: :meth:`KnittingPatternSet.add_new_pattern()
<knittingpattern.KnittingPatternSet.KnittingPatternSet.add_new_pattern>`
"""
knitting_pattern_set = new_knitting_pattern_set()
return knitting_pattern_set.add_new_pattern(id_, name)
def new_knitting_pattern_set():
"""Create a new, empty knitting pattern set.
:rtype: knittingpattern.KnittingPatternSet.KnittingPatternSet
:return: a new, empty knitting pattern set
"""
return load_from_object(EMPTY_KNITTING_PATTERN_SET)
__all__ = ["load_from_object", "load_from_string", "load_from_file",
"load_from_path", "load_from_url", "load_from_relative_file",
"convert_from_image", "load_from", "new_knitting_pattern",
"new_knitting_pattern_set"]
|
lgpl-3.0
| 2,694,559,779,322,161,700
| 29.164179
| 78
| 0.690252
| false
| 3.454701
| false
| false
| false
|
joshuahellier/PhDStuff
|
codes/kmc/batchJobs/rateCaculation/mainStuff/tempSteadyFlow.py
|
1
|
13591
|
import sys
import os
import math
import shutil as sh
resultDir = os.environ.get('RESULTS')
tempDir = os.environ.get('TMPDIR')
if resultDir is None or tempDir is None:
    print ("WARNING! $RESULTS or $TMPDIR not set! Attempt to write results will fail!\n")
# Expecting input botConc, topConc, rateConstFull, sysSize, analInterval, numStepsEquilib, numStepsSnapshot, numStepsAnal, numStepsReq, numPasses, timeInterval, fileCode
from KMCLib import *
from KMCLib.Backend import Backend
import numpy
from RateCalc import *
botConc = float(sys.argv[1])
topConc = float(sys.argv[2])
rateConstFull = float(sys.argv[3])
sysSize = int(sys.argv[4])
analInterval = int(sys.argv[5])
numStepsEquilib = int(sys.argv[6])
numStepsSnapshot = int(sys.argv[7])
numStepsAnal = int(sys.argv[8])
numStepsReq = int(sys.argv[9])
numPasses = int(sys.argv[10])
timeInterval = float(sys.argv[11])
fileInfo = sys.argv[12]
tempFolderName = sys.argv[13]
resultsPlace = resultDir+"/"+fileInfo+"/"
tempPlace = tempDir+"/"+tempFolderName+"/"
#tempPlace = "/tmp/"+tempFolderName+"/"
if not os.path.exists(resultsPlace):
os.makedirs(resultsPlace)
if not os.path.exists(tempPlace):
os.makedirs(tempPlace)
with open(resultsPlace+'settings', 'w') as f:
f.write('BotConcentration = ' + str(botConc) +'\n')
f.write('TopConcentration = ' + str(topConc) +'\n')
f.write('FullRate = ' + str(rateConstFull) +'\n')
f.write('SysSize = ' + str(sysSize) +'\n')
f.write('TimeInterval = ' + str(timeInterval) +'\n')
f.write('AnalInterval = ' +str(analInterval) + '\n')
f.write('NumStepsEquilib = '+str(numStepsEquilib) +'\n')
f.write('NumStepsSnapshot = '+str(numStepsSnapshot)+'\n')
f.write('NumStepsAnal = '+str(numStepsAnal) +'\n')
"""I've put this in the file to make command line input easier"""
# Load the configuration and interactions.
# We're in 1d, so everything's a bit trivial
cell_vectors = [[1.0,0.0,0.0],
[0.0,1.0,0.0],
[0.0,0.0,1.0]]
# Only bothering with one set
basis_points = [[0.0, 0.0, 0.0]]
unit_cell = KMCUnitCell(cell_vectors=cell_vectors,
basis_points=basis_points)
# Define the lattice.
xRep = 1
yRep = 1
zRep = sysSize
numPoints = xRep*(zRep+4)*yRep
lattice = KMCLattice(unit_cell=unit_cell,
repetitions=(xRep,yRep,zRep+4),
periodic=(False, False, True))
# Generate the initial types. There's a double-layered section of "To" at the top and "Bo" at the bottom
avConc = 0.5*(botConc+topConc)
types = ["V"]*numPoints
types[0] = "BoV"
types[1] = "BoV"
types[-2] = "ToV"
types[-1] = "ToV"
for i in range(int(zRep*avConc)):
# find a site which is not yet occupied by a "O" type.
pos = int(numpy.random.rand()*zRep+2.0)
while (types[pos] != "V"):
pos = int(numpy.random.rand()*zRep+2.0)
# Set the type.
types[pos] = "O"
"""
for i in range(2, numPoints-2):
if i < numPoints/2:
types[i] = "O"
else:
types[i] = "V"
"""
# Setup the configuration.
configuration = KMCConfiguration(lattice=lattice,
types=types,
possible_types=["O","V","ToV","BoV", "ToO", "BoO"])
# Rates.
rateConstEmpty = 1.0
topSpawn = math.sqrt(topConc/(1.0-topConc))
botSpawn = math.sqrt(botConc/(1.0-botConc))
topDespawn = 1.0/topSpawn
botDespawn = 1.0/botSpawn
#
##
###
"""I've put the processes in here to make it easier to adjust them via command line arguments."""
# Fill the list of processes.
processes = []
# Only on the first set of basis_points for O/V
basis_sites = [0]
# Bulk processes
# Up, empty.
#0
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Down, empty.
#1
elements_before = ["O", "V"]
elements_after = ["V", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise
# Now for Oxygen annihilation at the top boundary
#2
elements_before = ["O", "ToV"]
elements_after = ["V", "ToV"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Will customise the rate constant
# Oxygen creation at the top boundary
#3
elements_before = ["ToO", "V"]
elements_after = ["ToO", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Now for Oxygen annihilation at the bottom boundary
#4
elements_before = ["O", "BoV"]
elements_after = ["V", "BoV"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, -1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Obviously the rate constant will be customised
# Oxygen creation at the bottom boundary
#5
elements_before = ["BoO", "V"]
elements_after = ["BoO", "O"]
coordinates = [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen creation at the bottom boundary
#6
elements_before = ["BoV"]
elements_after = ["BoO"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen annihilation at the bottom boundary
#7
elements_before = ["BoO"]
elements_after = ["BoV"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen creation at the top boundary
#8
elements_before = ["ToV"]
elements_after = ["ToO"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Boundary Oxygen annihilation at the bottom boundary
#9
elements_before = ["ToO"]
elements_after = ["ToV"]
coordinates = [[0.0, 0.0, 0.0]]
processes.append( KMCProcess(coordinates=coordinates,
elements_before=elements_before,
elements_after=elements_after,
basis_sites=basis_sites,
rate_constant=1.0))
# Create the interactions object.
interactions = KMCInteractions(processes, implicit_wildcards=True)
# Define the custom rates calculator, using the lol model as a template
class lolModelRates(KMCRateCalculatorPlugin):
# Class for defining the custom rates function for the KMCLib paper.
def rate(self, geometry, elements_before, elements_after, rate_constant, process_number, global_coordinate):
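        # Diffusion and boundary-exchange processes (0-5) use the full rate when the local configuration holds two occupied sites, otherwise the empty-lattice rate; reservoir processes (6-9) use the concentration-derived spawn/despawn rates.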
        if process_number in (0, 1, 2, 3, 4, 5):
            if len([e for e in elements_before if e in ("O", "ToO", "BoO")]) == 2:
                return rateConstFull
            else:
                return rateConstEmpty
        if process_number == 6:
            return botSpawn
        if process_number == 7:
            return botDespawn
        if process_number == 8:
            return topSpawn
        if process_number == 9:
            return topDespawn
def cutoff(self):
# Overloaded base class API function
return 1.0
interactions.setRateCalculator(rate_calculator=lolModelRates)
"""End of processes"""
###
##
#
# Create the model.
model = KMCLatticeModel(configuration, interactions)
compositionTracker = Composition(time_interval=timeInterval)
# Define the parameters; not entirely sure if these are sensible or not...
control_parameters_equilib = KMCControlParameters(number_of_steps=numStepsEquilib, analysis_interval=numStepsEquilib/100,
dump_interval=numStepsEquilib/100)
control_parameters_req = KMCControlParameters(number_of_steps=numStepsReq, analysis_interval=numStepsReq/100,
dump_interval=numStepsReq/100)
control_parameters_anal = KMCControlParameters(number_of_steps=numStepsAnal, analysis_interval=1,
dump_interval=numStepsAnal/100)
# Run the simulation - save trajectory to resultsPlace, which should by now exist
model.run(control_parameters_equilib, trajectory_filename=(tempPlace+"equilibTraj.tr"))
with open(tempPlace+"inBot.dat", 'w') as f:
pass
with open(tempPlace+"outBot.dat", 'w') as f:
pass
with open(tempPlace+"inTop.dat", 'w') as f:
pass
with open(tempPlace+"outTop.dat", 'w') as f:
pass
if not os.path.exists(tempPlace+"composition"):
os.makedirs(tempPlace+"composition")
for passNum in range(0, numPasses):
processStatsOxInBot = RateCalc(processes=[5])
processStatsOxOutBot = RateCalc(processes=[4])
processStatsOxInTop = RateCalc(processes=[3])
processStatsOxOutTop = RateCalc(processes=[2])
compositionTracker = Composition(time_interval=timeInterval)
model.run(control_parameters_req, trajectory_filename=(tempPlace+"mainTraj.tr"))
model.run(control_parameters_anal, trajectory_filename=(tempPlace+"mainTraj.tr"), analysis=[processStatsOxInBot, processStatsOxOutBot, processStatsOxInTop, processStatsOxOutTop, compositionTracker])
with open(tempPlace+"inBot.dat", 'a') as f:
processStatsOxInBot.printResults(f)
with open(tempPlace+"outBot.dat", 'a') as f:
processStatsOxOutBot.printResults(f)
with open(tempPlace+"inTop.dat", 'a') as f:
processStatsOxInTop.printResults(f)
with open(tempPlace+"outTop.dat", 'a') as f:
processStatsOxOutTop.printResults(f)
with open(tempPlace+"composition/composition"+str(passNum)+".dat", 'w') as f:
compositionTracker.printResults(f)
if not os.path.exists(resultsPlace+"composition"):
os.makedirs(resultsPlace+"composition")
sh.copy(tempPlace+"inBot.dat", resultsPlace+"inBot.dat")
sh.copy(tempPlace+"outBot.dat", resultsPlace+"outBot.dat")
sh.copy(tempPlace+"inTop.dat", resultsPlace+"inTop.dat")
sh.copy(tempPlace+"outTop.dat", resultsPlace+"outTop.dat")
sh.copy(tempPlace+"mainTraj.tr", resultsPlace+"mainTraj.tr")
for passNum in range(0, numPasses):
sh.copy(tempPlace+"composition/composition"+str(passNum)+".dat", resultsPlace+"composition/composition"+str(passNum)+".dat")
sh.rmtree(tempPlace)
print("Process would appear to have succesfully terminated! How very suspicious...")
|
mit
| -1,052,233,033,452,451,000
| 35.534946
| 202
| 0.610919
| false
| 3.460031
| false
| false
| false
|
wideioltd/mimejson
|
mimejson/mimetype/video_opencv.py
|
1
|
3911
|
# ############################################################################
# |W|I|D|E|I|O|L|T|D|W|I|D|E|I|O|L|T|D|W|I|D|E|I|O|L|T|D|W|I|D|E|I|O|L|T|D|
# Copyright (c) WIDE IO LTD
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the WIDE IO LTD nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
# |D|O|N|O|T|R|E|M|O|V|E|!|D|O|N|O|T|R|E|M|O|V|E|!|D|O|N|O|T|R|E|M|O|V|E|!|
# ############################################################################
import functools
import os
import cv2.cv as cv
class Serializer:
mimetype = (
"video/FMP4",
"video/DIVX"
)
@staticmethod
def can_apply(obj):
frames = None
required = ("$name$", "$fps$", "$encodage$",
"$frame_size$", "$color$", "$frames_list$")
if not isinstance(obj, dict):
return False
for check in required:
if check not in obj:
return False
frames = obj["$frames_list$"]
if frames is None or not isinstance(frames, list):
return False
for frame in frames:
if not isinstance(frame, cv.iplimage):
return False
return True
@classmethod
def serialize(cls, obj, pathdir):
fn = os.path.join(pathdir, obj["$name$"])
writer = cv.CreateVideoWriter(fn, obj["$encodage$"], obj["$fps$"],
obj["$frame_size$"], obj["$color$"])
write = functools.partial(cv.WriteFrame, writer)
map(write, obj["$frames_list$"])
return {'$path$': fn, '$length$': os.stat(fn).st_size,
'$mimetype$': obj["$mimetype$"]}
@staticmethod
def deserialize(obj, filepath):
video = cv.CaptureFromFile(obj["$path$"])
obj["$frames_list$"] = []
obj["$color$"] = 1
obj["$name$"] = os.path.basename(obj["$path$"])
obj["$fps$"] = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FPS))
obj["$encodage$"] = int(cv.GetCaptureProperty(video,
cv.CV_CAP_PROP_FOURCC))
f_w = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_WIDTH))
f_h = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_HEIGHT))
obj["$frame_size$"] = (f_w, f_h)
del obj["$path$"]
nu_frame = cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FRAME_COUNT)
for i in range(int(nu_frame)):
frame = cv.QueryFrame(video)
obj["$frames_list$"].append(cv.CloneImage(frame))
return obj
|
bsd-3-clause
| 678,036,403,070,895,600
| 43.443182
| 78
| 0.601892
| false
| 3.689623
| false
| false
| false
|
sergiorb/askkit
|
askkit/settings.py
|
1
|
12452
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Django settings for askkit project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.utils.translation import ugettext_lazy as _
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
if 'SECRET_KEY' in os.environ:
SECRET_KEY = os.environ['SECRET_KEY']
else:
SECRET_KEY = 'mysecretkey'
# SECURITY WARNING: don't run with debug turned on in production!
if 'DJANGO_DEBUG' in os.environ:
DEBUG = True
TEMPLATE_DEBUG = True
else:
DEBUG = False
TEMPLATE_DEBUG = False
# SSL settings
if not DEBUG:
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
SECURE_SSL_REDIRECT = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_HSTS_SECONDS = 31536000
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_FRAME_DENY = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
ADMINS = (('Sergio', 's.romerobarra@gmail.com'),)
ALLOWED_HOSTS = ['localhost', 'askkit-dev-env.elasticbeanstalk.com', 'askkit-prod-env.elasticbeanstalk.com', 'askkit.net', 'www.askkit.net',]
# Application definition
INSTALLED_APPS = (
#'admin_tools',
#'admin_tools.theming',
#'admin_tools.menu',
#'admin_tools.dashboard',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# The Django sites framework is required
'django.contrib.sites',
'core',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.twitter',
#'allauth.socialaccount.providers.facebook',
'users',
'questions',
#'debug_toolbar',
'crispy_forms',
#'rest_framework',
#'betterforms',
'datetimewidget',
'redactor',
'imagekit',
'captcha',
'django_ses',
'storages',
'admin_honeypot',
'compressor',
'djangosecure',
'sanitizer',
)
SITE_ID = 1
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'djangosecure.middleware.SecurityMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'core.middleware.LocaleMiddleware',
'htmlmin.middleware.HtmlMinifyMiddleware',
'htmlmin.middleware.MarkRequestMiddleware',
)
ROOT_URLCONF = 'askkit.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# `allauth` needs this from django
'django.core.context_processors.request',
# `allauth` specific context processors
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'core.context_processors.common_timezones',
'core.context_processors.debug',
'core.context_processors.get_adsense_user',
'core.context_processors.get_adsense_main',
'core.context_processors.get_adsense_yes',
'core.context_processors.get_analytics_id',
#'core.context_processors.current_timezone',
'django.template.context_processors.i18n',
],
},
},
]
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
WSGI_APPLICATION = 'askkit.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
if 'RDS_DB_NAME' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'mydatabase',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
#LANGUAGE_COOKIE_NAME = 'askkit_language'
LANGUAGES = (
('en', _('English')),
#('es', _('Spanish')),
#('it', _('Italian')),
#('fr', _('French')),
#('de', _('German')),
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "files", "static"),
)
STATIC_URL = '/static/'
#STATIC_ROOT = os.path.join(BASE_DIR, "static")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "files", "media")
############################################################################################
### COMPRESS CONFIG ########################################################################
############################################################################################
COMPRESS_STORAGE = 'custom_storages.StaticStorage'
COMPRESS_URL = STATIC_URL
COMPRESS_ROOT = os.path.join(BASE_DIR, "files")
############################################################################################
### AMAZON S3 STORAGES CONFIG ##############################################################
############################################################################################
### AWS4-HMAC-SHA256 ERROR WORKARROUND ###################################
os.environ['S3_USE_SIGV4'] = 'True'
if 'AWS_ACCESS_KEY_ID' in os.environ:
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
STATICFILES_STORAGE = 'custom_storages.StaticStorage'
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage'
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
AWS_S3_HOST = 's3.eu-central-1.amazonaws.com'
S3_URL = 'https://%s.%s' % (AWS_STORAGE_BUCKET_NAME, AWS_S3_HOST)
MEDIA_URL = S3_URL + '/media/'
STATIC_URL = S3_URL + '/static/'
### django compress setting
COMPRESS_URL = S3_URL + '/'
############################################################################################
### django-allauth config ##################################################################
############################################################################################
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
LOGIN_REDIRECT_URL = '/'
ACCOUNT_LOGOUT_ON_GET = True
SOCIALACCOUNT_QUERY_EMAIL = True
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email', ],
'METHOD': 'oauth2' # instead of 'js_sdk'
}
}
if 'AWS_SES_ACCESS_KEY_ID' in os.environ:
EMAIL_BACKEND = 'django_ses.SESBackend'
DEFAULT_FROM_EMAIL = os.environ['DEFAULT_FROM_EMAIL']
AWS_SES_ACCESS_KEY_ID = os.environ['AWS_SES_ACCESS_KEY_ID']
AWS_SES_SECRET_ACCESS_KEY = os.environ['AWS_SES_SECRET_ACCESS_KEY']
AWS_SES_REGION_NAME = 'eu-west-1'
AWS_SES_REGION_ENDPOINT = 'email.eu-west-1.amazonaws.com'
else:
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = BASE_DIR+'/faked-emails'
############################################################################################
### CRISPY FORMS CONFIG ####################################################################
############################################################################################
CRISPY_TEMPLATE_PACK = 'bootstrap3'
############################################################################################
### REST FRAMEWORK #########################################################################
############################################################################################
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
############################################################################################
### REDACTOR ##############################################################################
############################################################################################
#REDACTOR_UPLOAD_HANDLER = 'redactor.handlers.DateDirectoryUploader'
#REDACTOR_AUTH_DECORATOR = 'django.contrib.auth.decorators.login_required'
#REDACTOR_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
REDACTOR_OPTIONS = {'lang': 'en', 'django_lang': True,}
############################################################################################
### REPLIES LIMIT #########################################################################
############################################################################################
MAX_REPLIES_REGISTERED = 4
############################################################################################
### HTML MINIFY ############################################################################
############################################################################################
if DEBUG:
HTML_MINIFY = False
else:
HTML_MINIFY = True
############################################################################################
### RECAPTCHA #############################################################################
############################################################################################
if 'RECAPTCHA_PUBLIC_KEY' in os.environ:
RECAPTCHA_PUBLIC_KEY = os.environ['RECAPTCHA_PUBLIC_KEY']
RECAPTCHA_PRIVATE_KEY = os.environ['RECAPTCHA_PRIVATE_KEY']
else:
RECAPTCHA_PUBLIC_KEY = ''
RECAPTCHA_PRIVATE_KEY = ''
NOCAPTCHA = True
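# Static file finders, including django-compressor's finder so that compressed
# assets can be located by the staticfiles machinery.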
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
############################################################################################
### ADSENSE SETTINGS #######################################################################
############################################################################################
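# AdSense snippets are only rendered when the publisher IDs are supplied via the environment.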
if 'ADSENSE_YES' in os.environ:
ADSENSE_YES = True
ADSENSE_USER = os.environ['ADSENSE_USER']
ADSENSE_MAIN = os.environ['ADSENSE_MAIN']
else:
ADSENSE_YES = False
ADSENSE_USER = ''
ADSENSE_MAIN = ''
############################################################################################
### ANALYTICS SETTINGS #####################################################################
############################################################################################
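# Google Analytics is only enabled when a tracking ID is supplied via the environment.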
G_ANALYTICS_ID = None
if 'G_ANALYTICS_ID' in os.environ:
G_ANALYTICS_ID = os.environ['G_ANALYTICS_ID']
|
apache-2.0
| 6,907,194,725,354,861,000
| 31.854881
| 141
| 0.509075
| false
| 4.14652
| false
| false
| false
|
SUSE-Cloud/glance
|
glance/tests/unit/v1/test_api.py
|
1
|
119257
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- coding: utf-8 -*-
# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import hashlib
import json
import StringIO
from oslo.config import cfg
import routes
import six
import webob
import glance.api
import glance.api.common
from glance.api.v1 import filters
from glance.api.v1 import images
from glance.api.v1 import router
from glance.common import exception
import glance.common.config
import glance.context
from glance.db.sqlalchemy import api as db_api
from glance.db.sqlalchemy import models as db_models
from glance.openstack.common import timeutils
from glance.openstack.common import uuidutils
import glance.store.filesystem
from glance.tests.unit import base
from glance.tests import utils as test_utils
import glance.tests.unit.utils as unit_test_utils
CONF = cfg.CONF
_gen_uuid = uuidutils.generate_uuid
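# IDs of the two image fixtures created for every test case.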
UUID1 = _gen_uuid()
UUID2 = _gen_uuid()
class TestGlanceAPI(base.IsolatedUnitTest):
def setUp(self):
"""Establish a clean test environment"""
super(TestGlanceAPI, self).setUp()
self.mapper = routes.Mapper()
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper))
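        # Two image fixtures: a private ami kernel image and a public vhd/ovf image.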
self.FIXTURES = [
{'id': UUID1,
'name': 'fake image #1',
'status': 'active',
'disk_format': 'ami',
'container_format': 'ami',
'is_public': False,
'created_at': timeutils.utcnow(),
'updated_at': timeutils.utcnow(),
'deleted_at': None,
'deleted': False,
'checksum': None,
'size': 13,
'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID1),
'metadata': {}}],
'properties': {'type': 'kernel'}},
{'id': UUID2,
'name': 'fake image #2',
'status': 'active',
'disk_format': 'vhd',
'container_format': 'ovf',
'is_public': True,
'created_at': timeutils.utcnow(),
'updated_at': timeutils.utcnow(),
'deleted_at': None,
'deleted': False,
'checksum': 'abc123',
'size': 19,
'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID2),
'metadata': {}}],
'properties': {}}]
self.context = glance.context.RequestContext(is_admin=True)
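        # Reset the database schema and (re)load the fixtures for each test.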
db_api.setup_db_env()
db_api.get_engine()
self.destroy_fixtures()
self.create_fixtures()
def tearDown(self):
"""Clear the test environment"""
super(TestGlanceAPI, self).tearDown()
self.destroy_fixtures()
def create_fixtures(self):
for fixture in self.FIXTURES:
db_api.image_create(self.context, fixture)
# We write a fake image file to the filesystem
with open("%s/%s" % (self.test_dir, fixture['id']), 'wb') as image:
image.write("chunk00000remainder")
image.flush()
def destroy_fixtures(self):
# Easiest to just drop the models and re-create them...
db_models.unregister_models(db_api._ENGINE)
db_models.register_models(db_api._ENGINE)
def _do_test_defaulted_format(self, format_key, format_value):
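        # When only one of the Amazon formats (aki/ari/ami) is supplied, the other
        # format is expected to default to the same value.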
fixture_headers = {'x-image-meta-name': 'defaulted',
'x-image-meta-location': 'http://localhost:0/image',
format_key: format_value}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals(format_value, res_body['disk_format'])
self.assertEquals(format_value, res_body['container_format'])
def test_defaulted_amazon_format(self):
for key in ('x-image-meta-disk-format',
'x-image-meta-container-format'):
for value in ('aki', 'ari', 'ami'):
self._do_test_defaulted_format(key, value)
def test_bad_disk_format(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'invalid',
'x-image-meta-container-format': 'ami',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Invalid disk format' in res.body, res.body)
def test_configured_disk_format_good(self):
self.config(disk_formats=['foo'])
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'foo',
'x-image-meta-container-format': 'bare',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
def test_configured_disk_format_bad(self):
self.config(disk_formats=['foo'])
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'bar',
'x-image-meta-container-format': 'bare',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Invalid disk format' in res.body, res.body)
def test_configured_container_format_good(self):
self.config(container_formats=['foo'])
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'raw',
'x-image-meta-container-format': 'foo',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
def test_configured_container_format_bad(self):
self.config(container_formats=['foo'])
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'raw',
'x-image-meta-container-format': 'bar',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Invalid container format' in res.body, res.body)
def test_container_and_disk_amazon_format_differs(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'aki',
'x-image-meta-container-format': 'ami'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
expected = ("Invalid mix of disk and container formats. "
"When setting a disk or container format to one of "
"'aki', 'ari', or 'ami', "
"the container and disk formats must match.")
self.assertEquals(res.status_int, 400)
self.assertTrue(expected in res.body, res.body)
def test_create_with_location_no_container_format(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'vhd',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Invalid container format' in res.body)
def test_bad_container_format(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://localhost:0/image.tar.gz',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'invalid',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Invalid container format' in res.body)
def test_bad_image_size(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'bogus',
'x-image-meta-location': 'http://example.com/image.tar.gz',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-size': 'invalid',
'x-image-meta-container-format': 'bare',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
self.assertTrue('Incoming image size' in res.body)
def test_bad_image_name(self):
fixture_headers = {
'x-image-meta-store': 'bad',
'x-image-meta-name': 'X' * 256,
'x-image-meta-location': 'http://example.com/image.tar.gz',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'bare',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
    def test_add_image_no_location_no_image_as_body(self):
        """Tests creating a queued image with no body and no location header"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('queued', res_body['status'])
image_id = res_body['id']
# Test that we are able to edit the Location field
# per LP Bug #911599
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.headers['x-image-meta-location'] = 'http://localhost:0/images/123'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_body = json.loads(res.body)['image']
# Once the location is set, the image should be activated
# see LP Bug #939484
self.assertEquals('active', res_body['status'])
self.assertFalse('location' in res_body) # location never shown
    def test_add_image_no_location_no_content_type(self):
        """Tests creating a queued image with no body and no location header"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
req.body = "chunk00000remainder"
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
    def test_add_image_size_header_too_big(self):
        """Tests that a too-big supplied image size raises BadRequest"""
fixture_headers = {'x-image-meta-size': CONF.image_size_cap + 1,
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_size_chunked_data_too_big(self):
self.config(image_size_cap=512)
fixture_headers = {
'x-image-meta-name': 'fake image #3',
'x-image-meta-container_format': 'ami',
'x-image-meta-disk_format': 'ami',
'transfer-encoding': 'chunked',
'content-type': 'application/octet-stream',
}
req = webob.Request.blank("/images")
req.method = 'POST'
req.body_file = StringIO.StringIO('X' * (CONF.image_size_cap + 1))
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 413)
def test_add_image_size_data_too_big(self):
self.config(image_size_cap=512)
fixture_headers = {
'x-image-meta-name': 'fake image #3',
'x-image-meta-container_format': 'ami',
'x-image-meta-disk_format': 'ami',
'content-type': 'application/octet-stream',
}
req = webob.Request.blank("/images")
req.method = 'POST'
req.body = 'X' * (CONF.image_size_cap + 1)
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_size_header_exceed_quota(self):
quota = 500
self.config(user_storage_quota=quota)
fixture_headers = {'x-image-meta-size': quota + 1,
'x-image-meta-name': 'fake image #3',
'x-image-meta-container_format': 'bare',
'x-image-meta-disk_format': 'qcow2',
'content-type': 'application/octet-stream',
}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.body = 'X' * (quota + 1)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 413)
def test_add_image_size_data_exceed_quota(self):
quota = 500
self.config(user_storage_quota=quota)
fixture_headers = {
'x-image-meta-name': 'fake image #3',
'x-image-meta-container_format': 'bare',
'x-image-meta-disk_format': 'qcow2',
'content-type': 'application/octet-stream',
}
req = webob.Request.blank("/images")
req.method = 'POST'
req.body = 'X' * (quota + 1)
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 413)
def test_add_image_size_data_exceed_quota_readd(self):
quota = 500
self.config(user_storage_quota=quota)
fixture_headers = {
'x-image-meta-name': 'fake image #3',
'x-image-meta-container_format': 'bare',
'x-image-meta-disk_format': 'qcow2',
'content-type': 'application/octet-stream',
}
req = webob.Request.blank("/images")
req.method = 'POST'
req.body = 'X' * (quota + 1)
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 413)
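        # A second upload that fits within the remaining quota should succeed.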
used_size = sum([f['size'] for f in self.FIXTURES])
req = webob.Request.blank("/images")
req.method = 'POST'
req.body = 'X' * (quota - used_size)
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
def _add_check_no_url_info(self):
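        # An image with no data must not expose 'locations' or 'direct_url',
        # regardless of the show_* configuration options exercised by the callers.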
fixture_headers = {'x-image-meta-disk-format': 'ami',
'x-image-meta-container-format': 'ami',
'x-image-meta-size': '0',
'x-image-meta-name': 'empty image'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
res_body = json.loads(res.body)['image']
self.assertFalse('locations' in res_body)
self.assertFalse('direct_url' in res_body)
image_id = res_body['id']
# HEAD empty image
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertFalse('x-image-meta-locations' in res.headers)
self.assertFalse('x-image-meta-direct_url' in res.headers)
def test_add_check_no_url_info_ml(self):
self.config(show_multiple_locations=True)
self._add_check_no_url_info()
def test_add_check_no_url_info_direct_url(self):
self.config(show_image_direct_url=True)
self._add_check_no_url_info()
def test_add_check_no_url_info_both_on(self):
self.config(show_image_direct_url=True)
self.config(show_multiple_locations=True)
self._add_check_no_url_info()
def test_add_check_no_url_info_both_off(self):
self._add_check_no_url_info()
def test_add_image_zero_size(self):
"""Tests creating an active image with explicitly zero size"""
fixture_headers = {'x-image-meta-disk-format': 'ami',
'x-image-meta-container-format': 'ami',
'x-image-meta-size': '0',
'x-image-meta-name': 'empty image'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('active', res_body['status'])
image_id = res_body['id']
# GET empty image
req = webob.Request.blank("/images/%s" % image_id)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res.body), 0)
def _do_test_add_image_attribute_mismatch(self, attributes):
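        # Supplying a checksum or size that does not match the uploaded data
        # must be rejected with a 400.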
fixture_headers = {
'x-image-meta-name': 'fake image #3',
}
fixture_headers.update(attributes)
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "XXXX"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_checksum_mismatch(self):
attributes = {
'x-image-meta-checksum': 'asdf',
}
self._do_test_add_image_attribute_mismatch(attributes)
def test_add_image_size_mismatch(self):
attributes = {
'x-image-meta-size': str(len("XXXX") + 1),
}
self._do_test_add_image_attribute_mismatch(attributes)
def test_add_image_checksum_and_size_mismatch(self):
attributes = {
'x-image-meta-checksum': 'asdf',
'x-image-meta-size': str(len("XXXX") + 1),
}
self._do_test_add_image_attribute_mismatch(attributes)
    def test_add_image_bad_store(self):
        """Tests that an invalid store header raises BadRequest"""
fixture_headers = {'x-image-meta-store': 'bad',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_basic_file_store(self):
"""Tests to add a basic image in the file store"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
# Test that the Location: header is set to the URI to
        # edit the newly-created image, as required by APP (AtomPub).
# See LP Bug #719825
self.assertTrue('location' in res.headers,
"'location' not in response headers.\n"
"res.headerlist = %r" % res.headerlist)
res_body = json.loads(res.body)['image']
self.assertTrue('/images/%s' % res_body['id']
in res.headers['location'])
self.assertEquals('active', res_body['status'])
image_id = res_body['id']
# Test that we are NOT able to edit the Location field
# per LP Bug #911599
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.headers['x-image-meta-location'] = 'http://example.com/images/123'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_add_image_unauthorized(self):
rules = {"add_image": '!'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_add_publicize_image_unauthorized(self):
rules = {"add_image": '@', "modify_image": '@',
"publicize_image": '!'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-is-public': 'true',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_add_publicize_image_authorized(self):
rules = {"add_image": '@', "modify_image": '@',
"publicize_image": '@'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-is-public': 'true',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
def test_add_copy_from_image_unauthorized(self):
rules = {"add_image": '@', "copy_from": '!'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-glance-api-copy-from': 'http://glance.com/i.ovf',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_add_copy_from_image_authorized(self):
rules = {"add_image": '@', "copy_from": '@'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-glance-api-copy-from': 'http://glance.com/i.ovf',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
    def test_add_copy_from_with_nonempty_body(self):
        """Tests creating an image from copy-from with a nonempty body"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-glance-api-copy-from': 'http://a/b/c.ovf',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F'}
req = webob.Request.blank("/images")
req.headers['Content-Type'] = 'application/octet-stream'
req.method = 'POST'
req.body = "chunk00000remainder"
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
    def test_add_location_with_nonempty_body(self):
        """Tests creating an image from a location with a nonempty body"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-location': 'http://a/b/c.tar.gz',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F'}
req = webob.Request.blank("/images")
req.headers['Content-Type'] = 'application/octet-stream'
req.method = 'POST'
req.body = "chunk00000remainder"
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
    def test_add_location_with_conflict_image_size(self):
        """Tests creating an image from a location with a conflicting size"""
self.stubs.Set(glance.api.v1.images, 'get_size_from_backend',
lambda *args, **kwargs: 2)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-location': 'http://a/b/c.tar.gz',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F',
'x-image-meta-size': '1'}
req = webob.Request.blank("/images")
req.headers['Content-Type'] = 'application/octet-stream'
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 409)
    def test_add_copy_from_with_location(self):
        """Tests creating an image from both copy-from and location headers"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-glance-api-copy-from': 'http://a/b/c.ovf',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F',
'x-image-meta-location': 'http://a/b/c.tar.gz'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def _do_test_post_image_content_missing_format(self, missing):
"""Tests creation of an image with missing format"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
header = 'x-image-meta-' + missing.replace('_', '-')
del fixture_headers[header]
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
    def test_add_copy_from_with_restricted_sources(self):
        """Tests creating an image from copy-from with restricted sources"""
header_template = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #F'}
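        # Each of the restricted source schemes listed below must be rejected with a 400.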
schemas = ["file:///etc/passwd",
"swift+config:///xxx",
"filesystem:///etc/passwd"]
for schema in schemas:
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in six.iteritems(header_template):
req.headers[k] = v
req.headers['x-glance-api-copy-from'] = schema
res = req.get_response(self.api)
self.assertEqual(400, res.status_int)
def test_post_image_content_missing_disk_format(self):
"""Tests creation of an image with missing disk format"""
self._do_test_post_image_content_missing_format('disk_format')
def test_post_image_content_missing_container_type(self):
"""Tests creation of an image with missing container format"""
self._do_test_post_image_content_missing_format('container_format')
def _do_test_put_image_content_missing_format(self, missing):
"""Tests delayed activation of an image with missing format"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
header = 'x-image-meta-' + missing.replace('_', '-')
del fixture_headers[header]
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('queued', res_body['status'])
image_id = res_body['id']
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_put_image_content_missing_disk_format(self):
"""Tests delayed activation of image with missing disk format"""
self._do_test_put_image_content_missing_format('disk_format')
def test_put_image_content_missing_container_type(self):
"""Tests delayed activation of image with missing container format"""
self._do_test_put_image_content_missing_format('container_format')
    def test_update_deleted_image(self):
        """Tests that an exception is raised when updating a deleted image"""
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
fixture = {'name': 'test_del_img'}
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(image=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
self.assertTrue('Forbidden to update deleted image' in res.body)
    def test_delete_deleted_image(self):
        """Tests that an exception is raised when deleting a deleted image"""
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
# Verify the status is deleted
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEqual("deleted", res.headers['x-image-meta-status'])
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
msg = "Image %s not found." % UUID2
self.assertTrue(msg in res.body)
# Verify the status is still deleted
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEqual("deleted", res.headers['x-image-meta-status'])
def test_delete_pending_delete_image(self):
"""
        Tests that the correct response is returned when deleting
a pending_delete image
"""
# First deletion
self.config(delayed_delete=True, scrubber_datadir='/tmp/scrubber')
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
# Verify the status is pending_delete
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEqual("pending_delete", res.headers['x-image-meta-status'])
# Second deletion
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
self.assertTrue('Forbidden to delete a pending_delete image'
in res.body)
# Verify the status is still pending_delete
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEqual("pending_delete", res.headers['x-image-meta-status'])
def test_register_and_upload(self):
"""
Test that the process of registering an image with
some metadata, then uploading an image file with some
more metadata doesn't mark the original metadata deleted
:see LP Bug#901534
"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3',
'x-image-meta-property-key1': 'value1'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertTrue('id' in res_body)
image_id = res_body['id']
self.assertTrue('/images/%s' % image_id in res.headers['location'])
# Verify the status is queued
self.assertTrue('status' in res_body)
self.assertEqual('queued', res_body['status'])
# Check properties are not deleted
self.assertTrue('properties' in res_body)
self.assertTrue('key1' in res_body['properties'])
self.assertEqual('value1', res_body['properties']['key1'])
# Now upload the image file along with some more
# metadata and verify original metadata properties
# are not marked deleted
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.headers['Content-Type'] = 'application/octet-stream'
req.headers['x-image-meta-property-key2'] = 'value2'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
        # Verify the original property is kept and the status is now active
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertTrue('x-image-meta-property-key1' in res.headers,
"Did not find required property in headers. "
"Got headers: %r" % res.headers)
self.assertEqual("active", res.headers['x-image-meta-status'])
def test_disable_purge_props(self):
"""
Test the special x-glance-registry-purge-props header controls
the purge property behaviour of the registry.
:see LP Bug#901534
"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3',
'x-image-meta-property-key1': 'value1'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = "chunk00000remainder"
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertTrue('id' in res_body)
image_id = res_body['id']
self.assertTrue('/images/%s' % image_id in res.headers['location'])
        # Verify the status is active (the image data was uploaded with the POST)
self.assertTrue('status' in res_body)
self.assertEqual('active', res_body['status'])
# Check properties are not deleted
self.assertTrue('properties' in res_body)
self.assertTrue('key1' in res_body['properties'])
self.assertEqual('value1', res_body['properties']['key1'])
# Now update the image, setting new properties without
# passing the x-glance-registry-purge-props header and
# verify that original properties are marked deleted.
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.headers['x-image-meta-property-key2'] = 'value2'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
# Verify the original property no longer in headers
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertTrue('x-image-meta-property-key2' in res.headers,
"Did not find required property in headers. "
"Got headers: %r" % res.headers)
self.assertFalse('x-image-meta-property-key1' in res.headers,
"Found property in headers that was not expected. "
"Got headers: %r" % res.headers)
# Now update the image, setting new properties and
# passing the x-glance-registry-purge-props header with
# a value of "false" and verify that second property
# still appears in headers.
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.headers['x-image-meta-property-key3'] = 'value3'
req.headers['x-glance-registry-purge-props'] = 'false'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
# Verify the second and third property in headers
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertTrue('x-image-meta-property-key2' in res.headers,
"Did not find required property in headers. "
"Got headers: %r" % res.headers)
self.assertTrue('x-image-meta-property-key3' in res.headers,
"Did not find required property in headers. "
"Got headers: %r" % res.headers)
def test_publicize_image_unauthorized(self):
"""Create a non-public image then fail to make public"""
rules = {"add_image": '@', "publicize_image": '!'}
self.set_policy_rules(rules)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-is-public': 'false',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
req = webob.Request.blank("/images/%s" % res_body['id'])
req.method = 'PUT'
req.headers['x-image-meta-is-public'] = 'true'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
    def test_update_image_size_header_too_big(self):
        """Tests that a too-big supplied image size raises BadRequest"""
fixture_headers = {'x-image-meta-size': CONF.image_size_cap + 1}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'PUT'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_update_image_size_data_too_big(self):
self.config(image_size_cap=512)
fixture_headers = {'content-type': 'application/octet-stream'}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'PUT'
req.body = 'X' * (CONF.image_size_cap + 1)
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_update_image_size_chunked_data_too_big(self):
self.config(image_size_cap=512)
# Create new image that has no data
req = webob.Request.blank("/images")
req.method = 'POST'
req.headers['x-image-meta-name'] = 'something'
req.headers['x-image-meta-container_format'] = 'ami'
req.headers['x-image-meta-disk_format'] = 'ami'
res = req.get_response(self.api)
image_id = json.loads(res.body)['image']['id']
fixture_headers = {
'content-type': 'application/octet-stream',
'transfer-encoding': 'chunked',
}
req = webob.Request.blank("/images/%s" % image_id)
req.method = 'PUT'
req.body_file = StringIO.StringIO('X' * (CONF.image_size_cap + 1))
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 413)
def test_update_non_existing_image(self):
self.config(image_size_cap=100)
        req = webob.Request.blank("/images/%s" % _gen_uuid())
req.method = 'PUT'
req.body = 'test'
req.headers['x-image-meta-name'] = 'test'
req.headers['x-image-meta-container_format'] = 'ami'
req.headers['x-image-meta-disk_format'] = 'ami'
req.headers['x-image-meta-is_public'] = 'False'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 404)
def test_update_public_image(self):
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-is-public': 'true',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
req = webob.Request.blank("/images/%s" % res_body['id'])
req.method = 'PUT'
req.headers['x-image-meta-name'] = 'updated public image'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
def test_get_index_sort_name_asc(self):
"""
Tests that the /images registry API returns list of
public images sorted alphabetically by name in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'asdf',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'xyz',
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/images?sort_key=name&sort_dir=asc')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 3)
self.assertEquals(images[0]['id'], UUID3)
self.assertEquals(images[1]['id'], UUID2)
self.assertEquals(images[2]['id'], UUID4)
def test_get_details_filter_changes_since(self):
"""
        Tests that the /images/detail registry API correctly filters
        images using the changes-since parameter
"""
dt1 = timeutils.utcnow() - datetime.timedelta(1)
iso1 = timeutils.isotime(dt1)
date_only1 = dt1.strftime('%Y-%m-%d')
date_only2 = dt1.strftime('%Y%m%d')
date_only3 = dt1.strftime('%Y-%m%d')
dt2 = timeutils.utcnow() + datetime.timedelta(1)
iso2 = timeutils.isotime(dt2)
image_ts = timeutils.utcnow() + datetime.timedelta(2)
hour_before = image_ts.strftime('%Y-%m-%dT%H:%M:%S%%2B01:00')
hour_after = image_ts.strftime('%Y-%m-%dT%H:%M:%S-01:00')
dt4 = timeutils.utcnow() + datetime.timedelta(3)
iso4 = timeutils.isotime(dt4)
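        # Create one image that is immediately deleted (UUID3) and one whose
        # timestamps lie two days in the future (UUID4), so the changes-since
        # filters below can distinguish them.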
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'fake image #3',
'size': 18,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
db_api.image_destroy(self.context, UUID3)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'ami',
'container_format': 'ami',
'name': 'fake image #4',
'size': 20,
'checksum': None,
'created_at': image_ts,
'updated_at': image_ts}
db_api.image_create(self.context, extra_fixture)
# Check a standard list, 4 images in db (2 deleted)
req = webob.Request.blank('/images/detail')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 2)
self.assertEqual(images[0]['id'], UUID4)
self.assertEqual(images[1]['id'], UUID2)
# Expect 3 images (1 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' % iso1)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 3)
self.assertEqual(images[0]['id'], UUID4)
self.assertEqual(images[1]['id'], UUID3) # deleted
self.assertEqual(images[2]['id'], UUID2)
# Expect 1 images (0 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' % iso2)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 1)
self.assertEqual(images[0]['id'], UUID4)
# Expect 1 images (0 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' %
hour_before)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 1)
self.assertEqual(images[0]['id'], UUID4)
# Expect 0 images (0 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' %
hour_after)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 0)
# Expect 0 images (0 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' % iso4)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 0)
for param in [date_only1, date_only2, date_only3]:
# Expect 3 images (1 deleted)
req = webob.Request.blank('/images/detail?changes-since=%s' %
param)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
self.assertEquals(len(images), 3)
self.assertEqual(images[0]['id'], UUID4)
self.assertEqual(images[1]['id'], UUID3) # deleted
self.assertEqual(images[2]['id'], UUID2)
# Bad request (empty changes-since param)
req = webob.Request.blank('/images/detail?changes-since=')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_get_images_bad_urls(self):
"""Check that routes collections are not on (LP bug 1185828)"""
req = webob.Request.blank('/images/detail.xxx')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
req = webob.Request.blank('/images.xxx')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
req = webob.Request.blank('/images/new')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
req = webob.Request.blank("/images/%s/members" % UUID1)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank("/images/%s/members.xxx" % UUID1)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_get_images_detailed_unauthorized(self):
rules = {"get_images": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank('/images/detail')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_get_images_unauthorized(self):
rules = {"get_images": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank('/images/detail')
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_store_location_not_revealed(self):
"""
Test that the internal store location is NOT revealed
through the API server
"""
# Check index and details...
for url in ('/images', '/images/detail'):
req = webob.Request.blank(url)
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
res_dict = json.loads(res.body)
images = res_dict['images']
num_locations = sum([1 for record in images
if 'location' in record.keys()])
self.assertEquals(0, num_locations, images)
# Check GET
req = webob.Request.blank("/images/%s" % UUID2)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertFalse('X-Image-Meta-Location' in res.headers)
# Check HEAD
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertFalse('X-Image-Meta-Location' in res.headers)
# Check PUT
req = webob.Request.blank("/images/%s" % UUID2)
req.body = res.body
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_body = json.loads(res.body)
self.assertFalse('location' in res_body['image'])
# Check POST
req = webob.Request.blank("/images")
headers = {'x-image-meta-location': 'http://localhost',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
for k, v in headers.iteritems():
req.headers[k] = v
req.method = 'POST'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 201)
res_body = json.loads(res.body)
self.assertFalse('location' in res_body['image'])
def test_image_is_checksummed(self):
"""Test that the image contents are checksummed properly"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
image_contents = "chunk00000remainder"
image_checksum = hashlib.md5(image_contents).hexdigest()
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = image_contents
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals(image_checksum, res_body['checksum'],
"Mismatched checksum. Expected %s, got %s" %
(image_checksum, res_body['checksum']))
def test_etag_equals_checksum_header(self):
"""Test that the ETag header matches the x-image-meta-checksum"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
image_contents = "chunk00000remainder"
image_checksum = hashlib.md5(image_contents).hexdigest()
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = image_contents
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
image = json.loads(res.body)['image']
# HEAD the image and check the ETag equals the checksum header...
expected_headers = {'x-image-meta-checksum': image_checksum,
'etag': image_checksum}
req = webob.Request.blank("/images/%s" % image['id'])
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
for key in expected_headers.keys():
self.assertTrue(key in res.headers,
"required header '%s' missing from "
"returned headers" % key)
for key, value in expected_headers.iteritems():
self.assertEquals(value, res.headers[key])
    def test_bad_checksum_prevents_image_creation(self):
        """Test that a bad checksum prevents the image from being created"""
image_contents = "chunk00000remainder"
bad_checksum = hashlib.md5("invalid").hexdigest()
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3',
'x-image-meta-checksum': bad_checksum,
'x-image-meta-is-public': 'true'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
req.headers['Content-Type'] = 'application/octet-stream'
req.body = image_contents
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
# Test that only one image was returned (that already exists)
req = webob.Request.blank("/images")
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
images = json.loads(res.body)['images']
self.assertEqual(len(images), 1)
def test_image_meta(self):
"""Test for HEAD /images/<ID>"""
expected_headers = {'x-image-meta-id': UUID2,
'x-image-meta-name': 'fake image #2'}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
for key, value in expected_headers.iteritems():
self.assertEquals(value, res.headers[key])
def test_image_meta_unauthorized(self):
rules = {"get_image": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_show_image_basic(self):
req = webob.Request.blank("/images/%s" % UUID2)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, 'application/octet-stream')
self.assertEqual('chunk00000remainder', res.body)
def test_show_non_exists_image(self):
req = webob.Request.blank("/images/%s" % _gen_uuid())
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_show_image_unauthorized(self):
rules = {"get_image": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank("/images/%s" % UUID2)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 403)
def test_show_image_unauthorized_download(self):
rules = {"download_image": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank("/images/%s" % UUID2)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 403)
def test_delete_image(self):
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(res.body, '')
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404,
res.body)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(res.headers['x-image-meta-deleted'], 'True')
self.assertEquals(res.headers['x-image-meta-status'], 'deleted')
def test_delete_non_exists_image(self):
req = webob.Request.blank("/images/%s" % _gen_uuid())
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_delete_not_allowed(self):
# Verify we can get the image data
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
req.headers['X-Auth-Token'] = 'user:tenant:'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res.body), 19)
# Verify we cannot delete the image
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 403)
# Verify the image data is still there
req.method = 'GET'
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res.body), 19)
def test_delete_queued_image(self):
"""Delete an image in a queued state
Bug #747799 demonstrated that trying to DELETE an image
that had had its save process killed manually results in failure
because the location attribute is None.
Bug #1048851 demonstrated that the status was not properly
being updated to 'deleted' from 'queued'.
"""
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('queued', res_body['status'])
# Now try to delete the image...
req = webob.Request.blank("/images/%s" % res_body['id'])
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s' % res_body['id'])
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(res.headers['x-image-meta-deleted'], 'True')
self.assertEquals(res.headers['x-image-meta-status'], 'deleted')
def test_delete_queued_image_delayed_delete(self):
"""Delete an image in a queued state when delayed_delete is on
Bug #1048851 demonstrated that the status was not properly
being updated to 'deleted' from 'queued'.
"""
self.config(delayed_delete=True)
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-name': 'fake image #3'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('queued', res_body['status'])
# Now try to delete the image...
req = webob.Request.blank("/images/%s" % res_body['id'])
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s' % res_body['id'])
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(res.headers['x-image-meta-deleted'], 'True')
self.assertEquals(res.headers['x-image-meta-status'], 'deleted')
def test_delete_protected_image(self):
fixture_headers = {'x-image-meta-store': 'file',
'x-image-meta-name': 'fake image #3',
'x-image-meta-disk-format': 'vhd',
'x-image-meta-container-format': 'ovf',
'x-image-meta-protected': 'True'}
req = webob.Request.blank("/images")
req.method = 'POST'
for k, v in fixture_headers.iteritems():
req.headers[k] = v
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
res_body = json.loads(res.body)['image']
self.assertEquals('queued', res_body['status'])
# Now try to delete the image...
req = webob.Request.blank("/images/%s" % res_body['id'])
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_delete_image_unauthorized(self):
rules = {"delete_image": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
def test_get_details_invalid_marker(self):
"""
Tests that the /images/detail registry API returns a 400
when an invalid marker is provided
"""
req = webob.Request.blank('/images/detail?marker=%s' % _gen_uuid())
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_get_image_members(self):
"""
Tests members listing for existing images
"""
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
memb_list = json.loads(res.body)
num_members = len(memb_list['members'])
self.assertEquals(num_members, 0)
def test_get_image_members_allowed_by_policy(self):
rules = {"get_members": '@'}
self.set_policy_rules(rules)
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
memb_list = json.loads(res.body)
num_members = len(memb_list['members'])
self.assertEquals(num_members, 0)
def test_get_image_members_forbidden_by_policy(self):
rules = {"get_members": '!'}
self.set_policy_rules(rules)
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPForbidden.code)
def test_get_image_members_not_existing(self):
"""
Tests proper exception is raised if attempt to get members of
non-existing image
"""
req = webob.Request.blank('/images/%s/members' % _gen_uuid())
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_add_member(self):
"""
Tests adding image members
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=True)
req = webob.Request.blank('/images/%s/members/test' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 201)
def test_get_member_images(self):
"""
Tests image listing for members
"""
req = webob.Request.blank('/shared-images/pattieblack')
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
memb_list = json.loads(res.body)
num_members = len(memb_list['shared_images'])
self.assertEquals(num_members, 0)
def test_replace_members(self):
"""
        Tests that a non-admin replacing image members raises 401
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=False)
fixture = dict(member_id='pattieblack')
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(image_memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 401)
def test_active_image_immutable_props_for_user(self):
"""
Tests user cannot update immutable props of active image
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=False)
fixture_header_list = [{'x-image-meta-checksum': '1234'},
{'x-image-meta-size': '12345'}]
for fixture_header in fixture_header_list:
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'PUT'
for k, v in fixture_header.iteritems():
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
orig_value = res.headers[k]
req = webob.Request.blank('/images/%s' % UUID2)
req.headers[k] = v
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 403)
prop = k[len('x-image-meta-'):]
self.assertNotEqual(res.body.find("Forbidden to modify \'%s\' "
"of active "
"image" % prop), -1)
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(orig_value, res.headers[k])
def test_props_of_active_image_mutable_for_admin(self):
"""
Tests admin can update 'immutable' props of active image
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=True)
fixture_header_list = [{'x-image-meta-checksum': '1234'},
{'x-image-meta-size': '12345'}]
for fixture_header in fixture_header_list:
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'PUT'
for k, v in fixture_header.iteritems():
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
orig_value = res.headers[k]
req = webob.Request.blank('/images/%s' % UUID2)
req.headers[k] = v
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s' % UUID2)
req.method = 'HEAD'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
self.assertEquals(v, res.headers[k])
def test_replace_members_non_existing_image(self):
"""
        Tests that replacing members of a non-existing image raises 404
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=True)
fixture = dict(member_id='pattieblack')
req = webob.Request.blank('/images/%s/members' % _gen_uuid())
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(image_memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_replace_members_bad_request(self):
"""
Tests replacing image members raises bad request if body is wrong
"""
test_router_api = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router_api, is_admin=True)
fixture = dict(member_id='pattieblack')
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(image_memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 400)
def test_replace_members_positive(self):
"""
Tests replacing image members
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=True)
fixture = [dict(member_id='pattieblack', can_share=False)]
# Replace
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
def test_replace_members_forbidden_by_policy(self):
rules = {"modify_member": '!'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}]
req = webob.Request.blank('/images/%s/members' % UUID1)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPForbidden.code)
def test_replace_members_allowed_by_policy(self):
rules = {"modify_member": '@'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}]
req = webob.Request.blank('/images/%s/members' % UUID1)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNoContent.code)
    def test_add_member_unauthorized(self):
        """
        Tests that a non-admin adding an image member raises 401
        """
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=False)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 401)
def test_add_member_non_existing_image(self):
"""
        Tests that adding a member to a non-existing image raises 404
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=True)
test_uri = '/images/%s/members/pattieblack'
req = webob.Request.blank(test_uri % _gen_uuid())
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
def test_add_member_positive(self):
"""
Tests adding image members
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
def test_add_member_with_body(self):
"""
Tests adding image members
"""
fixture = dict(can_share=True)
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
req.body = json.dumps(dict(member=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
def test_add_member_forbidden_by_policy(self):
rules = {"modify_member": '!'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID1)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPForbidden.code)
def test_add_member_allowed_by_policy(self):
rules = {"modify_member": '@'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID1)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNoContent.code)
def test_get_members_of_deleted_image_raises_404(self):
"""
Tests members listing for deleted image raises 404.
"""
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'GET'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNotFound.code)
self.assertTrue(
'Image with identifier %s has been deleted.' % UUID2 in res.body)
def test_delete_member_of_deleted_image_raises_404(self):
"""
Tests deleting members of deleted image raises 404.
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNotFound.code)
self.assertTrue(
'Image with identifier %s has been deleted.' % UUID2 in res.body)
def test_update_members_of_deleted_image_raises_404(self):
"""
Tests update members of deleted image raises 404.
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
fixture = [{'member_id': 'pattieblack', 'can_share': 'false'}]
req = webob.Request.blank('/images/%s/members' % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(memberships=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNotFound.code)
self.assertTrue(
'Image with identifier %s has been deleted.' % UUID2 in res.body)
def test_create_member_to_deleted_image_raises_404(self):
"""
Tests adding members to deleted image raises 404.
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(test_router, is_admin=True)
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 200)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNotFound.code)
self.assertTrue(
'Image with identifier %s has been deleted.' % UUID2 in res.body)
def test_delete_member(self):
"""
        Tests that a non-admin deleting an image member raises 401
"""
test_router = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_router, is_admin=False)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 401)
def test_delete_member_on_non_existing_image(self):
"""
        Tests that deleting a member of a non-existing image raises 404
"""
test_router = router.API(self.mapper)
api = test_utils.FakeAuthMiddleware(test_router, is_admin=True)
test_uri = '/images/%s/members/pattieblack'
req = webob.Request.blank(test_uri % _gen_uuid())
req.method = 'DELETE'
res = req.get_response(api)
self.assertEquals(res.status_int, 404)
def test_delete_non_exist_member(self):
"""
        Tests that deleting a non-existent image member raises 404
"""
test_router = router.API(self.mapper)
api = test_utils.FakeAuthMiddleware(
test_router, is_admin=True)
req = webob.Request.blank('/images/%s/members/test_user' % UUID2)
req.method = 'DELETE'
res = req.get_response(api)
self.assertEquals(res.status_int, 404)
def test_delete_image_member(self):
test_rserver = router.API(self.mapper)
self.api = test_utils.FakeAuthMiddleware(
test_rserver, is_admin=True)
# Add member to image:
fixture = dict(can_share=True)
test_uri = '/images/%s/members/test_add_member_positive'
req = webob.Request.blank(test_uri % UUID2)
req.method = 'PUT'
req.content_type = 'application/json'
req.body = json.dumps(dict(member=fixture))
res = req.get_response(self.api)
self.assertEquals(res.status_int, 204)
# Delete member
test_uri = '/images/%s/members/test_add_member_positive'
req = webob.Request.blank(test_uri % UUID2)
req.headers['X-Auth-Token'] = 'test1:test1:'
req.method = 'DELETE'
req.content_type = 'application/json'
res = req.get_response(self.api)
self.assertEquals(res.status_int, 404)
self.assertTrue('Forbidden' in res.body)
def test_delete_member_allowed_by_policy(self):
rules = {"delete_member": '@', "modify_member": '@'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNoContent.code)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_member_forbidden_by_policy(self):
rules = {"delete_member": '!', "modify_member": '@'}
self.set_policy_rules(rules)
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper),
is_admin=True)
req = webob.Request.blank('/images/%s/members/pattieblack' % UUID2)
req.method = 'PUT'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPNoContent.code)
req.method = 'DELETE'
res = req.get_response(self.api)
self.assertEquals(res.status_int, webob.exc.HTTPForbidden.code)
class TestImageSerializer(base.IsolatedUnitTest):
def setUp(self):
"""Establish a clean test environment"""
super(TestImageSerializer, self).setUp()
self.receiving_user = 'fake_user'
self.receiving_tenant = 2
self.context = glance.context.RequestContext(
is_admin=True,
user=self.receiving_user,
tenant=self.receiving_tenant)
self.serializer = images.ImageSerializer()
def image_iter():
for x in ['chunk', '678911234', '56789']:
yield x
self.FIXTURE = {
'image_iterator': image_iter(),
'image_meta': {
'id': UUID2,
'name': 'fake image #2',
'status': 'active',
'disk_format': 'vhd',
'container_format': 'ovf',
'is_public': True,
'created_at': timeutils.utcnow(),
'updated_at': timeutils.utcnow(),
'deleted_at': None,
'deleted': False,
'checksum': '06ff575a2856444fbe93100157ed74ab92eb7eff',
'size': 19,
'owner': _gen_uuid(),
'location': "file:///tmp/glance-tests/2",
'properties': {},
}
}
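        # Note: image_iter() above yields three chunks totalling 19 bytes,
        # which is why 'size' here (and the byte counts asserted in the
        # notification tests below) is 19.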
def test_meta(self):
exp_headers = {'x-image-meta-id': UUID2,
'x-image-meta-location': 'file:///tmp/glance-tests/2',
'ETag': self.FIXTURE['image_meta']['checksum'],
'x-image-meta-name': 'fake image #2'}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
req.remote_addr = "1.2.3.4"
req.context = self.context
response = webob.Response(request=req)
self.serializer.meta(response, self.FIXTURE)
for key, value in exp_headers.iteritems():
self.assertEquals(value, response.headers[key])
def test_meta_utf8(self):
# We get unicode strings from JSON, and therefore all strings in the
# metadata will actually be unicode when handled internally. But we
# want to output utf-8.
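        # Illustrative expectation (Python 2 str/unicode semantics assumed):
        # a value such as u'\xe9' ('é') should reach the response headers as
        # the UTF-8 byte string '\xc3\xa9'.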
FIXTURE = {
'image_meta': {
'id': unicode(UUID2),
'name': u'fake image #2 with utf-8 éàè',
'status': u'active',
'disk_format': u'vhd',
'container_format': u'ovf',
'is_public': True,
'created_at': timeutils.utcnow(),
'updated_at': timeutils.utcnow(),
'deleted_at': None,
'deleted': False,
'checksum': u'06ff575a2856444fbe93100157ed74ab92eb7eff',
'size': 19,
'owner': unicode(_gen_uuid()),
'location': u"file:///tmp/glance-tests/2",
'properties': {
u'prop_éé': u'ça marche',
u'prop_çé': u'çé',
}
}
}
exp_headers = {'x-image-meta-id': UUID2.encode('utf-8'),
'x-image-meta-location': 'file:///tmp/glance-tests/2',
'ETag': '06ff575a2856444fbe93100157ed74ab92eb7eff',
'x-image-meta-size': '19', # str, not int
'x-image-meta-name': 'fake image #2 with utf-8 éàè',
'x-image-meta-property-prop_éé': 'ça marche',
'x-image-meta-property-prop_çé': u'çé'.encode('utf-8')}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'HEAD'
req.remote_addr = "1.2.3.4"
req.context = self.context
response = webob.Response(request=req)
self.serializer.meta(response, FIXTURE)
self.assertNotEqual(type(FIXTURE['image_meta']['name']),
type(response.headers['x-image-meta-name']))
self.assertEqual(response.headers['x-image-meta-name'].decode('utf-8'),
FIXTURE['image_meta']['name'])
for key, value in exp_headers.iteritems():
self.assertEquals(value, response.headers[key])
FIXTURE['image_meta']['properties'][u'prop_bad'] = 'çé'
self.assertRaises(UnicodeDecodeError,
self.serializer.meta, response, FIXTURE)
def test_show(self):
exp_headers = {'x-image-meta-id': UUID2,
'x-image-meta-location': 'file:///tmp/glance-tests/2',
'ETag': self.FIXTURE['image_meta']['checksum'],
'x-image-meta-name': 'fake image #2'}
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
req.context = self.context
response = webob.Response(request=req)
self.serializer.show(response, self.FIXTURE)
for key, value in exp_headers.iteritems():
self.assertEquals(value, response.headers[key])
self.assertEqual(response.body, 'chunk67891123456789')
def test_show_notify(self):
"""Make sure an eventlet posthook for notify_image_sent is added."""
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
req.context = self.context
response = webob.Response(request=req)
response.request.environ['eventlet.posthooks'] = []
self.serializer.show(response, self.FIXTURE)
#just make sure the app_iter is called
for chunk in response.app_iter:
pass
self.assertNotEqual(response.request.environ['eventlet.posthooks'], [])
def test_image_send_notification(self):
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
req.remote_addr = '1.2.3.4'
req.context = self.context
image_meta = self.FIXTURE['image_meta']
called = {"notified": False}
expected_payload = {
'bytes_sent': 19,
'image_id': UUID2,
'owner_id': image_meta['owner'],
'receiver_tenant_id': self.receiving_tenant,
'receiver_user_id': self.receiving_user,
'destination_ip': '1.2.3.4',
}
def fake_info(_event_type, _payload):
self.assertEqual(_payload, expected_payload)
called['notified'] = True
self.stubs.Set(self.serializer.notifier, 'info', fake_info)
glance.api.common.image_send_notification(19, 19, image_meta, req,
self.serializer.notifier)
self.assertTrue(called['notified'])
def test_image_send_notification_error(self):
"""Ensure image.send notification is sent on error."""
req = webob.Request.blank("/images/%s" % UUID2)
req.method = 'GET'
req.remote_addr = '1.2.3.4'
req.context = self.context
image_meta = self.FIXTURE['image_meta']
called = {"notified": False}
expected_payload = {
'bytes_sent': 17,
'image_id': UUID2,
'owner_id': image_meta['owner'],
'receiver_tenant_id': self.receiving_tenant,
'receiver_user_id': self.receiving_user,
'destination_ip': '1.2.3.4',
}
def fake_error(_event_type, _payload):
self.assertEqual(_payload, expected_payload)
called['notified'] = True
self.stubs.Set(self.serializer.notifier, 'error', fake_error)
#expected and actually sent bytes differ
glance.api.common.image_send_notification(17, 19, image_meta, req,
self.serializer.notifier)
self.assertTrue(called['notified'])
def test_redact_location(self):
"""Ensure location redaction does not change original metadata"""
image_meta = {'size': 3, 'id': '123', 'location': 'http://localhost'}
redacted_image_meta = {'size': 3, 'id': '123'}
copy_image_meta = copy.deepcopy(image_meta)
tmp_image_meta = glance.api.v1.images.redact_loc(image_meta)
self.assertEqual(image_meta, copy_image_meta)
self.assertEqual(tmp_image_meta, redacted_image_meta)
def test_noop_redact_location(self):
"""Check no-op location redaction does not change original metadata"""
image_meta = {'size': 3, 'id': '123'}
redacted_image_meta = {'size': 3, 'id': '123'}
copy_image_meta = copy.deepcopy(image_meta)
tmp_image_meta = glance.api.v1.images.redact_loc(image_meta)
self.assertEqual(image_meta, copy_image_meta)
self.assertEqual(tmp_image_meta, redacted_image_meta)
self.assertEqual(image_meta, redacted_image_meta)
class TestFilterValidator(base.IsolatedUnitTest):
def test_filter_validator(self):
self.assertFalse(glance.api.v1.filters.validate('size_max', -1))
self.assertTrue(glance.api.v1.filters.validate('size_max', 1))
self.assertTrue(glance.api.v1.filters.validate('protected', 'True'))
self.assertTrue(glance.api.v1.filters.validate('protected', 'FALSE'))
self.assertFalse(glance.api.v1.filters.validate('protected', '-1'))
class TestAPIProtectedProps(base.IsolatedUnitTest):
def setUp(self):
"""Establish a clean test environment"""
super(TestAPIProtectedProps, self).setUp()
self.mapper = routes.Mapper()
# turn on property protections
self.set_property_protections()
self.api = test_utils.FakeAuthMiddleware(router.API(self.mapper))
db_api.setup_db_env()
db_api.get_engine()
db_models.unregister_models(db_api._ENGINE)
db_models.register_models(db_api._ENGINE)
def tearDown(self):
"""Clear the test environment"""
super(TestAPIProtectedProps, self).tearDown()
self.destroy_fixtures()
def destroy_fixtures(self):
# Easiest to just drop the models and re-create them...
db_models.unregister_models(db_api._ENGINE)
db_models.register_models(db_api._ENGINE)
    def _create_admin_image(self, props=None):
        # avoid sharing a mutable default argument between calls
        props = props or {}
        request = unit_test_utils.get_fake_request(path='/images')
headers = {'x-image-meta-disk-format': 'ami',
'x-image-meta-container-format': 'ami',
'x-image-meta-name': 'foo',
'x-image-meta-size': '0',
'x-auth-token': 'user:tenant:admin'}
headers.update(props)
for k, v in headers.iteritems():
request.headers[k] = v
created_image = request.get_response(self.api)
res_body = json.loads(created_image.body)['image']
image_id = res_body['id']
return image_id
def test_prop_protection_with_create_and_permitted_role(self):
"""
        As admin role, create an image and verify permitted role 'member' can
create a protected property
"""
image_id = self._create_admin_image()
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:member',
'x-image-meta-property-x_owner_foo': 'bar'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties']['x_owner_foo'], 'bar')
def test_prop_protection_with_create_and_unpermitted_role(self):
"""
As admin role, create an image and verify unpermitted role
'fake_member' can *not* create a protected property
"""
image_id = self._create_admin_image()
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:fake_member',
'x-image-meta-property-x_owner_foo': 'bar'}
for k, v in headers.iteritems():
another_request.headers[k] = v
another_request.get_response(self.api)
output = another_request.get_response(self.api)
self.assertEquals(output.status_int, webob.exc.HTTPForbidden.code)
self.assertIn("Property '%s' is protected" %
"x_owner_foo", output.body)
def test_prop_protection_with_show_and_permitted_role(self):
"""
As admin role, create an image with a protected property, and verify
permitted role 'member' can read that protected property via HEAD
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='HEAD', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:member'}
for k, v in headers.iteritems():
another_request.headers[k] = v
res2 = another_request.get_response(self.api)
self.assertEqual(res2.headers['x-image-meta-property-x_owner_foo'],
'bar')
def test_prop_protection_with_show_and_unpermitted_role(self):
"""
As admin role, create an image with a protected property, and verify
        unpermitted role 'fake_role' can *not* read that protected property via
HEAD
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='HEAD', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:fake_role'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
self.assertEqual('', output.body)
self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers)
def test_prop_protection_with_get_and_permitted_role(self):
"""
As admin role, create an image with a protected property, and verify
permitted role 'member' can read that protected property via GET
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='GET', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:member'}
for k, v in headers.iteritems():
another_request.headers[k] = v
res2 = another_request.get_response(self.api)
self.assertEqual(res2.headers['x-image-meta-property-x_owner_foo'],
'bar')
def test_prop_protection_with_get_and_unpermitted_role(self):
"""
As admin role, create an image with a protected property, and verify
        unpermitted role 'fake_role' can *not* read that protected property via
GET
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='GET', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:fake_role'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
self.assertEqual('', output.body)
self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers)
def test_prop_protection_with_detail_and_permitted_role(self):
"""
As admin role, create an image with a protected property, and verify
permitted role 'member' can read that protected property via
/images/detail
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='GET', path='/images/detail')
headers = {'x-auth-token': 'user:tenant:member'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
res_body = json.loads(output.body)['images'][0]
self.assertEqual(res_body['properties']['x_owner_foo'], 'bar')
def test_prop_protection_with_detail_and_unpermitted_role(self):
"""
As admin role, create an image with a protected property, and verify
        unpermitted role 'fake_role' can *not* read that protected property via
/images/detail
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
method='GET', path='/images/detail')
headers = {'x-auth-token': 'user:tenant:fake_role'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
res_body = json.loads(output.body)['images'][0]
self.assertNotIn('x-image-meta-property-x_owner_foo',
res_body['properties'])
def test_prop_protection_with_update_and_permitted_role(self):
"""
As admin role, create an image with protected property, and verify
permitted role 'member' can update that protected property
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:member',
'x-image-meta-property-x_owner_foo': 'baz'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties']['x_owner_foo'], 'baz')
def test_prop_protection_with_update_and_unpermitted_role(self):
"""
As admin role, create an image with protected property, and verify
unpermitted role 'fake_role' can *not* update that protected property
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:fake_role',
'x-image-meta-property-x_owner_foo': 'baz'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEquals(output.status_int, webob.exc.HTTPForbidden.code)
self.assertIn("Property '%s' is protected" %
"x_owner_foo", output.body)
def test_prop_protection_update_without_read(self):
"""
Test protected property cannot be updated without read permission
"""
image_id = self._create_admin_image(
{'x-image-meta-property-spl_update_only_prop': 'foo'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:spl_role',
'x-image-meta-property-spl_update_only_prop': 'bar'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEquals(output.status_int, webob.exc.HTTPForbidden.code)
self.assertIn("Property '%s' is protected" %
"spl_update_only_prop", output.body)
def test_prop_protection_update_noop(self):
"""
Test protected property update is allowed as long as the user has read
access and the value is unchanged
"""
image_id = self._create_admin_image(
{'x-image-meta-property-spl_read_prop': 'foo'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:spl_role',
'x-image-meta-property-spl_read_prop': 'foo'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties']['spl_read_prop'], 'foo')
self.assertEquals(output.status_int, 200)
def test_prop_protection_with_delete_and_permitted_role(self):
"""
As admin role, create an image with protected property, and verify
        permitted role 'member' can delete that protected property
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:member',
'X-Glance-Registry-Purge-Props': 'True'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties'], {})
def test_prop_protection_with_delete_and_unpermitted_read(self):
"""
Test protected property cannot be deleted without read permission
"""
image_id = self._create_admin_image(
{'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:fake_role',
'X-Glance-Registry-Purge-Props': 'True'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEquals(output.status_int, 200)
self.assertNotIn('x-image-meta-property-x_owner_foo', output.headers)
another_request = unit_test_utils.get_fake_request(
method='HEAD', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:admin'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
self.assertEqual('', output.body)
self.assertEqual(output.headers['x-image-meta-property-x_owner_foo'],
'bar')
def test_prop_protection_with_delete_and_unpermitted_delete(self):
"""
Test protected property cannot be deleted without delete permission
"""
image_id = self._create_admin_image(
{'x-image-meta-property-spl_update_prop': 'foo'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:spl_role',
'X-Glance-Registry-Purge-Props': 'True'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEquals(output.status_int, 403)
self.assertIn("Property '%s' is protected" %
"spl_update_prop", output.body)
another_request = unit_test_utils.get_fake_request(
method='HEAD', path='/images/%s' % image_id)
headers = {'x-auth-token': 'user:tenant:admin'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 200)
self.assertEqual('', output.body)
self.assertEqual(
output.headers['x-image-meta-property-spl_update_prop'], 'foo')
def test_read_protected_props_leak_with_update(self):
"""
Verify when updating props that ones we don't have read permission for
are not disclosed
"""
image_id = self._create_admin_image(
{'x-image-meta-property-spl_update_prop': '0',
'x-image-meta-property-foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:spl_role',
'x-image-meta-property-spl_update_prop': '1',
'X-Glance-Registry-Purge-Props': 'False'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties']['spl_update_prop'], '1')
self.assertNotIn('foo', res_body['properties'])
def test_update_protected_props_mix_no_read(self):
"""
Create an image with two props - one only readable by admin, and one
        readable/updatable by member. Verify member can successfully update
their property while the admin owned one is ignored transparently
"""
image_id = self._create_admin_image(
{'x-image-meta-property-admin_foo': 'bar',
'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:member',
'x-image-meta-property-x_owner_foo': 'baz'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(res_body['properties']['x_owner_foo'], 'baz')
self.assertNotIn('admin_foo', res_body['properties'])
def test_update_protected_props_mix_read(self):
"""
Create an image with two props - one readable/updatable by admin, but
also readable by spl_role. The other is readable/updatable by
spl_role. Verify spl_role can successfully update their property but
not the admin owned one
"""
custom_props = {
'x-image-meta-property-spl_read_only_prop': '1',
'x-image-meta-property-spl_update_prop': '2'
}
image_id = self._create_admin_image(custom_props)
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
# verify spl_role can update it's prop
headers = {'x-auth-token': 'user:tenant:spl_role',
'x-image-meta-property-spl_read_only_prop': '1',
'x-image-meta-property-spl_update_prop': '1'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertEqual(output.status_int, 200)
self.assertEqual(res_body['properties']['spl_read_only_prop'], '1')
self.assertEqual(res_body['properties']['spl_update_prop'], '1')
# verify spl_role can not update admin controlled prop
headers = {'x-auth-token': 'user:tenant:spl_role',
'x-image-meta-property-spl_read_only_prop': '2',
'x-image-meta-property-spl_update_prop': '1'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 403)
def test_delete_protected_props_mix_no_read(self):
"""
Create an image with two props - one only readable by admin, and one
        readable/deletable by member. Verify member can successfully delete
their property while the admin owned one is ignored transparently
"""
image_id = self._create_admin_image(
{'x-image-meta-property-admin_foo': 'bar',
'x-image-meta-property-x_owner_foo': 'bar'})
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:member',
'X-Glance-Registry-Purge-Props': 'True'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
res_body = json.loads(output.body)['image']
self.assertNotIn('x_owner_foo', res_body['properties'])
self.assertNotIn('admin_foo', res_body['properties'])
def test_delete_protected_props_mix_read(self):
"""
Create an image with two props - one readable/deletable by admin, but
also readable by spl_role. The other is readable/deletable by
        spl_role. Verify spl_role is forbidden to purge props in this scenario,
        since purging would also delete the admin-owned property it may only read.
"""
custom_props = {
'x-image-meta-property-spl_read_only_prop': '1',
'x-image-meta-property-spl_delete_prop': '2'
}
image_id = self._create_admin_image(custom_props)
another_request = unit_test_utils.get_fake_request(
path='/images/%s' % image_id, method='PUT')
headers = {'x-auth-token': 'user:tenant:spl_role',
'X-Glance-Registry-Purge-Props': 'True'}
for k, v in headers.iteritems():
another_request.headers[k] = v
output = another_request.get_response(self.api)
self.assertEqual(output.status_int, 403)
|
apache-2.0
| -5,985,045,226,187,337,000
| 39.847893
| 79
| 0.570034
| false
| 3.761119
| true
| false
| false
|
Sid1057/obstacle_detector
|
sample_extra_plane_transformer.py
|
1
|
1461
|
#!/usr/bin/python3
import cv2
import numpy as np
from obstacle_detector.perspective import inv_persp_new
from obstacle_detector.distance_calculator import spline_dist
def video_test(input_video_path=None):
cx = 603
cy = 297
roi_width = 25
roi_length = 90
    px_height_of_roi_length = 352
    # Disabled alternative: derive the pixel height from the spline-based
    # distance model instead of hard-coding it.
    # px_height_of_roi_length = int(
    #     spline_dist.get_rails_px_height_by_distance(roi_length))
    # print(px_height_of_roi_length)
cap = cv2.VideoCapture(
input_video_path \
if input_video_path is not None \
else input('enter video path: '))
ret, frame = cap.read()
while(ret):
ret, frame = cap.read()
transformed_plane, pts1, M = inv_persp_new(
frame, (cx, cy), (roi_width, roi_length),
px_height_of_roi_length, 200)
extra_transformed_plane, pts1, M = inv_persp_new(
frame, (cx, cy), (roi_width, roi_length),
px_height_of_roi_length, 200,
extra_width=200 * 2)
cv2.imshow(
'plane of the way',
transformed_plane)
cv2.imshow(
'plane',
extra_transformed_plane)
cv2.imshow(
'original frame',
frame)
k = cv2.waitKey(1) & 0xff
if k == 27:
break
elif k == ord('s'):
cv2.imwrite('screen.png', extra_transformed_plane)
cap.release()
cv2.destroyAllWindows()
video_test('../../video/6.mp4')
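# Usage note (illustrative): run this sample against any forward-facing road
# clip; press 's' to save the current wide bird's-eye view to screen.png and
# Esc to quit.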
|
mit
| 90,747,233,213,776,740
| 23.35
| 65
| 0.557153
| false
| 3.397674
| false
| false
| false
|
zzjkf2009/Midterm_Astar
|
opencv/platforms/ios/build_framework.py
|
1
|
10978
|
#!/usr/bin/env python
"""
The script builds OpenCV.framework for iOS.
The built framework is universal, it can be used to build app and run it on either iOS simulator or real device.
Usage:
./build_framework.py <outputdir>
By cmake conventions (and especially if you work with the OpenCV repository),
the output dir should not be a subdirectory of the OpenCV source tree.
The script will create <outputdir>, if it's missing, and a few of its subdirectories:
<outputdir>
build/
iPhoneOS-*/
[cmake-generated build tree for an iOS device target]
iPhoneSimulator-*/
[cmake-generated build tree for iOS simulator]
opencv2.framework/
[the framework content]
The script should handle minor OpenCV updates efficiently
- it does not recompile the library from scratch each time.
However, the opencv2.framework directory is erased and recreated on each run.
Adding the --dynamic parameter will build opencv2.framework as an App Store dynamic framework. Only iOS 8+ versions are supported.
"""
from __future__ import print_function
import glob, re, os, os.path, shutil, string, sys, argparse, traceback, multiprocessing
from subprocess import check_call, check_output, CalledProcessError
def execute(cmd, cwd = None):
print("Executing: %s in %s" % (cmd, cwd), file=sys.stderr)
retcode = check_call(cmd, cwd = cwd)
if retcode != 0:
raise Exception("Child returned:", retcode)
def getXCodeMajor():
ret = check_output(["xcodebuild", "-version"])
m = re.match(r'XCode\s+(\d)\..*', ret, flags=re.IGNORECASE)
if m:
return int(m.group(1))
return 0
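# e.g. an "xcodebuild -version" report of "Xcode 8.2\nBuild version 8C38"
# yields 8 (illustrative output; only the leading major-version digit is used).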
class Builder:
def __init__(self, opencv, contrib, dynamic, bitcodedisabled, exclude, targets):
self.opencv = os.path.abspath(opencv)
self.contrib = None
if contrib:
modpath = os.path.join(contrib, "modules")
if os.path.isdir(modpath):
self.contrib = os.path.abspath(modpath)
else:
print("Note: contrib repository is bad - modules subfolder not found", file=sys.stderr)
self.dynamic = dynamic
self.bitcodedisabled = bitcodedisabled
self.exclude = exclude
self.targets = targets
def getBD(self, parent, t):
if len(t[0]) == 1:
res = os.path.join(parent, 'build-%s-%s' % (t[0][0].lower(), t[1].lower()))
else:
res = os.path.join(parent, 'build-%s' % t[1].lower())
if not os.path.isdir(res):
os.makedirs(res)
return os.path.abspath(res)
def _build(self, outdir):
outdir = os.path.abspath(outdir)
if not os.path.isdir(outdir):
os.makedirs(outdir)
mainWD = os.path.join(outdir, "build")
dirs = []
xcode_ver = getXCodeMajor()
if self.dynamic:
alltargets = self.targets
else:
# if we are building a static library, we must build each architecture separately
alltargets = []
for t in self.targets:
for at in t[0]:
current = ( [at], t[1] )
alltargets.append(current)
for t in alltargets:
mainBD = self.getBD(mainWD, t)
dirs.append(mainBD)
cmake_flags = []
if self.contrib:
cmake_flags.append("-DOPENCV_EXTRA_MODULES_PATH=%s" % self.contrib)
if xcode_ver >= 7 and t[1] == 'iPhoneOS' and self.bitcodedisabled == False:
cmake_flags.append("-DCMAKE_C_FLAGS=-fembed-bitcode")
cmake_flags.append("-DCMAKE_CXX_FLAGS=-fembed-bitcode")
self.buildOne(t[0], t[1], mainBD, cmake_flags)
if self.dynamic == False:
self.mergeLibs(mainBD)
self.makeFramework(outdir, dirs)
def build(self, outdir):
try:
self._build(outdir)
except Exception as e:
print("="*60, file=sys.stderr)
print("ERROR: %s" % e, file=sys.stderr)
print("="*60, file=sys.stderr)
traceback.print_exc(file=sys.stderr)
sys.exit(1)
def getToolchain(self, arch, target):
return None
def getCMakeArgs(self, arch, target):
args = [
"cmake",
"-GXcode",
"-DAPPLE_FRAMEWORK=ON",
"-DCMAKE_INSTALL_PREFIX=install",
"-DCMAKE_BUILD_TYPE=Release",
] + ([
"-DBUILD_SHARED_LIBS=ON",
"-DCMAKE_MACOSX_BUNDLE=ON",
"-DCMAKE_XCODE_ATTRIBUTE_CODE_SIGNING_REQUIRED=NO",
] if self.dynamic else [])
if len(self.exclude) > 0:
args += ["-DBUILD_opencv_world=OFF"] if not self.dynamic else []
args += ["-DBUILD_opencv_%s=OFF" % m for m in self.exclude]
return args
def getBuildCommand(self, archs, target):
buildcmd = [
"xcodebuild",
]
if self.dynamic:
buildcmd += [
"IPHONEOS_DEPLOYMENT_TARGET=8.0",
"ONLY_ACTIVE_ARCH=NO",
]
for arch in archs:
buildcmd.append("-arch")
buildcmd.append(arch.lower())
else:
arch = ";".join(archs)
buildcmd += [
"IPHONEOS_DEPLOYMENT_TARGET=6.0",
"ARCHS=%s" % arch,
]
buildcmd += [
"-sdk", target.lower(),
"-configuration", "Release",
"-parallelizeTargets",
"-jobs", str(multiprocessing.cpu_count()),
] + (["-target","ALL_BUILD"] if self.dynamic else [])
return buildcmd
def getInfoPlist(self, builddirs):
return os.path.join(builddirs[0], "ios", "Info.plist")
def buildOne(self, arch, target, builddir, cmakeargs = []):
# Run cmake
toolchain = self.getToolchain(arch, target)
cmakecmd = self.getCMakeArgs(arch, target) + \
(["-DCMAKE_TOOLCHAIN_FILE=%s" % toolchain] if toolchain is not None else [])
if target.lower().startswith("iphoneos"):
cmakecmd.append("-DENABLE_NEON=ON")
cmakecmd.append(self.opencv)
cmakecmd.extend(cmakeargs)
execute(cmakecmd, cwd = builddir)
# Clean and build
clean_dir = os.path.join(builddir, "install")
if os.path.isdir(clean_dir):
shutil.rmtree(clean_dir)
buildcmd = self.getBuildCommand(arch, target)
execute(buildcmd + ["-target", "ALL_BUILD", "build"], cwd = builddir)
execute(["cmake", "-P", "cmake_install.cmake"], cwd = builddir)
def mergeLibs(self, builddir):
res = os.path.join(builddir, "lib", "Release", "libopencv_merged.a")
libs = glob.glob(os.path.join(builddir, "install", "lib", "*.a"))
libs3 = glob.glob(os.path.join(builddir, "install", "share", "OpenCV", "3rdparty", "lib", "*.a"))
print("Merging libraries:\n\t%s" % "\n\t".join(libs + libs3), file=sys.stderr)
execute(["libtool", "-static", "-o", res] + libs + libs3)
def makeFramework(self, outdir, builddirs):
name = "opencv2"
# set the current dir to the dst root
framework_dir = os.path.join(outdir, "%s.framework" % name)
if os.path.isdir(framework_dir):
shutil.rmtree(framework_dir)
os.makedirs(framework_dir)
if self.dynamic:
dstdir = framework_dir
libname = "opencv2.framework/opencv2"
else:
dstdir = os.path.join(framework_dir, "Versions", "A")
libname = "libopencv_merged.a"
# copy headers from one of build folders
shutil.copytree(os.path.join(builddirs[0], "install", "include", "opencv2"), os.path.join(dstdir, "Headers"))
# make universal static lib
libs = [os.path.join(d, "lib", "Release", libname) for d in builddirs]
lipocmd = ["lipo", "-create"]
lipocmd.extend(libs)
lipocmd.extend(["-o", os.path.join(dstdir, name)])
print("Creating universal library from:\n\t%s" % "\n\t".join(libs), file=sys.stderr)
execute(lipocmd)
# dynamic framework has different structure, just copy the Plist directly
if self.dynamic:
resdir = dstdir
shutil.copyfile(self.getInfoPlist(builddirs), os.path.join(resdir, "Info.plist"))
else:
# copy Info.plist
resdir = os.path.join(dstdir, "Resources")
os.makedirs(resdir)
shutil.copyfile(self.getInfoPlist(builddirs), os.path.join(resdir, "Info.plist"))
# make symbolic links
links = [
(["A"], ["Versions", "Current"]),
(["Versions", "Current", "Headers"], ["Headers"]),
(["Versions", "Current", "Resources"], ["Resources"]),
(["Versions", "Current", name], [name])
]
for l in links:
s = os.path.join(*l[0])
d = os.path.join(framework_dir, *l[1])
os.symlink(s, d)
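        # Resulting static-framework layout (sketch of the structure produced
        # by the copies and symlinks above):
        #   opencv2.framework/
        #     Versions/A/{Headers, Resources, opencv2}
        #     Versions/Current -> A
        #     Headers   -> Versions/Current/Headers
        #     Resources -> Versions/Current/Resources
        #     opencv2   -> Versions/Current/opencv2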
class iOSBuilder(Builder):
def getToolchain(self, arch, target):
toolchain = os.path.join(self.opencv, "platforms", "ios", "cmake", "Toolchains", "Toolchain-%s_Xcode.cmake" % target)
return toolchain
def getCMakeArgs(self, arch, target):
arch = ";".join(arch)
args = Builder.getCMakeArgs(self, arch, target)
args = args + [
'-DIOS_ARCH=%s' % arch
]
return args
if __name__ == "__main__":
folder = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
parser = argparse.ArgumentParser(description='The script builds OpenCV.framework for iOS.')
parser.add_argument('out', metavar='OUTDIR', help='folder to put built framework')
parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)')
parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)')
parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework')
parser.add_argument('--dynamic', default=False, action='store_true', help='build dynamic framework (default is "False" - builds static framework)')
parser.add_argument('--disable-bitcode', default=False, dest='bitcodedisabled', action='store_true', help='disable bitcode (enabled by default)')
args = parser.parse_args()
b = iOSBuilder(args.opencv, args.contrib, args.dynamic, args.bitcodedisabled, args.without,
[
(["armv7", "arm64"], "iPhoneOS"),
] if os.environ.get('BUILD_PRECOMMIT', None) else
[
(["armv7", "armv7s", "arm64"], "iPhoneOS"),
(["i386", "x86_64"], "iPhoneSimulator"),
])
b.build(args.out)
|
mit
| -2,232,820,552,741,659,100
| 37.250871
| 159
| 0.578338
| false
| 3.771213
| false
| false
| false
|
mrcodehang/cqut-chat-server
|
configs/__init__.py
|
1
|
1131
|
configs = {}
configs['app_key'] = 'cbe36a100c9977c74c296a6777e920ec'
configs['enviroment'] = 'development'
configs['appid'] = '12353'
configs['content'] = '【CQUT-CHAT】您的验证码是:'  # SMS prefix: "[CQUT-CHAT] Your verification code is:"
def save_config(key, value):
configs[key] = value
def get_config(key):
return configs.get(key)
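# English glosses for the Chinese error messages below (added for readability):
#   1: username must be 4-16 characters, with no spaces or quotes
#   2: password must be 6-16 characters, with no quotes
#   3: username already taken
#   4: mobile number already taken
#   5: mobile number format is invalid
#   6: wrong verification code, please request a new one
#   7: login account (mobile number / username) must not be empty
#   8: token is invalid or has expired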
username_invalid = { 'code': 1, 'msg': '用户名长度在4到16之间, 不能有空格, 引号' }
password_invalid = { 'code': 2, 'msg': '密码长度在6到16之间, 不能有引号' }
username_unique = { 'code': 3, 'msg': '用户名不能重复' }
mob_number_unique = { 'code': 4, 'msg': '手机号不能重复' }
mob_number_invalid = { 'code': 5, 'msg': '手机号格式不合法' }
vcode_invalid = { 'code': 6, 'msg': '验证码错误, 重新发送验证码' }
account_is_none = { 'code': 7, 'msg': '登陆账号(手机号/用户名)不能为空' }
token_invalid = { 'code':8, 'msg': 'token不正确或者token已过期' }
# __all__ entries must be strings for "from configs import *" to work
__all__ = ['save_config', 'get_config',
    'username_invalid', 'password_invalid',
    'username_unique', 'mob_number_unique',
    'vcode_invalid', 'account_is_none', 'token_invalid'
]
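# Illustrative usage from another module (hypothetical caller):
#   from configs import save_config, get_config
#   save_config('sms_sign', 'CQUT-CHAT')
#   assert get_config('app_key') == 'cbe36a100c9977c74c296a6777e920ec'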
|
mpl-2.0
| 8,151,344,901,222,302,000
| 31.551724
| 66
| 0.625663
| false
| 2.104911
| true
| false
| false
|
DigiThinkIT/stem
|
test/integ/process.py
|
1
|
4793
|
"""
Tests the stem.process functions with various use cases.
"""
import shutil
import subprocess
import tempfile
import time
import unittest
import stem.prereq
import stem.process
import stem.socket
import stem.util.system
import stem.version
import test.runner
try:
# added in python 3.3
from unittest.mock import patch
except ImportError:
from mock import patch
class TestProcess(unittest.TestCase):
def setUp(self):
self.data_directory = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.data_directory)
def test_launch_tor_with_config(self):
"""
Exercises launch_tor_with_config.
"""
if test.runner.only_run_once(self, 'test_launch_tor_with_config'):
return
# Launch tor without a torrc, but with a control port. Confirms that this
# works by checking that we're still able to access the new instance.
runner = test.runner.get_runner()
tor_process = stem.process.launch_tor_with_config(
tor_cmd = runner.get_tor_command(),
config = {
'SocksPort': '2777',
'ControlPort': '2778',
'DataDirectory': self.data_directory,
},
completion_percent = 5
)
control_socket = None
try:
control_socket = stem.socket.ControlPort(port = 2778)
stem.connection.authenticate(control_socket, chroot_path = runner.get_chroot())
# exercises the socket
control_socket.send('GETCONF ControlPort')
getconf_response = control_socket.recv()
self.assertEquals('ControlPort=2778', str(getconf_response))
finally:
if control_socket:
control_socket.close()
tor_process.kill()
tor_process.wait()
def test_launch_tor_with_timeout(self):
"""
Runs launch_tor where it times out before completing.
"""
if test.runner.only_run_once(self, 'test_launch_tor_with_timeout'):
return
runner = test.runner.get_runner()
start_time = time.time()
config = {'SocksPort': '2777', 'DataDirectory': self.data_directory}
self.assertRaises(OSError, stem.process.launch_tor_with_config, config, runner.get_tor_command(), 100, None, 2)
runtime = time.time() - start_time
if not (runtime > 2 and runtime < 3):
self.fail('Test should have taken 2-3 seconds, took %i instead' % runtime)
@patch('os.getpid')
def test_take_ownership_via_pid(self, getpid_mock):
"""
Checks that the tor process quits after we do if we set take_ownership. To
test this we spawn a process and trick tor into thinking that it is us.
"""
if not stem.util.system.is_available('sleep'):
test.runner.skip(self, "('sleep' command is unavailable)")
return
elif test.runner.only_run_once(self, 'test_take_ownership_via_pid'):
return
elif test.runner.require_version(self, stem.version.Requirement.TAKEOWNERSHIP):
return
sleep_process = subprocess.Popen(['sleep', '60'])
getpid_mock.return_value = str(sleep_process.pid)
tor_process = stem.process.launch_tor_with_config(
tor_cmd = test.runner.get_runner().get_tor_command(),
config = {
'SocksPort': '2777',
'ControlPort': '2778',
'DataDirectory': self.data_directory,
},
completion_percent = 5,
take_ownership = True,
)
# Kill the sleep command. Tor should quit shortly after.
sleep_process.kill()
sleep_process.communicate()
# tor polls for the process every fifteen seconds so this may take a
# while...
for seconds_waited in xrange(30):
if tor_process.poll() == 0:
return # tor exited
time.sleep(1)
self.fail("tor didn't quit after the process that owned it terminated")
def test_take_ownership_via_controller(self):
"""
Checks that the tor process quits after the controller that owns it
    connects, then disconnects.
"""
if test.runner.only_run_once(self, 'test_take_ownership_via_controller'):
return
elif test.runner.require_version(self, stem.version.Requirement.TAKEOWNERSHIP):
return
tor_process = stem.process.launch_tor_with_config(
tor_cmd = test.runner.get_runner().get_tor_command(),
config = {
'SocksPort': '2777',
'ControlPort': '2778',
'DataDirectory': self.data_directory,
},
completion_percent = 5,
take_ownership = True,
)
# We're the controlling process. Just need to connect then disconnect.
controller = stem.control.Controller.from_port(port = 2778)
controller.authenticate()
controller.close()
# give tor a few seconds to quit
for seconds_waited in xrange(5):
if tor_process.poll() == 0:
return # tor exited
time.sleep(1)
self.fail("tor didn't quit after the controller that owned it disconnected")
|
lgpl-3.0
| -1,561,313,229,695,803,600
| 27.700599
| 115
| 0.662007
| false
| 3.724165
| true
| false
| false
|
josdaza/deep-toolbox
|
TensorFlow/seq2seq/Main_simple.py
|
1
|
4953
|
import numpy as np
import tensorflow as tf
import helpers
tf.reset_default_graph()
PAD = 0
EOS = 1
VOCAB_SIZE = 10
EMBEDDINGS_SIZE = 20
ENC_HIDDEN_UNITS = 20
DEC_HIDDEN_UNITS = 20
# Fake Function to Emulate a series of encoder and decoder sequences
# Given encoder_inputs [5, 6, 7], decoder_targets would be [5, 6, 7, 1],
# where 1 is for EOS, and decoder_inputs would be [1, 5, 6, 7]
# decoder_inputs are lagged by 1 step, passing previous token as input at current step.
def next_feed():
batch = next(batches)
encoder_inputs_, _ = helpers.batch(batch)
decoder_targets_, _ = helpers.batch(
[(sequence) + [EOS] for sequence in batch]
)
decoder_inputs_, _ = helpers.batch(
[[EOS] + (sequence) for sequence in batch]
)
return {
encoder_inputs: encoder_inputs_,
decoder_inputs: decoder_inputs_,
decoder_targets: decoder_targets_,
}
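# Worked example of the feed (shapes assumed from helpers.batch, which pads
# sequences and returns time-major arrays): a batch [[5, 6, 7]] gives
#   encoder_inputs  -> [[5], [6], [7]]
#   decoder_inputs  -> [[1], [5], [6], [7]]   (EOS=1 prepended)
#   decoder_targets -> [[5], [6], [7], [1]]   (EOS=1 appended)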
if __name__ == "__main__":
sess = tf.InteractiveSession()
# Loading Data [Toy example]
batch_ = [[6], [3, 4], [9, 8, 7]]
batch_, batch_length_ = helpers.batch(batch_)
print('batch_encoded:\n' + str(batch_))
din_, dlen_ = helpers.batch(np.ones(shape=(3, 1), dtype=np.int32),max_sequence_length=4)
print('decoder inputs:\n' + str(din_))
# Random Initialization of Embeddings --> here we share them for Encoder and Decoder (could be different)
embeddings = tf.Variable(tf.random_uniform([VOCAB_SIZE, EMBEDDINGS_SIZE], -1.0, 1.0), dtype=tf.float32)
# ----- ENCODER -----
encoder_inputs = tf.placeholder(shape=(None, None),
dtype=tf.int32, name='encoder_inputs')# [encoder_max_time, batch_size]
encoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, encoder_inputs)
encoder_cell = tf.contrib.rnn.LSTMCell(ENC_HIDDEN_UNITS)
# RNN
encoder_outputs, encoder_final_state = tf.nn.dynamic_rnn(
encoder_cell, encoder_inputs_embedded,
dtype=tf.float32, time_major=True)
# We are only interested in the Encoder Final State (Thought Vector!),
# to feed (condition) the decoder with it
del encoder_outputs
# ----- DECODER -----
decoder_targets = tf.placeholder(shape=(None, None),
dtype=tf.int32, name='decoder_targets') # [decoder_max_time, batch_size]
decoder_inputs = tf.placeholder(shape=(None, None),
dtype=tf.int32, name='decoder_inputs') # [decoder_max_time, batch_size]
decoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, decoder_inputs)
decoder_cell = tf.contrib.rnn.LSTMCell(DEC_HIDDEN_UNITS)
# RNN
decoder_outputs, decoder_final_state = tf.nn.dynamic_rnn(
decoder_cell, decoder_inputs_embedded,
initial_state=encoder_final_state,
dtype=tf.float32, time_major=True, scope="plain_decoder")
# Projection Layer [max_time, batch_size, hidden_units] --> [max_time, batch_size, VOCAB_SIZE]
decoder_logits = tf.contrib.layers.linear(decoder_outputs, VOCAB_SIZE) # shape=[?, ?, VOCAB_SIZE]
decoder_prediction = tf.argmax(decoder_logits, axis=2) # Predict Output over vocabulary
# ----- LOSS & OPTIMIZATION -----
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=tf.one_hot(decoder_targets, depth=VOCAB_SIZE, dtype=tf.float32),
logits=decoder_logits)
loss = tf.reduce_mean(stepwise_cross_entropy)
train_op = tf.train.AdamOptimizer().minimize(loss)
sess.run(tf.global_variables_initializer())
# Test Forward Pass [To check if Everything is wired correctly!]
pred_ = sess.run(decoder_prediction,
feed_dict={
encoder_inputs: batch_,
decoder_inputs: din_,
})
print('decoder predictions:\n' + str(pred_))
# ----- TRAINING -----
batch_size = 100
batches = helpers.random_sequences(length_from=3, length_to=8,
vocab_lower=2, vocab_upper=10,
batch_size=batch_size)
print('head of the batch:')
for seq in next(batches)[:10]:
print(seq)
loss_track = []
max_batches = 3001
batches_in_epoch = 1000
try:
for batch in range(max_batches):
fd = next_feed()
_, l = sess.run([train_op, loss], fd)
loss_track.append(l)
if batch == 0 or batch % batches_in_epoch == 0:
print('batch {}'.format(batch))
print(' minibatch loss: {}'.format(sess.run(loss, fd)))
predict_ = sess.run(decoder_prediction, fd)
for i, (inp, pred) in enumerate(zip(fd[encoder_inputs].T, predict_.T)):
print(' sample {}:'.format(i + 1))
print(' input > {}'.format(inp))
print(' predicted > {}'.format(pred))
if i >= 2:
break
print()
except KeyboardInterrupt:
print('training interrupted')
|
mit
| -3,960,815,499,132,609,000
| 38.943548
| 109
| 0.608924
| false
| 3.578757
| false
| false
| false
|
jaklinger/nesta_dataflow
|
collect_data/utils/uae/business_directory/dcc.py
|
1
|
3272
|
'''
dcc
----
'''
from bs4 import BeautifulSoup
import logging
import requests
import time
# Local imports
from utils.common.browser import SelfClosingBrowser
from utils.common.datapipeline import DataPipeline
def get_field_from_box(field, box):
''''''
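    # Walks the <li> rows of the box's <ul>; each row is expected to contain two
    # <span>s holding (field name, field value). Returns the value whose name
    # matches `field`, or raises ValueError if no row matches.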
for row in box.find("ul"):
# Accept rows containing spans
try:
spans = row.find_all("span")
except AttributeError:
continue
# Match the first span to the field name
if spans[0].text != field:
continue
# Return the field data
return spans[1].text
raise ValueError("Could not find field "+field)
def get_response_from_url(url, max_tries=3):
'''
Returns response if no ConnectionError exception
'''
n_tries = 0
while True:
n_tries += 1
# Try to get the URL
try:
r = requests.get(url)
return r
# Allow connection error, then retry
except requests.exceptions.ConnectionError as err:
if n_tries == max_tries:
raise err
logging.warning("Connection error to %s", (url))
time.sleep(10)
def run(config):
''''''
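    # Sketch of the flow: page through the category listing with the browser wrapper,
    # fetch each category URL with requests, scrape every "result_box" for company
    # name/website/city, de-duplicate by company name, then write the rows through
    # the DataPipeline.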
# Fire up a browser at the top url
top_url = config["parameters"]["src"]
cat_pages = {}
with SelfClosingBrowser(top_url=top_url) as b:
# Scrape pages until no page found
found_page = True
while found_page:
print("Found page")
# Get the category web pages
html_list = b.find_element_by_class_name("dcci_cat")
list_items = html_list.find_elements_by_tag_name("li")
for item in list_items:
link = item.find_element_by_tag_name("a")
cat_pages[link.text] = link.get_attribute('href')
# Click the next page and get the table
found_page = b.find_and_click_link("Next »")
# Process each category's URL to find companies
data = {}
for cat, url in cat_pages.items():
r = get_response_from_url(url)
# No bad statuses
if r.status_code != 200:
continue
# Loop over text boxes in the soup
soup = BeautifulSoup(r.text, "lxml")
boxes = soup.find_all("div", class_="result_box")
for box in boxes:
# Get the company name
title_box = box.find("div", class_="title")
title_link = title_box.find("a")
company_name = title_link.text
# Get the website
company_url = get_field_from_box("Website", box)
city = get_field_from_box("City", box)
# Check whether this URL has been processed before
if company_name not in data:
data[company_name] = dict(category=[cat], url=company_url,
city=city, company_name=company_name)
else:
data[company_name]["category"].append(cat)
logging.info("\tGot %s rows", len(data))
# Write data
logging.info("\tWriting to table")
with DataPipeline(config) as dp:
for _,row in data.items():
row["category"] = ";".join(row["category"]) # Pretty hacky
dp.insert(row)
|
mit
| 3,339,632,406,249,094,700
| 31.386139
| 79
| 0.558239
| false
| 4.053284
| false
| false
| false
|
ge0rgi/cinder
|
cinder/volume/drivers/netapp/dataontap/fc_cmode.py
|
1
|
5379
|
# Copyright (c) - 2014, Clinton Knight. All rights reserved.
# Copyright (c) - 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for NetApp Data ONTAP (C-mode) FibreChannel storage systems.
"""
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.zonemanager import utils as fczm_utils
@interface.volumedriver
class NetAppCmodeFibreChannelDriver(driver.BaseVD,
driver.ManageableVD):
"""NetApp C-mode FibreChannel volume driver."""
DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_CI"
VERSION = block_cmode.NetAppBlockStorageCmodeLibrary.VERSION
def __init__(self, *args, **kwargs):
super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs)
self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def create_volume(self, volume):
return self.library.create_volume(volume)
def create_volume_from_snapshot(self, volume, snapshot):
return self.library.create_volume_from_snapshot(volume, snapshot)
def create_cloned_volume(self, volume, src_vref):
return self.library.create_cloned_volume(volume, src_vref)
def delete_volume(self, volume):
self.library.delete_volume(volume)
def create_snapshot(self, snapshot):
self.library.create_snapshot(snapshot)
def delete_snapshot(self, snapshot):
self.library.delete_snapshot(snapshot)
def get_volume_stats(self, refresh=False):
return self.library.get_volume_stats(refresh,
self.get_filter_function(),
self.get_goodness_function())
def get_default_filter_function(self):
return self.library.get_default_filter_function()
def get_default_goodness_function(self):
return self.library.get_default_goodness_function()
def extend_volume(self, volume, new_size):
self.library.extend_volume(volume, new_size)
def ensure_export(self, context, volume):
return self.library.ensure_export(context, volume)
def create_export(self, context, volume, connector):
return self.library.create_export(context, volume)
def remove_export(self, context, volume):
self.library.remove_export(context, volume)
def manage_existing(self, volume, existing_ref):
return self.library.manage_existing(volume, existing_ref)
def manage_existing_get_size(self, volume, existing_ref):
return self.library.manage_existing_get_size(volume, existing_ref)
def unmanage(self, volume):
return self.library.unmanage(volume)
@fczm_utils.add_fc_zone
def initialize_connection(self, volume, connector):
return self.library.initialize_connection_fc(volume, connector)
@fczm_utils.remove_fc_zone
def terminate_connection(self, volume, connector, **kwargs):
return self.library.terminate_connection_fc(volume, connector,
**kwargs)
def get_pool(self, volume):
return self.library.get_pool(volume)
def create_consistencygroup(self, context, group):
return self.library.create_consistencygroup(group)
def delete_consistencygroup(self, context, group, volumes):
return self.library.delete_consistencygroup(group, volumes)
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
return self.library.update_consistencygroup(group, add_volumes=None,
remove_volumes=None)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.create_cgsnapshot(cgsnapshot, snapshots)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
return self.library.delete_cgsnapshot(cgsnapshot, snapshots)
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
return self.library.create_consistencygroup_from_src(
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)
def failover_host(self, context, volumes, secondary_id=None):
return self.library.failover_host(
context, volumes, secondary_id=secondary_id)
|
apache-2.0
| -4,857,598,255,072,615,000
| 38.844444
| 78
| 0.671314
| false
| 4.005212
| false
| false
| false
|
StackStorm/mistral
|
mistral/tests/unit/engine/test_task_publish.py
|
1
|
2807
|
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workbooks as wb_service
from mistral.tests.unit.engine import base
from mistral.workflow import states
# Use the set_default method to set the value; otherwise, in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
SIMPLE_WORKBOOK = """
---
version: '2.0'
name: wb1
workflows:
wf1:
type: direct
tasks:
t1:
action: std.echo output="Task 1"
publish:
v1: <% $.t1.get($foobar) %>
on-success:
- t2
t2:
action: std.echo output="Task 2"
on-success:
- t3
t3:
action: std.echo output="Task 3"
"""
class TaskPublishTest(base.EngineTestCase):
@mock.patch.object(
std_actions.EchoAction,
'run',
mock.MagicMock(
side_effect=[
'Task 1', # Mock task1 success.
'Task 2', # Mock task2 success.
'Task 3' # Mock task3 success.
]
)
)
def test_publish_failure(self):
wb_service.create_workbook_v2(SIMPLE_WORKBOOK)
# Run workflow and fail task.
wf_ex = self.engine.start_workflow('wb1.wf1')
self.await_workflow_error(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
self.assertEqual(states.ERROR, wf_ex.state)
self.assertEqual(1, len(task_execs))
task_1_ex = self._assert_single_item(task_execs, name='t1')
# Task 1 should have failed.
self.assertEqual(states.ERROR, task_1_ex.state)
self.assertIn('Can not evaluate YAQL expression', task_1_ex.state_info)
# Action execution of task 1 should have succeeded.
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id
)
self.assertEqual(1, len(task_1_action_exs))
self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)
|
apache-2.0
| -4,910,604,444,682,694,000
| 28.861702
| 79
| 0.625223
| false
| 3.589514
| false
| false
| false
|
mlwithtf/mlwithtf
|
chapter_10/models.py
|
1
|
1545
|
import tensorflow as tf
def compute_loss(logits, labels):
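    # Sketch of what is computed (assuming integer class labels): the mean sparse
    # softmax cross-entropy over the batch, plus the mean of any losses registered
    # in the tf.GraphKeys.REGULARIZATION_LOSSES collection.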
labels = tf.squeeze(tf.cast(labels, tf.int32))
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    cross_entropy_loss = tf.reduce_mean(cross_entropy)
reg_loss = tf.reduce_mean(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
return cross_entropy_loss + reg_loss, cross_entropy_loss, reg_loss
def compute_accuracy(logits, labels):
labels = tf.squeeze(tf.cast(labels, tf.int32))
batch_predictions = tf.cast(tf.argmax(logits, 1), tf.int32)
predicted_correctly = tf.equal(batch_predictions, labels)
accuracy = tf.reduce_mean(tf.cast(predicted_correctly, tf.float32))
return accuracy
def get_learning_rate(global_step, initial_value, decay_steps, decay_rate):
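    # With staircase=True this follows
    #   lr = initial_value * decay_rate ** floor(global_step / decay_steps)
    # i.e. the learning rate drops in discrete steps rather than decaying continuously.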
learning_rate = tf.train.exponential_decay(initial_value, global_step, decay_steps, decay_rate, staircase=True)
return learning_rate
def train(total_loss, learning_rate, global_step):
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(total_loss, global_step)
return train_op
def average_gradients(gradients):
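    # `gradients` is expected to be a list with one entry per tower/GPU, each entry being
    # the list of (gradient, variable) pairs from Optimizer.compute_gradients.
    # zip(*gradients) groups the pairs belonging to the same variable across towers; the
    # per-tower gradients are stacked on a new axis 0, averaged, and paired with the
    # variable taken from the first tower.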
average_grads = []
for grad_and_vars in zip(*gradients):
grads = []
for g, _ in grad_and_vars:
grads.append(tf.expand_dims(g, 0))
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
|
apache-2.0
| -2,445,733,855,455,899,000
| 32.586957
| 115
| 0.68932
| false
| 3.322581
| false
| false
| false
|
devilry/devilry-django
|
devilry/devilry_admin/cradminextensions/listfilter/listfilter_relateduser.py
|
1
|
3399
|
from django.conf import settings
from django.db import models
from django.db.models.functions import Lower, Concat
from django.utils.translation import gettext_lazy, pgettext_lazy
from cradmin_legacy.viewhelpers import listfilter
from cradmin_legacy.viewhelpers.listfilter.basefilters.single import abstractselect
from devilry.devilry_admin.cradminextensions.listfilter import listfilter_tags
class OrderRelatedStudentsFilter(listfilter.django.single.select.AbstractOrderBy):
def get_ordering_options(self):
if settings.CRADMIN_LEGACY_USE_EMAIL_AUTH_BACKEND:
shortname_ascending_label = gettext_lazy('Email')
shortname_descending_label = gettext_lazy('Email descending')
else:
shortname_ascending_label = gettext_lazy('Username')
shortname_descending_label = gettext_lazy('Username descending')
# NOTE: We use Concat below to get sorting that works even when the user
# does not have a fullname, and we use Lower to sort ignoring case.
return [
('', {
'label': gettext_lazy('Name'),
'order_by': [Lower(Concat(
'user__fullname', 'user__shortname', output_field=models.CharField()))],
}),
('name_descending', {
'label': gettext_lazy('Name descending'),
'order_by': [Lower(Concat(
'user__fullname', 'user__shortname', output_field=models.CharField())).desc()],
}),
('lastname_ascending', {
'label': gettext_lazy('Last name'),
'order_by': [Lower('user__lastname')],
}),
('lastname_descending', {
'label': gettext_lazy('Last name descending'),
'order_by': [Lower('user__lastname').desc()],
}),
('shortname_ascending', {
'label': shortname_ascending_label,
'order_by': ['user__shortname'],
}),
('shortname_descending', {
'label': shortname_descending_label,
'order_by': ['-user__shortname'],
}),
]
def get_slug(self):
return 'orderby'
def get_label(self):
return pgettext_lazy('orderby', 'Sort')
class IsActiveFilter(listfilter.django.single.select.Boolean):
def get_slug(self):
return 'active'
def get_label(self):
return pgettext_lazy('listfilter relateduser', 'Is active?')
class Search(listfilter.django.single.textinput.Search):
def get_modelfields(self):
return [
'user__fullname',
'user__shortname',
'periodtag__tag',
]
def get_label_is_screenreader_only(self):
return True
def get_slug(self):
return 'search'
def get_label(self):
return gettext_lazy('Search')
def get_placeholder(self):
return gettext_lazy('Search listed objects ...')
class TagSelectFilter(listfilter_tags.AbstractTagSelectFilter):
def filter(self, queryobject):
cleaned_value = self.get_cleaned_value() or ''
if cleaned_value == self.get_notag_value():
queryobject = queryobject.filter(periodtag__isnull=True)
elif cleaned_value != '':
queryobject = queryobject.filter(periodtag__id=cleaned_value)
return queryobject
|
bsd-3-clause
| -3,571,475,830,365,769,700
| 35.159574
| 99
| 0.597823
| false
| 4.206683
| false
| false
| false
|
tonyqiu1019/BulletAPI
|
models/api/models.py
|
1
|
1264
|
from django.db import models
# the bullet object
class Bullet(models.Model):
# required fields
content = models.CharField(max_length=512)
ret_time = models.DateTimeField(blank=True, null=True)
post_time = models.DateTimeField(blank=True, null=True)
# optional fields
info = models.ForeignKey('Info', on_delete=models.SET_NULL,
blank=True, null=True)
color = models.CharField(max_length=6, blank=True, default="ffffff")
font_size = models.PositiveSmallIntegerField(blank=True, default=12)
num_repeat = models.PositiveSmallIntegerField(blank=True, default=1)
display_mode = models.CharField(max_length=1, blank=True, choices=(
('f', 'fixed'),
('s', 'scroll'),
), default='s')
def __unicode__(self):
ret = self.content[:10]+'...' if len(self.content) > 13 else self.content
return u'%s' % (ret,)
class Meta:
ordering = ['ret_time', 'post_time']
# the user info about a bullet
class Info(models.Model):
fingerprint = models.CharField(max_length=64, unique=True)
user_agent = models.CharField(max_length=1024, blank=True)
is_banned = models.BooleanField(blank=True, default=False)
def __unicode__(self):
return u'%s' % (self.fingerprint,)
|
mit
| 3,125,209,969,669,040,000
| 35.114286
| 81
| 0.662184
| false
| 3.611429
| false
| false
| false
|
siko/xyz-get
|
src/ext/ipnli.py
|
1
|
2052
|
#!/usr/bin/env python
# -- coding:utf-8 --
import os, re, sys, time, datetime
import urllib,urllib2,threading,requests
from bs4 import BeautifulSoup
downurls = []
threads=[]
f=open('log.txt', 'w+')
class downloader(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.info=[]
def run(self):
for i in self.info:
            print 'Downloading %s\r\n' % i['url']
try:
# urllib.urlretrieve(i['url'], i['name'].decode('utf8'))
urllib.urlretrieve(i['url'], i['name'])
except Exception as e:
f.write('url:' + i['url'] + '\r\n' + str(e) + '\r\n')
def createurls(channel,begin,end):
# print channel
page = requests.get(channel)
soup = BeautifulSoup(page.text)
articles = soup.findAll('article')[begin:end]
for art in articles:
filename = art.find('h1').find('a').contents[2].replace(' ','').replace('\n','')
audiourl = channel+art.find('a',class_='button fa-download')['href']
downurls.append([filename,audiourl])
def downfiles():
i=0
for g in downurls:
name=g[0] + ".mp3"
path=g[1]
print 'name=',name,'path=',path
if i%6==0:
t=downloader()
threads.append(t)
t.info.append({'url':path, 'name':name})
i=i+1
if __name__ == '__main__':
    channel = int(input('Downloads come from "IT Gonglun" (http://ipn.li/). Choose a show - 1: IT Gonglun  2: Kernel Panic  3: Taiyi Laile  4: Wei Zhi Dao : '))
channels = {
1: 'itgonglun/',
2: 'kernelpanic/',
3: 'taiyilaile/',
4: 'weizhidao/',
}
channelurl = 'http://ipn.li/'+channels.get(channel,'itgonglun/')
    begin = int(input('Enter the starting episode number: '))
    end = int(input('Enter the ending episode number: '))
createurls(channelurl,begin,end)
downfiles()
print 'threads length is : %d' % len(threads)
for t in threads:
t.start()
time.sleep(1)
f.flush()
for t in threads:
t.join()
|
mit
| -2,164,372,479,213,248,500
| 25.808219
| 91
| 0.542434
| false
| 2.950226
| false
| false
| false
|
z23han/Wrangling-MongoDB
|
Lesson_5_Analyzing_Data/14-Using_push/push.py
|
1
|
2297
|
#!/usr/bin/env python
"""
$push is similar to $addToSet. The difference is that rather than accumulating only unique values,
it aggregates all values into an array.
Using an aggregation query, count the number of tweets for each user. In the same $group stage,
use $push to accumulate all the tweet texts for each user. Limit your output to the 5 users
with the most tweets.
Your result documents should include only the fields:
"_id" (screen name of user),
"count" (number of tweets found for the user),
"tweet_texts" (a list of the tweet texts found for the user).
Please modify only the 'make_pipeline' function so that it creates and returns an aggregation
pipeline that can be passed to the MongoDB aggregate function. As in our examples in this lesson,
the aggregation pipeline should be a list of one or more dictionary objects.
Please review the lesson examples if you are unsure of the syntax.
Your code will be run against a MongoDB instance that we have provided. If you want to run this code
locally on your machine, you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
Please note that the dataset you are using here is a smaller version of the twitter dataset used in
examples in this lesson. If you attempt some of the same queries that we looked at in the lesson
examples, your results will be different.
"""
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def make_pipeline():
# complete the aggregation pipeline
pipeline = [
{"$group": {"_id": "$user.screen_name",
"count": {"$sum": 1},
"tweet_texts": {
"$push": "$text"
}}},
{"$sort": {"count": -1}},
{"$limit": 5}
]
return pipeline
def aggregate(db, pipeline):
result = db.tweets.aggregate(pipeline)
return result
if __name__ == '__main__':
db = get_db('twitter')
pipeline = make_pipeline()
result = aggregate(db, pipeline)
#assert len(result["result"]) == 5
#assert result["result"][0]["count"] > result["result"][4]["count"]
import pprint
pprint.pprint(result)
|
agpl-3.0
| -3,282,531,151,248,877,000
| 38.603448
| 101
| 0.684806
| false
| 4.079929
| false
| false
| false
|
aaiijmrtt/WORDVECTORS
|
shiftreducer.py
|
1
|
4286
|
import neuralnet, numpy
SHIFT = -1
REDUCELEFT = 0
REDUCERIGHT = 1
def transition(stack, queue, arcs, dependencies):
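    # Oracle sketch (inferred from the checks below): if the top two stack items form a
    # gold dependency, return that dependency so the caller reduces; for the
    # (head=stack[-2], dependent=stack[-1]) case, only reduce once every gold dependency
    # headed by stack[-1] is already in `arcs`; otherwise signal a SHIFT.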
if len(stack) < 2:
return (SHIFT, SHIFT, SHIFT)
for dependency in dependencies:
if stack[-1] == dependency[0] and stack[-2] == dependency[1]:
return dependency
for dependency in dependencies:
if stack[-2] == dependency[0] and stack[-1] == dependency[1]:
flag = True
for dependence in dependencies:
if dependence[0] == stack[-1] and dependence not in arcs:
flag = False
if flag:
return dependency
return (SHIFT, SHIFT, SHIFT)
def trainoracle(inputs, outputs, oracle, labels):
vectorizednonlinearity = neuralnet.VECTORIZEDNONLINEARITY
embeddingsize = inputs[0].shape[0]
stack = [inputs[0]]
queue = inputs[1: ]
stackindices = [0]
queueindices = range(1, len(inputs))
arcs = list()
ins = list()
outs = list()
while len(stack) > 1 or len(queue) > 0:
besttransition = transition(stackindices, queueindices, arcs, outputs)
if len(stack) > 1 and len(queue) > 0:
ins.append(numpy.concatenate([stack[-2], stack[-1], queue[0]]))
elif len(stack) > 1:
ins.append(numpy.concatenate([stack[-2], stack[-1], numpy.zeros((embeddingsize, 1), dtype = float)]))
else:
ins.append(numpy.concatenate([numpy.zeros((embeddingsize, 1), dtype = float), stack[-1], queue[0]]))
outs.append(numpy.zeros((2 * (labels + 1) + 1, 1), dtype = float))
outs[-1][besttransition[2]][0] = 1.0
if besttransition == (SHIFT, SHIFT, SHIFT):
stack.append(queue.pop(0))
stackindices.append(queueindices.pop(0))
else:
arcs.append(besttransition)
# stack[stackindices.index(besttransition[0])] = neuralnet.forwardpass(vectorin, oracle['weights'], oracle['biases'], vectorizednonlinearity)[1]
del stack[stackindices.index(besttransition[1])]
del stackindices[stackindices.index(besttransition[1])]
for i in range(len(inputs)):
oracle['weights'], oracle['biases'] = neuralnet.train(ins, outs, oracle['weights'], oracle['biases'], alpha = [0.05, 0.05], gamma = [0.5, 0.5], history = i, hiddeninitializer = [numpy.zeros((embeddingsize, 1), dtype = float), numpy.zeros((2 * labels + 3, 1), dtype = float)])
return oracle
def shiftreduce(inputs, oracle):
vectorizednonlinearity = neuralnet.VECTORIZEDNONLINEARITY
embeddingsize = inputs[0].shape[0]
classes = oracle['biases'][1].shape[0]
stack = [inputs[0]]
queue = inputs[1: ]
stackindices = [0]
queueindices = range(1, len(inputs))
arcs = list()
hidden = [numpy.zeros((embeddingsize, 1), dtype = float), numpy.zeros((classes, 1), dtype = float)]
while len(stack) > 1 or len(queue) > 0:
bestscore = float("-inf")
besttransition = None
bestcombination = None
bestlabel = None
if len(stack) > 1:
if len(queue) > 0:
vectorin = numpy.concatenate([stack[-2], stack[-1], queue[0]])
else:
vectorin = numpy.concatenate([stack[-2], stack[-1], numpy.zeros((embeddingsize, 1), dtype = float)])
activations = neuralnet.forwardpass(vectorin, oracle['weights'], oracle['biases'], vectorizednonlinearity, hidden)
if numpy.max(activations[-1][0: -1]) > bestscore:
bestscore = numpy.max(activations[2][0: -1])
bestcombination = activations[1]
bestlabel = numpy.argmax(activations[2][0: -1])
besttransition = REDUCELEFT if bestlabel < classes // 2 else REDUCERIGHT
besthidden = activations[1: ]
if len(queue) > 0:
if len(stack) > 1:
vectorin = numpy.concatenate([stack[-2], stack[-1], queue[0]])
else:
vectorin = numpy.concatenate([numpy.zeros((embeddingsize, 1), dtype = float), stack[-1], queue[0]])
activations = neuralnet.forwardpass(vectorin, oracle['weights'], oracle['biases'], vectorizednonlinearity, hidden)
if activations[-1][-1][0] > bestscore:
bestscore = activations[2][-1][0]
bestcombination = None
bestlabel = SHIFT
besttransition = SHIFT
besthidden = activations[1: ]
hidden = besthidden
if besttransition == SHIFT:
stack.append(queue.pop(0))
stackindices.append(queueindices.pop(0))
else:
arcs.append((stackindices[-1 - besttransition] + 1, stackindices[-2 + besttransition] + 1, bestlabel))
del stack[-2 + besttransition]
del stackindices[-2 + besttransition]
# stack[-1] = bestcombination
arcs.append((0, stackindices[0] + 1, REDUCERIGHT))
return arcs
|
mit
| 8,538,488,397,566,068,000
| 40.211538
| 277
| 0.685254
| false
| 3.110305
| false
| false
| false
|
jittat/ku-eng-direct-admission
|
scripts/import_final_results.py
|
1
|
1966
|
import codecs
import sys
if len(sys.argv)!=2:
print "Usage: import_final_results [results.csv]"
quit()
file_name = sys.argv[1]
from django.conf import settings
from django_bootstrap import bootstrap
bootstrap(__file__)
from result.models import AdmissionResult
from application.models import Applicant, SubmissionInfo, PersonalInfo, Major
applicants = []
def read_results():
f = codecs.open(file_name, encoding="utf-8", mode="r")
lines = f.readlines()
for l in lines[1:]:
items = l.strip().split(',')
app = {'national_id': items[0],
'major': items[2] }
applicants.append(app)
def standardize_major_number(major):
return ('0' * (3 - len(major))) + major
def import_results():
print 'Importing results...'
majors = Major.get_all_majors()
major_dict = dict([(m.number, m) for m in majors])
not_found_list = []
app_order = 1
for a in applicants:
personal_infos = (PersonalInfo.objects
.filter(national_id=a['national_id'])
.select_related(depth=1))
if len(personal_infos)==0:
print "NOT-FOUND:", a['national_id']
not_found_list.append(a['national_id'])
continue
for pinfo in personal_infos:
applicant = pinfo.applicant
try:
aresult = applicant.admission_result
except:
aresult = AdmissionResult.new_for_applicant(applicant)
major_number = standardize_major_number(a['major'])
major = major_dict[major_number]
aresult.is_final_admitted = True
aresult.final_admitted_major = major
aresult.save()
print a['national_id']
print '-------NOT-FOUND-------'
for nid in not_found_list:
print nid
def main():
read_results()
import_results()
if __name__ == '__main__':
main()
|
agpl-3.0
| 2,766,383,835,464,029,000
| 24.532468
| 77
| 0.573245
| false
| 3.73055
| false
| false
| false
|
glaslos/waechter
|
samples/hello_job_lock.py
|
1
|
1122
|
# Waechter - Job Scheduling Helper
# Copyright (C) 2016 Lukas Rist
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from time import sleep
import waechter.scheduler
class HelloJobLock(waechter.scheduler.BaseJob):
def __init__(self, interval=None):
super(HelloJobLock, self).__init__(interval)
self.interval = interval if interval else 1
@classmethod
def work(cls):
print('hello work lock')
sleep(1.5)
if __name__ == '__main__':
main_worker = waechter.scheduler.JobScheduler().run()
|
gpl-3.0
| -2,088,577,196,151,638
| 32
| 71
| 0.720143
| false
| 3.895833
| false
| false
| false
|
lesion/indicator-stickynotes
|
stickynotes/backend.py
|
1
|
8331
|
# Copyright © 2012-2015 Umang Varma <umang.me@gmail.com>
#
# This file is part of indicator-stickynotes.
#
# indicator-stickynotes is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# indicator-stickynotes is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# indicator-stickynotes. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
import uuid
import json
from os.path import expanduser
from stickynotes.info import FALLBACK_PROPERTIES
class Note:
def __init__(self, content=None, gui_class=None, noteset=None,
category=None):
self.gui_class = gui_class
self.noteset = noteset
content = content or {}
self.uuid = content.get('uuid')
self.body = content.get('body','')
self.title = content.get('title','')
self.properties = content.get("properties", {})
self.category = category or content.get("cat", "")
if not self.category in self.noteset.categories:
self.category = ""
last_modified = content.get('last_modified')
if last_modified:
self.last_modified = datetime.strptime(last_modified,
"%Y-%m-%dT%H:%M:%S")
else:
self.last_modified = datetime.now()
# Don't create GUI until show is called
self.gui = None
def extract(self):
if not self.uuid:
self.uuid = str(uuid.uuid4())
if self.gui != None:
self.gui.update_note()
self.properties = self.gui.properties()
return {"uuid":self.uuid, "body":self.body,
"last_modified":self.last_modified.strftime(
"%Y-%m-%dT%H:%M:%S"), "properties":self.properties,
"cat": self.category, "title": self.title}
def update(self,body=None,title=None):
if not body == None:
self.body = body
self.last_modified = datetime.now()
if not title == None:
self.title = title
self.last_modified = datetime.now()
def delete(self):
self.noteset.notes.remove(self)
self.noteset.save()
del self
def show(self, *args, **kwargs):
# If GUI has not been created, create it now
if self.gui == None:
self.gui = self.gui_class(note=self)
else:
self.gui.show(*args, **kwargs)
def hide(self):
if self.gui != None:
self.gui.hide()
def set_locked_state(self, locked):
# if gui hasn't been initialized, just change the property
if self.gui == None:
self.properties["locked"] = locked
else:
self.gui.set_locked_state(locked)
def cat_prop(self, prop):
"""Gets a property of the note's category"""
return self.noteset.get_category_property(self.category, prop)
class NoteSet:
def __init__(self, gui_class, data_file, indicator):
self.notes = []
self.properties = {}
self.categories = {}
self.gui_class = gui_class
self.data_file = data_file
self.indicator = indicator
def _loads_updater(self, dnoteset):
"""Parses old versions of the Notes structure and updates them"""
return dnoteset
def loads(self, snoteset):
"""Loads notes into their respective objects"""
notes = self._loads_updater(json.loads(snoteset))
self.properties = notes.get("properties", {})
self.categories = notes.get("categories", {})
self.notes = [Note(note, gui_class=self.gui_class, noteset=self)
for note in notes.get("notes",[])]
def dumps(self):
return json.dumps({"notes":[x.extract() for x in self.notes],
"properties": self.properties, "categories": self.categories})
def save(self, path=''):
output = self.dumps()
with open(path or expanduser(self.data_file),
mode='w', encoding='utf-8') as fsock:
fsock.write(output)
def open(self, path=''):
with open(path or expanduser(self.data_file),
encoding='utf-8') as fsock:
self.loads(fsock.read())
def load_fresh(self):
"""Load empty data"""
self.loads('{}')
self.new()
def merge(self, data):
"""Update notes based on new data"""
jdata = self._loads_updater(json.loads(data))
self.hideall()
# update categories
if "categories" in jdata:
self.categories.update(jdata["categories"])
# make a dictionary of notes so we can modify existing notes
dnotes = {n.uuid : n for n in self.notes}
for newnote in jdata.get("notes", []):
if "uuid" in newnote and newnote["uuid"] in dnotes:
# Update notes that are already in the noteset
orignote = dnotes[newnote["uuid"]]
# make sure it's an 'Update'
if datetime.strptime(newnote["last_modified"], \
"%Y-%m-%dT%H:%M:%S") > orignote.last_modified:
if "body" in newnote:
orignote.body = newnote["body"]
if "properties" in newnote:
orignote.properties = newnote["properties"]
if "cat" in newnote:
orignote.category = newnote["cat"]
else:
                # otherwise create a new note (use a separate name so the local
                # variable doesn't shadow the uuid module)
                if "uuid" in newnote:
                    new_uuid = newnote["uuid"]
                else:
                    new_uuid = str(uuid.uuid4())
                dnotes[new_uuid] = Note(newnote, gui_class=self.gui_class,
                        noteset=self)
# copy notes over from dictionary to list
self.notes = list(dnotes.values())
self.showall(reload_from_backend=True)
def find_category(self, name=""):
# return cid of the first matched category
if name:
try: cid = (cat for cat in self.categories if \
self.categories[cat]["name"] == name).__next__()
# not found
except Exception: cid = None
else:
cid = None
return cid
def new(self, notebody='', category=''):
"""Creates a new note and adds it to the note set"""
cid = self.find_category(name=category)
if category and not cid:
cid = str(uuid.uuid4())
self.categories[cid]={'name':category}
note = Note(gui_class=self.gui_class, noteset=self,
category=cid)
note.body=notebody
note.set_locked_state(not not notebody)
self.notes.append(note)
self.gui_class and note.show() # show if created with gui
return note
def showall(self, *args, **kwargs):
for note in self.notes:
note.show(*args, **kwargs)
self.properties["all_visible"] = True
def hideall(self, *args):
self.save()
for note in self.notes:
note.hide(*args)
self.properties["all_visible"] = False
def get_category_property(self, cat, prop):
"""Get a property of a category or the default"""
if ((not cat) or (not cat in self.categories)) and \
self.properties.get("default_cat", None):
cat = self.properties["default_cat"]
cat_data = self.categories.get(cat, {})
if prop in cat_data:
return cat_data[prop]
        # Otherwise, fall back to the default properties
if prop in FALLBACK_PROPERTIES:
return FALLBACK_PROPERTIES[prop]
else:
raise ValueError("Unknown property")
class dGUI:
"""Dummy GUI"""
def __init__(self, *args, **kwargs):
pass
def show(self):
pass
def hide(self):
pass
def update_note(self):
pass
def properties(self):
return None
|
gpl-3.0
| -2,976,877,124,460,908,000
| 34.751073
| 78
| 0.569868
| false
| 4.071359
| false
| false
| false
|
vietdh85/vh-utility
|
script/hyip_stop.py
|
1
|
1859
|
import sys
import os.path
import urllib2
import re
from pyquery import PyQuery as pq
import common
def getId(url):
arr = url.split("/")
id = arr[len(arr) - 2]
return id
def getSiteUrl(urlRequest, monitor, rcbUrl):
result = ""
print("REQUEST: {0}".format(urlRequest))
try:
req = urllib2.urlopen(urlRequest, timeout=30)
url = req.geturl()
arr = url.split("/?")
arr1 = arr[0].split("//")
result = arr1[1].replace("www.", "")
result = result.split("/")[0]
except :
print("========== ERROR ===========")
#common.insertUnknowSite(rcbUrl, monitor)
return result
def getRcb(monitor):
print("hyip_stop.getRcb()")
rcb_url = "http://{0}/new".format(monitor)
d = pq(url=rcb_url)
list = d("a.joinnw")
siteList = []
for item in list:
obj = {}
obj['id'] = getId(item.get("href"))
if common.getSiteMonitorByRefSiteId(monitor, obj['id']) == None:
obj['siteRCBUrl'] = "http://{0}/details/aj/rcb/lid/{1}/".format(monitor, obj['id'])
obj['url'] = getSiteUrl(item.get("href"), monitor, obj['siteRCBUrl'])
obj['siteId'] = ""
if obj['url'] != '':
siteId = common.insertSite(obj)
obj['siteId'] = siteId
siteList.append(obj)
print("{0} - {1} - {2}".format(obj['id'], obj['url'], obj['siteId']))
for item in siteList:
common.insertSiteMonitor(item, monitor)
def checkPaid(siteUrl):
d = pq(url=siteUrl)
tables = d("#content2 table.listbody tr td:nth-child(6) center")
result = False
#print(tables)
for item in tables:
if re.search('paid', item.text_content(), re.IGNORECASE):
result = True
return result
def checkRcb(monitor):
siteMonitors = common.getSiteMonitor(monitor)
for item in siteMonitors:
print(item)
if item[2] == 0:
if checkPaid(item[1]):
common.setPaid(item[0])
def run():
MONITOR = "hyipstop.com"
getRcb(MONITOR)
#checkRcb(MONITOR)
|
gpl-3.0
| -9,091,527,717,578,031,000
| 21.39759
| 86
| 0.629371
| false
| 2.713869
| false
| false
| false
|
mscoutermarsh/exercism_coveralls
|
assignments/python/space-age/space_age_test.py
|
1
|
1640
|
try:
from space_age import SpaceAge
except ImportError:
raise SystemExit('Could not find space_age.py. Does it exist?')
import unittest
class SpaceAgeTest(unittest.TestCase):
def test_age_in_seconds(self):
age = SpaceAge(1e6)
self.assertEqual(1e6, age.seconds)
def test_age_in_earth_years(self):
age = SpaceAge(1e9)
self.assertEqual(31.69, age.on_earth())
def test_age_in_mercury_years(self):
age = SpaceAge(2134835688)
self.assertEqual(67.65, age.on_earth())
self.assertEqual(280.88, age.on_mercury())
def test_age_in_venus_years(self):
age = SpaceAge(189839836)
self.assertEqual(6.02, age.on_earth())
self.assertEqual(9.78, age.on_venus())
def test_age_on_mars(self):
age = SpaceAge(2329871239)
self.assertEqual(73.83, age.on_earth())
self.assertEqual(39.25, age.on_mars())
def test_age_on_jupiter(self):
age = SpaceAge(901876382)
self.assertEqual(28.58, age.on_earth())
self.assertEqual(2.41, age.on_jupiter())
def test_age_on_saturn(self):
age = SpaceAge(3e9)
self.assertEqual(95.06, age.on_earth())
self.assertEqual(3.23, age.on_saturn())
def test_age_on_uranus(self):
age = SpaceAge(3210123456)
self.assertEqual(101.72, age.on_earth())
self.assertEqual(1.21, age.on_uranus())
def test_age_on_neptune(self):
age = SpaceAge(8210123456)
self.assertEqual(260.16, age.on_earth())
self.assertEqual(1.58, age.on_neptune())
if __name__ == '__main__':
unittest.main()
|
agpl-3.0
| -6,469,365,441,323,217,000
| 29.943396
| 67
| 0.615244
| false
| 2.992701
| true
| false
| false
|
hulifox008/bitbake
|
lib/bb/msg.py
|
1
|
5659
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'msg' implementation
Message handling infrastructure for bitbake
"""
# Copyright (C) 2006 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import logging
import collections
from itertools import groupby
import warnings
import bb
import bb.event
class BBLogFormatter(logging.Formatter):
"""Formatter which ensures that our 'plain' messages (logging.INFO + 1) are used as is"""
DEBUG3 = logging.DEBUG - 2
DEBUG2 = logging.DEBUG - 1
DEBUG = logging.DEBUG
VERBOSE = logging.INFO - 1
NOTE = logging.INFO
PLAIN = logging.INFO + 1
ERROR = logging.ERROR
WARNING = logging.WARNING
CRITICAL = logging.CRITICAL
levelnames = {
DEBUG3 : 'DEBUG',
DEBUG2 : 'DEBUG',
DEBUG : 'DEBUG',
VERBOSE: 'NOTE',
NOTE : 'NOTE',
PLAIN : '',
WARNING : 'WARNING',
ERROR : 'ERROR',
CRITICAL: 'ERROR',
}
def getLevelName(self, levelno):
try:
return self.levelnames[levelno]
except KeyError:
self.levelnames[levelno] = value = 'Level %d' % levelno
return value
def format(self, record):
record.levelname = self.getLevelName(record.levelno)
if record.levelno == self.PLAIN:
return record.getMessage()
else:
return logging.Formatter.format(self, record)
class Loggers(dict):
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)
else:
log = logging.getLogger("BitBake.%s" % domain._fields[key])
dict.__setitem__(self, key, log)
return log
class DebugLevel(dict):
def __getitem__(self, key):
if key == "default":
key = domain.Default
return get_debug_level(key)
def _NamedTuple(name, fields):
Tuple = collections.namedtuple(name, " ".join(fields))
return Tuple(*range(len(fields)))
domain = _NamedTuple("Domain", (
"Default",
"Build",
"Cache",
"Collection",
"Data",
"Depends",
"Fetcher",
"Parsing",
"PersistData",
"Provider",
"RunQueue",
"TaskData",
"Util"))
logger = logging.getLogger("BitBake")
loggers = Loggers()
debug_level = DebugLevel()
# Message control functions
#
def set_debug_level(level):
for log in loggers.itervalues():
log.setLevel(logging.NOTSET)
if level:
logger.setLevel(logging.DEBUG - level + 1)
else:
logger.setLevel(logging.INFO)
def get_debug_level(msgdomain = domain.Default):
if not msgdomain:
level = logger.getEffectiveLevel()
else:
level = loggers[msgdomain].getEffectiveLevel()
return max(0, logging.DEBUG - level + 1)
def set_verbose(level):
if level:
logger.setLevel(BBLogFormatter.VERBOSE)
else:
logger.setLevel(BBLogFormatter.INFO)
def set_debug_domains(domainargs):
for (domainarg, iterator) in groupby(domainargs):
for index, msgdomain in enumerate(domain._fields):
if msgdomain == domainarg:
level = len(tuple(iterator))
if level:
loggers[index].setLevel(logging.DEBUG - level + 1)
break
else:
warn(None, "Logging domain %s is not valid, ignoring" % domainarg)
#
# Message handling functions
#
def debug(level, msgdomain, msg):
warnings.warn("bb.msg.debug is deprecated in favor of the python 'logging' module",
DeprecationWarning, stacklevel=2)
level = logging.DEBUG - (level - 1)
if not msgdomain:
logger.debug(level, msg)
else:
loggers[msgdomain].debug(level, msg)
def plain(msg):
warnings.warn("bb.msg.plain is deprecated in favor of the python 'logging' module",
DeprecationWarning, stacklevel=2)
logger.plain(msg)
def note(level, msgdomain, msg):
warnings.warn("bb.msg.note is deprecated in favor of the python 'logging' module",
DeprecationWarning, stacklevel=2)
    if level > 1:
        if not msgdomain:
            logger.verbose(msg)
        else:
            loggers[msgdomain].verbose(msg)
    else:
        if not msgdomain:
            logger.info(msg)
        else:
            loggers[msgdomain].info(msg)
def warn(msgdomain, msg):
warnings.warn("bb.msg.warn is deprecated in favor of the python 'logging' module",
DeprecationWarning, stacklevel=2)
if not msgdomain:
logger.warn(msg)
else:
loggers[msgdomain].warn(msg)
def error(msgdomain, msg):
warnings.warn("bb.msg.error is deprecated in favor of the python 'logging' module",
DeprecationWarning, stacklevel=2)
if not msgdomain:
logger.error(msg)
else:
loggers[msgdomain].error(msg)
def fatal(msgdomain, msg):
if not msgdomain:
logger.critical(msg)
else:
loggers[msgdomain].critical(msg)
sys.exit(1)
|
gpl-2.0
| -3,374,870,527,087,330,000
| 27.580808
| 93
| 0.626436
| false
| 3.91085
| false
| false
| false
|
retux/google-finance-storage
|
quotes-exporter.py
|
1
|
1531
|
#!/usr/bin/env python
'''
quotes-exporter: exposes quotes as prometheus exporter api
Usage: quotes-exporter.py <port>
'''
import sys
import time
from prometheus_client import start_http_server, Metric, REGISTRY
from stockwatch import *
def cat_to_string (Symbols):
strSymbols = ' '
o = 0
for i in Symbols:
if o == 0:
strSymbols = i
else:
strSymbols = strSymbols + ',' + i
o += 1
return strSymbols
class QuoteCollector(object):
def __init__(self):
self._endpoint = ''
def collect(self):
        Symbols = [ 'GOOG', 'CSCO', 'BABA', 'AAPL', 'IBM', 'GLOB' ]
#Symbols = [ 'GOOG' ]
strSymbols = cat_to_string(Symbols)
JSp = GoogleFinanceAPI()
if JSp.get(strSymbols):
#JSp.Quotes2Stdout() # // Show a little data, just for testing
JSp.JsonQot2Obj()
metric = Metric('stock_quotes', 'stock quotes last price', 'gauge')
for quote in JSp.QuotesList:
# Convert quotes to metric
metric.add_sample('stock_quotes', value=float(quote.Last), labels={'symbol': quote.Symbol})
yield metric
def main():
"""
    The Symbols list contains stock symbols as used by the Google API.
    Each element should be 'EXCHANGE:SYMBOL', for example:
[ 'NASDAQ:GOOG', 'NASDAQ:CSCO', 'NYSE:IBM', 'BCBA:YPFD' ]
"""
start_http_server(int(sys.argv[1]))
REGISTRY.register(QuoteCollector())
while True: time.sleep(1)
if __name__ == "__main__":
main()
|
gpl-2.0
| 4,092,831,948,622,307,300
| 25.396552
| 103
| 0.601568
| false
| 3.387168
| false
| false
| false
|
haandol/review_crawler
|
crawler.py
|
1
|
1108
|
# coding: utf-8
import time
import json
import urllib
import urllib2
import logging
from bs4 import BeautifulSoup as Soup
logging.basicConfig(level=logging.DEBUG)
url = 'https://play.google.com/store/getreviews?authuser=0'
headers = {
'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
'user-agent': 'Mozilla/5.0'
}
payload = {
'id': 'com.google.android.apps.maps',
'reviewType': 0,
'pageNum': 0,
'reviewSortOrder': 4,
'xhr': 1,
'hl': 'ko'
}
def parse():
values = urllib.urlencode(payload)
req = urllib2.Request(url, values, headers)
response = urllib2.urlopen(req)
data = json.loads(response.read()[5:])
soup = Soup(data[0][2])
for review in soup.select('.single-review'):
body = review.select('.review-body')[0].text
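        # The rating is derived from the inline width of '.current-rating': the text
        # after 'width:' has its last two characters dropped and the remaining
        # percentage is divided by 20 to map it onto a 1-5 star scale.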
rating = int(review.select('.current-rating')[0]['style'].split(':')[1].strip()[:-2])/20
if 1 == rating:
logging.warning(body)
while True:
logging.info('start parsing')
parse()
logging.info('parsing ends')
logging.info('sleep in 60s')
time.sleep(60)
|
mit
| 6,187,855,021,797,479,000
| 21.612245
| 96
| 0.633574
| false
| 3.297619
| false
| false
| false
|
tforrest/soda-automation
|
app/runserver.py
|
1
|
1857
|
from flask_script import Manager
from flask_restful import Api
from models.user import User
from redis_ops.init_redis import RedisPopulater
from config import app
from config import db
from api import api
import logging
import os
import sys
manager = Manager(app)
def setup_api(app):
"""
Config resources with flask app
"""
service = Api(app)
service.add_resource(api.MailChimpListCheck,'/api/lists/',endpoint='check_mailchimp')
service.add_resource(api.MailChimpList,'/api/lists/<list_id>/<asu_id>',endpoint='member_list')
service.add_resource(api.GenerateAuthToken,'/api/gen_token/',endpoint='token')
return app
serviced_app = setup_api(app)
def setup_redis():
try:
RedisPopulater().init_redis_dbs()
except Exception as e:
logging.fatal(e)
logging.fatal("Failure to init redis")
sys.exit(1)
# Deploy for development
def setup_dev():
# setup database for admin
db.create_all()
try:
admin_user_name = os.environ['DEV_ADMIN_USER_NAME']
admin_password = os.environ['DEV_ADMIN_PASSWORD']
except KeyError as e:
logging.warning(e)
logging.fatal("Error cannot setup dev environment")
sys.exit(2)
admin = User(admin_user_name,admin_password)
try:
db.session.add(admin)
db.session.commit()
except Exception as e:
logging.fatal(e)
logging.fatal("Error cannot setup dev environment")
sys.exit(2)
# init redis and populate with mailchimp
setup_redis()
@manager.command
def run_dev():
setup_dev()
serviced_app.run(debug=True)
# Deploy for intergation tests
@manager.command
def run_test():
# To-Do
pass
# Deploy for production
@manager.command
def run_production():
# TO-DO
pass
if __name__ == '__main__':
manager.run()
|
mit
| -2,609,925,388,766,893,600
| 20.604651
| 98
| 0.658051
| false
| 3.550669
| false
| false
| false
|
thomas-schmid-ubnt/avocado
|
selftests/functional/test_output.py
|
1
|
20899
|
import json
import tempfile
import os
import re
import shutil
import unittest
from xml.dom import minidom
import pkg_resources
from avocado.core import exit_codes
from avocado.core.output import TermSupport
from avocado.utils import process
from avocado.utils import script
from avocado.utils import path as utils_path
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
PERL_TAP_PARSER_SNIPPET = """#!/bin/env perl
use TAP::Parser;
my $parser = TAP::Parser->new( { exec => ['%s', 'run', 'passtest.py', 'errortest.py', 'warntest.py', '--tap', '-', '--sysinfo', 'off', '--job-results-dir', '%%s'] } );
while ( my $result = $parser->next ) {
$result->is_unknown && die "Unknown line \\"" . $result->as_string . "\\" in the TAP output!\n";
}
$parser->parse_errors == 0 || die "Parser errors!\n";
$parser->is_good_plan || die "Plan is not a good plan!\n";
$parser->plan eq '1..3' || die "Plan does not match what was expected!\n";
""" % AVOCADO
def image_output_uncapable():
try:
import PIL
return False
except ImportError:
return True
def html_uncapable():
try:
pkg_resources.require('avocado_result_html')
return False
except pkg_resources.DistributionNotFound:
return True
def perl_tap_parser_uncapable():
return os.system("perl -e 'use TAP::Parser;'") != 0
def missing_binary(binary):
try:
utils_path.find_command(binary)
return False
except utils_path.CmdNotFoundError:
return True
class OutputTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
@unittest.skipIf(missing_binary('cc'),
"C compiler is required by the underlying doublefree.py test")
def test_output_doublefree(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'doublefree.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
output = result.stdout + result.stderr
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
bad_string = 'double free or corruption'
self.assertNotIn(bad_string, output,
"Libc double free can be seen in avocado "
"doublefree output:\n%s" % output)
def tearDown(self):
shutil.rmtree(self.tmpdir)
class OutputPluginTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
def check_output_files(self, debug_log):
base_dir = os.path.dirname(debug_log)
json_output = os.path.join(base_dir, 'results.json')
self.assertTrue(os.path.isfile(json_output))
with open(json_output, 'r') as fp:
json.load(fp)
xunit_output = os.path.join(base_dir, 'results.xml')
self.assertTrue(os.path.isfile(json_output))
try:
minidom.parse(xunit_output)
except Exception as details:
raise AssertionError("Unable to parse xunit output: %s\n\n%s"
% (details, open(xunit_output).read()))
tap_output = os.path.join(base_dir, "results.tap")
self.assertTrue(os.path.isfile(tap_output))
tap = open(tap_output).read()
self.assertIn("..", tap)
self.assertIn("\n# debug.log of ", tap)
def test_output_incompatible_setup(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --json - passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_FAIL
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
error_regex = re.compile(r'avocado run: error: argument ((--json)|'
'(--xunit)): Options ((--xunit --json)|'
'(--json --xunit)) are trying to use stdout '
'simultaneously\n')
self.assertIsNotNone(error_regex.match(result.stderr),
"Missing error message from output:\n%s" %
result.stderr)
@unittest.skipIf(html_uncapable(),
"Uncapable of Avocado Result HTML plugin")
def test_output_incompatible_setup_2(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--html - passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_JOB_FAIL
output = result.stdout + result.stderr
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
error_excerpt = "HTML to stdout not supported"
self.assertIn(error_excerpt, output,
"Missing excerpt error message from output:\n%s" % output)
def test_output_compatible_setup(self):
tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--journal --xunit %s --json - passtest.py' %
(AVOCADO, self.tmpdir, tmpfile))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
try:
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
# Check if we are producing valid outputs
json.loads(output)
minidom.parse(tmpfile)
finally:
try:
os.remove(tmpfile)
except OSError:
pass
def test_output_compatible_setup_2(self):
tmpfile = tempfile.mktemp()
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit - --json %s passtest.py' %
(AVOCADO, self.tmpdir, tmpfile))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
try:
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
# Check if we are producing valid outputs
with open(tmpfile, 'r') as fp:
json_results = json.load(fp)
debug_log = json_results['debuglog']
self.check_output_files(debug_log)
minidom.parseString(output)
finally:
try:
os.remove(tmpfile)
except OSError:
pass
@unittest.skipIf(html_uncapable(),
"Uncapable of Avocado Result HTML plugin")
def test_output_compatible_setup_3(self):
tmpfile = tempfile.mktemp(prefix='avocado_' + __name__)
tmpfile2 = tempfile.mktemp(prefix='avocado_' + __name__)
tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
tmpfile3 = tempfile.mktemp(dir=tmpdir)
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'--xunit %s --json %s --html %s passtest.py'
% (AVOCADO, self.tmpdir, tmpfile, tmpfile2, tmpfile3))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
tmpdir_contents = os.listdir(tmpdir)
self.assertEqual(len(tmpdir_contents), 4,
'Not all resources dir were created: %s' % tmpdir_contents)
try:
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertNotEqual(output, "", "Output is empty")
# Check if we are producing valid outputs
with open(tmpfile2, 'r') as fp:
json_results = json.load(fp)
debug_log = json_results['debuglog']
self.check_output_files(debug_log)
minidom.parse(tmpfile)
finally:
try:
os.remove(tmpfile)
os.remove(tmpfile2)
shutil.rmtree(tmpdir)
except OSError:
pass
def test_output_compatible_setup_nooutput(self):
tmpfile = tempfile.mktemp()
tmpfile2 = tempfile.mktemp()
os.chdir(basedir)
# Verify --silent can be supplied as app argument
cmd_line = ('%s --silent run --job-results-dir %s '
'--sysinfo=off --xunit %s --json %s passtest.py'
% (AVOCADO, self.tmpdir, tmpfile, tmpfile2))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
try:
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, "", "Output is not empty:\n%s" % output)
# Check if we are producing valid outputs
with open(tmpfile2, 'r') as fp:
json_results = json.load(fp)
debug_log = json_results['debuglog']
self.check_output_files(debug_log)
minidom.parse(tmpfile)
finally:
try:
os.remove(tmpfile)
os.remove(tmpfile2)
except OSError:
pass
def test_nonprintable_chars(self):
cmd_line = ("%s run --external-runner /bin/ls "
"'NON_EXISTING_FILE_WITH_NONPRINTABLE_CHARS_IN_HERE\x1b' "
"--job-results-dir %s --sysinfo=off"
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_TESTS_FAIL
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
debug_log = None
for line in output.splitlines():
if "JOB LOG" in line:
debug_log = line.split(':', 1)[-1].strip()
break
self.assertTrue(debug_log, "Unable to get JOB LOG from output:\n%s"
% output)
self.check_output_files(debug_log)
def test_show_job_log(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py --show-job-log' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
job_id_list = re.findall('Job ID: (.*)', result.stdout,
re.MULTILINE)
self.assertTrue(job_id_list, 'No Job ID in stdout:\n%s' %
result.stdout)
job_id = job_id_list[0]
self.assertEqual(len(job_id), 40)
def test_silent_trumps_show_job_log(self):
os.chdir(basedir)
# Also verify --silent can be supplied as run option
cmd_line = ('%s run --silent --job-results-dir %s '
'--sysinfo=off passtest.py --show-job-log'
% (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, "")
def test_default_enabled_plugins(self):
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s --sysinfo=off '
'passtest.py' % (AVOCADO, self.tmpdir))
result = process.run(cmd_line, ignore_status=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
output_lines = output.splitlines()
# The current human output produces 6 lines when running a single test,
# with an optional 7th line when the HTML report generation is enabled
self.assertGreaterEqual(len(output_lines), 6,
('Basic human interface did not produce the '
                                 'expected output. Output produced: "%s"' % output))
second_line = output_lines[1]
debug_log = second_line.split()[-1]
self.check_output_files(debug_log)
def test_verify_whiteboard_save(self):
tmpfile = tempfile.mktemp()
try:
os.chdir(basedir)
config = os.path.join(self.tmpdir, "conf.ini")
content = ("[datadir.paths]\nlogs_dir = %s"
% os.path.relpath(self.tmpdir, "."))
script.Script(config, content).save()
cmd_line = ('%s --config %s --show all run '
'--sysinfo=off whiteboard.py --json %s'
% (AVOCADO, config, tmpfile))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
with open(tmpfile, 'r') as fp:
json_results = json.load(fp)
logfile = json_results['tests'][0]['logfile']
debug_dir = os.path.dirname(logfile)
whiteboard_path = os.path.join(debug_dir, 'whiteboard')
self.assertTrue(os.path.exists(whiteboard_path),
'Missing whiteboard file %s' % whiteboard_path)
finally:
try:
os.remove(tmpfile)
except OSError:
pass
@unittest.skipIf(image_output_uncapable(),
"Uncapable of generating images with PIL library")
def test_gendata(self):
tmpfile = tempfile.mktemp()
try:
os.chdir(basedir)
cmd_line = ("%s run --job-results-dir %s "
"--sysinfo=off gendata.py --json %s" %
(AVOCADO, self.tmpdir, tmpfile))
result = process.run(cmd_line, ignore_status=True)
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
with open(tmpfile, 'r') as fp:
json_results = json.load(fp)
bsod_dir = None
json_dir = None
for test in json_results['tests']:
if "test_bsod" in test['url']:
bsod_dir = test['logfile']
elif "test_json" in test['url']:
json_dir = test['logfile']
self.assertTrue(bsod_dir, "Failed to get test_bsod output "
"directory")
self.assertTrue(json_dir, "Failed to get test_json output "
"directory")
bsod_dir = os.path.join(os.path.dirname(bsod_dir), "data",
"bsod.png")
json_dir = os.path.join(os.path.dirname(json_dir), "data",
"test.json")
            self.assertTrue(os.path.exists(bsod_dir), "File %s produced by "
                            "test does not exist" % bsod_dir)
            self.assertTrue(os.path.exists(json_dir), "File %s produced by "
                            "test does not exist" % json_dir)
finally:
try:
os.remove(tmpfile)
except OSError:
pass
def test_redirect_output(self):
redirected_output_path = tempfile.mktemp()
try:
os.chdir(basedir)
cmd_line = ('%s run --job-results-dir %s '
'--sysinfo=off passtest.py > %s'
% (AVOCADO, self.tmpdir, redirected_output_path))
result = process.run(cmd_line, ignore_status=True, shell=True)
output = result.stdout + result.stderr
expected_rc = exit_codes.AVOCADO_ALL_OK
self.assertEqual(result.exit_status, expected_rc,
"Avocado did not return rc %d:\n%s" %
(expected_rc, result))
self.assertEqual(output, '',
'After redirecting to file, output is not empty: %s' % output)
with open(redirected_output_path, 'r') as redirected_output_file_obj:
redirected_output = redirected_output_file_obj.read()
for code in TermSupport.ESCAPE_CODES:
self.assertNotIn(code, redirected_output,
'Found terminal support code %s in redirected output\n%s' %
(code, redirected_output))
finally:
try:
os.remove(redirected_output_path)
except OSError:
pass
@unittest.skipIf(perl_tap_parser_uncapable(),
"Uncapable of using Perl TAP::Parser library")
def test_tap_parser(self):
perl_script = script.TemporaryScript("tap_parser.pl",
PERL_TAP_PARSER_SNIPPET
% self.tmpdir)
perl_script.save()
os.chdir(basedir)
process.run("perl %s" % perl_script)
def test_tap_totaltests(self):
os.chdir(basedir)
cmd_line = ("%s run passtest.py "
"-m examples/tests/sleeptest.py.data/sleeptest.yaml "
"--job-results-dir %s "
"--tap -" % (AVOCADO, self.tmpdir))
result = process.run(cmd_line)
expr = '1..4'
self.assertIn(expr, result.stdout, "'%s' not found in:\n%s"
% (expr, result.stdout))
def test_broken_pipe(self):
os.chdir(basedir)
cmd_line = "(%s run --help | whacky-unknown-command)" % AVOCADO
result = process.run(cmd_line, shell=True, ignore_status=True,
env={"LC_ALL": "C"})
expected_rc = 127
self.assertEqual(result.exit_status, expected_rc,
("avocado run to broken pipe did not return "
"rc %d:\n%s" % (expected_rc, result)))
self.assertEqual(len(result.stderr.splitlines()), 1)
self.assertIn("whacky-unknown-command", result.stderr)
self.assertIn("not found", result.stderr)
self.assertNotIn("Avocado crashed", result.stderr)
def test_results_plugins_no_tests(self):
os.chdir(basedir)
cmd_line = ("%s run UNEXISTING --job-results-dir %s"
% (AVOCADO, self.tmpdir))
exit_code = process.system(cmd_line, ignore_status=True)
self.assertEqual(exit_code, exit_codes.AVOCADO_JOB_FAIL)
xunit_results = os.path.join(self.tmpdir, 'latest', 'results.xml')
self.assertFalse(os.path.exists(xunit_results))
json_results = os.path.join(self.tmpdir, 'latest', 'results.json')
self.assertFalse(os.path.exists(json_results))
tap_results = os.path.join(self.tmpdir, 'latest', 'results.tap')
self.assertFalse(os.path.exists(tap_results))
def tearDown(self):
shutil.rmtree(self.tmpdir)
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
| 296,487,700,817,237,500
| 42.269151
| 167
| 0.537442
| false
| 3.931339
| true
| false
| false
|
ge0rgi/cinder
|
cinder/volume/utils.py
|
1
|
31057
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume-related Utilities and helpers."""
import ast
import functools
import math
import operator
import re
import time
import uuid
from Crypto.Random import random
import eventlet
from eventlet import tpool
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder.volume import group_types
from cinder.volume import throttling
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def null_safe_str(s):
return str(s) if s else ''
def _usage_from_volume(context, volume_ref, **kw):
now = timeutils.utcnow()
launched_at = volume_ref['launched_at'] or now
created_at = volume_ref['created_at'] or now
volume_status = volume_ref['status']
if volume_status == 'error_managing_deleting':
volume_status = 'deleting'
usage_info = dict(
tenant_id=volume_ref['project_id'],
host=volume_ref['host'],
user_id=volume_ref['user_id'],
availability_zone=volume_ref['availability_zone'],
volume_id=volume_ref['id'],
volume_type=volume_ref['volume_type_id'],
display_name=volume_ref['display_name'],
launched_at=launched_at.isoformat(),
created_at=created_at.isoformat(),
status=volume_status,
snapshot_id=volume_ref['snapshot_id'],
size=volume_ref['size'],
replication_status=volume_ref['replication_status'],
replication_extended_status=volume_ref['replication_extended_status'],
replication_driver_data=volume_ref['replication_driver_data'],
metadata=volume_ref.get('volume_metadata'),)
usage_info.update(kw)
try:
attachments = db.volume_attachment_get_all_by_volume_id(
context, volume_ref['id'])
usage_info['volume_attachment'] = attachments
glance_meta = db.volume_glance_metadata_get(context, volume_ref['id'])
if glance_meta:
usage_info['glance_metadata'] = glance_meta
except exception.GlanceMetadataNotFound:
pass
except exception.VolumeNotFound:
LOG.debug("Can not find volume %s at notify usage", volume_ref['id'])
return usage_info
def _usage_from_backup(backup, **kw):
num_dependent_backups = backup.num_dependent_backups
usage_info = dict(tenant_id=backup.project_id,
user_id=backup.user_id,
availability_zone=backup.availability_zone,
backup_id=backup.id,
host=backup.host,
display_name=backup.display_name,
created_at=str(backup.created_at),
status=backup.status,
volume_id=backup.volume_id,
size=backup.size,
service_metadata=backup.service_metadata,
service=backup.service,
fail_reason=backup.fail_reason,
parent_id=backup.parent_id,
num_dependent_backups=num_dependent_backups,
snapshot_id=backup.snapshot_id,
)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_volume_usage(context, volume, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume, **extra_usage_info)
rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_backup_usage(context, backup, event_suffix,
extra_usage_info=None,
host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_backup(backup, **extra_usage_info)
rpc.get_notifier("backup", host).info(context, 'backup.%s' % event_suffix,
usage_info)
def _usage_from_snapshot(snapshot, context, **extra_usage_info):
# (niedbalski) a snapshot might be related to a deleted
# volume, if that's the case, the volume information is still
    # required for filling the usage_info, so we force a read of
# the volume data even if the volume has been deleted.
context.read_deleted = "yes"
volume = db.volume_get(context, snapshot.volume_id)
usage_info = {
'tenant_id': snapshot.project_id,
'user_id': snapshot.user_id,
'availability_zone': volume['availability_zone'],
'volume_id': snapshot.volume_id,
'volume_size': snapshot.volume_size,
'snapshot_id': snapshot.id,
'display_name': snapshot.display_name,
'created_at': str(snapshot.created_at),
'status': snapshot.status,
'deleted': null_safe_str(snapshot.deleted),
'metadata': null_safe_str(snapshot.metadata),
}
usage_info.update(extra_usage_info)
return usage_info
@utils.if_notifications_enabled
def notify_about_snapshot_usage(context, snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_snapshot(snapshot, context, **extra_usage_info)
rpc.get_notifier('snapshot', host).info(context,
'snapshot.%s' % event_suffix,
usage_info)
def _usage_from_capacity(capacity, **extra_usage_info):
capacity_info = {
'name_to_id': capacity['name_to_id'],
'total': capacity['total'],
'free': capacity['free'],
'allocated': capacity['allocated'],
'provisioned': capacity['provisioned'],
'virtual_free': capacity['virtual_free'],
'reported_at': capacity['reported_at']
}
capacity_info.update(extra_usage_info)
return capacity_info
@utils.if_notifications_enabled
def notify_about_capacity_usage(context, capacity, suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_capacity(capacity, **extra_usage_info)
rpc.get_notifier('capacity', host).info(context,
'capacity.%s' % suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_replication_usage(context, volume, suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_volume(context, volume,
**extra_usage_info)
rpc.get_notifier('replication', host).info(context,
'replication.%s' % suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_replication_error(context, volume, suffix,
extra_error_info=None, host=None):
if not host:
host = CONF.host
if not extra_error_info:
extra_error_info = {}
usage_info = _usage_from_volume(context, volume,
**extra_error_info)
rpc.get_notifier('replication', host).error(context,
'replication.%s' % suffix,
usage_info)
def _usage_from_consistencygroup(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
consistencygroup_id=group_ref.id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_consistencygroup_usage(context, group, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_consistencygroup(group,
**extra_usage_info)
rpc.get_notifier("consistencygroup", host).info(
context,
'consistencygroup.%s' % event_suffix,
usage_info)
def _usage_from_group(group_ref, **kw):
usage_info = dict(tenant_id=group_ref.project_id,
user_id=group_ref.user_id,
availability_zone=group_ref.availability_zone,
group_id=group_ref.id,
group_type=group_ref.group_type_id,
name=group_ref.name,
created_at=group_ref.created_at.isoformat(),
status=group_ref.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_group_usage(context, group, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_group(group,
**extra_usage_info)
rpc.get_notifier("group", host).info(
context,
'group.%s' % event_suffix,
usage_info)
def _usage_from_cgsnapshot(cgsnapshot, **kw):
usage_info = dict(
tenant_id=cgsnapshot.project_id,
user_id=cgsnapshot.user_id,
cgsnapshot_id=cgsnapshot.id,
name=cgsnapshot.name,
consistencygroup_id=cgsnapshot.consistencygroup_id,
created_at=cgsnapshot.created_at.isoformat(),
status=cgsnapshot.status)
usage_info.update(kw)
return usage_info
def _usage_from_group_snapshot(group_snapshot, **kw):
usage_info = dict(
tenant_id=group_snapshot.project_id,
user_id=group_snapshot.user_id,
group_snapshot_id=group_snapshot.id,
name=group_snapshot.name,
group_id=group_snapshot.group_id,
group_type=group_snapshot.group_type_id,
created_at=group_snapshot.created_at.isoformat(),
status=group_snapshot.status)
usage_info.update(kw)
return usage_info
@utils.if_notifications_enabled
def notify_about_cgsnapshot_usage(context, cgsnapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_cgsnapshot(cgsnapshot,
**extra_usage_info)
rpc.get_notifier("cgsnapshot", host).info(
context,
'cgsnapshot.%s' % event_suffix,
usage_info)
@utils.if_notifications_enabled
def notify_about_group_snapshot_usage(context, group_snapshot, event_suffix,
extra_usage_info=None, host=None):
if not host:
host = CONF.host
if not extra_usage_info:
extra_usage_info = {}
usage_info = _usage_from_group_snapshot(group_snapshot,
**extra_usage_info)
rpc.get_notifier("group_snapshot", host).info(
context,
'group_snapshot.%s' % event_suffix,
usage_info)
def _check_blocksize(blocksize):
# Check if volume_dd_blocksize is valid
try:
# Rule out zero-sized/negative/float dd blocksize which
# cannot be caught by strutils
if blocksize.startswith(('-', '0')) or '.' in blocksize:
raise ValueError
strutils.string_to_bytes('%sB' % blocksize)
except ValueError:
LOG.warning(_LW("Incorrect value error: %(blocksize)s, "
"it may indicate that \'volume_dd_blocksize\' "
"was configured incorrectly. Fall back to default."),
{'blocksize': blocksize})
# Fall back to default blocksize
CONF.clear_override('volume_dd_blocksize')
blocksize = CONF.volume_dd_blocksize
return blocksize
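# A minimal sketch of how _check_blocksize behaves (values are illustrative):
#
#   _check_blocksize('1M')     # valid dd blocksize -> returned unchanged
#   _check_blocksize('0.5M')   # float sizes are rejected -> falls back to
#                              # CONF.volume_dd_blocksize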
def check_for_odirect_support(src, dest, flag='oflag=direct'):
# Check whether O_DIRECT is supported
try:
# iflag=direct and if=/dev/zero combination does not work
# error: dd: failed to open '/dev/zero': Invalid argument
if (src == '/dev/zero' and flag == 'iflag=direct'):
return False
else:
utils.execute('dd', 'count=0', 'if=%s' % src,
'of=%s' % dest,
flag, run_as_root=True)
return True
except processutils.ProcessExecutionError:
return False
def _copy_volume_with_path(prefix, srcstr, deststr, size_in_m, blocksize,
sync=False, execute=utils.execute, ionice=None,
sparse=False):
cmd = prefix[:]
if ionice:
cmd.extend(('ionice', ionice))
blocksize = _check_blocksize(blocksize)
size_in_bytes = size_in_m * units.Mi
cmd.extend(('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
'count=%d' % size_in_bytes, 'bs=%s' % blocksize))
# Use O_DIRECT to avoid thrashing the system buffer cache
odirect = check_for_odirect_support(srcstr, deststr, 'iflag=direct')
cmd.append('iflag=count_bytes,direct' if odirect else 'iflag=count_bytes')
if check_for_odirect_support(srcstr, deststr, 'oflag=direct'):
cmd.append('oflag=direct')
odirect = True
# If the volume is being unprovisioned then
    # request that the data is persisted before returning,
# so that it's not discarded from the cache.
conv = []
if sync and not odirect:
conv.append('fdatasync')
if sparse:
conv.append('sparse')
if conv:
conv_options = 'conv=' + ",".join(conv)
cmd.append(conv_options)
# Perform the copy
start_time = timeutils.utcnow()
execute(*cmd, run_as_root=True)
duration = timeutils.delta_seconds(start_time, timeutils.utcnow())
# NOTE(jdg): use a default of 1, mostly for unit test, but in
# some incredible event this is 0 (cirros image?) don't barf
if duration < 1:
duration = 1
mbps = (size_in_m / duration)
LOG.debug("Volume copy details: src %(src)s, dest %(dest)s, "
"size %(sz).2f MB, duration %(duration).2f sec",
{"src": srcstr,
"dest": deststr,
"sz": size_in_m,
"duration": duration})
LOG.info(_LI("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s"),
{'size_in_m': size_in_m, 'mbps': mbps})
def _open_volume_with_path(path, mode):
try:
with utils.temporary_chown(path):
handle = open(path, mode)
return handle
except Exception:
LOG.error(_LE("Failed to open volume from %(path)s."), {'path': path})
def _transfer_data(src, dest, length, chunk_size):
"""Transfer data between files (Python IO objects)."""
chunks = int(math.ceil(length / chunk_size))
remaining_length = length
LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred.",
{'chunks': chunks, 'bytes': chunk_size})
for chunk in range(0, chunks):
before = time.time()
data = tpool.execute(src.read, min(chunk_size, remaining_length))
# If we have reached end of source, discard any extraneous bytes from
# destination volume if trim is enabled and stop writing.
if data == b'':
break
tpool.execute(dest.write, data)
remaining_length -= len(data)
delta = (time.time() - before)
rate = (chunk_size / delta) / units.Ki
LOG.debug("Transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s).",
{'chunk': chunk + 1, 'chunks': chunks, 'rate': rate})
# yield to any other pending operations
eventlet.sleep(0)
tpool.execute(dest.flush)
def _copy_volume_with_file(src, dest, size_in_m):
src_handle = src
if isinstance(src, six.string_types):
src_handle = _open_volume_with_path(src, 'rb')
dest_handle = dest
if isinstance(dest, six.string_types):
dest_handle = _open_volume_with_path(dest, 'wb')
if not src_handle:
raise exception.DeviceUnavailable(
_("Failed to copy volume, source device unavailable."))
if not dest_handle:
raise exception.DeviceUnavailable(
_("Failed to copy volume, destination device unavailable."))
start_time = timeutils.utcnow()
_transfer_data(src_handle, dest_handle, size_in_m * units.Mi, units.Mi * 4)
duration = max(1, timeutils.delta_seconds(start_time, timeutils.utcnow()))
if isinstance(src, six.string_types):
src_handle.close()
if isinstance(dest, six.string_types):
dest_handle.close()
mbps = (size_in_m / duration)
LOG.info(_LI("Volume copy completed (%(size_in_m).2f MB at "
"%(mbps).2f MB/s)."),
{'size_in_m': size_in_m, 'mbps': mbps})
def copy_volume(src, dest, size_in_m, blocksize, sync=False,
execute=utils.execute, ionice=None, throttle=None,
sparse=False):
"""Copy data from the source volume to the destination volume.
The parameters 'src' and 'dest' are both typically of type str, which
represents the path to each volume on the filesystem. Connectors can
optionally return a volume handle of type RawIOBase for volumes that are
not available on the local filesystem for open/close operations.
If either 'src' or 'dest' are not of type str, then they are assumed to be
of type RawIOBase or any derivative that supports file operations such as
read and write. In this case, the handles are treated as file handles
    instead of file paths and, at present, throttling is unavailable.
"""
if (isinstance(src, six.string_types) and
isinstance(dest, six.string_types)):
if not throttle:
throttle = throttling.Throttle.get_default()
with throttle.subcommand(src, dest) as throttle_cmd:
_copy_volume_with_path(throttle_cmd['prefix'], src, dest,
size_in_m, blocksize, sync=sync,
execute=execute, ionice=ionice,
sparse=sparse)
else:
_copy_volume_with_file(src, dest, size_in_m)
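# A minimal usage sketch for copy_volume (paths and sizes are hypothetical):
#
#   # path-to-path copy: goes through dd, optionally throttled
#   copy_volume('/dev/mapper/src-vol', '/dev/mapper/dst-vol',
#               size_in_m=1024, blocksize=CONF.volume_dd_blocksize, sync=True)
#
#   # file-like handles (e.g. RawIOBase objects from a connector) skip dd and
#   # are copied chunk by chunk via _copy_volume_with_file
#   copy_volume(src_handle, dest_handle, size_in_m=1024,
#               blocksize=CONF.volume_dd_blocksize)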
def clear_volume(volume_size, volume_path, volume_clear=None,
volume_clear_size=None, volume_clear_ionice=None,
throttle=None):
"""Unprovision old volumes to prevent data leaking between users."""
if volume_clear is None:
volume_clear = CONF.volume_clear
if volume_clear_size is None:
volume_clear_size = CONF.volume_clear_size
if volume_clear_size == 0:
volume_clear_size = volume_size
if volume_clear_ionice is None:
volume_clear_ionice = CONF.volume_clear_ionice
LOG.info(_LI("Performing secure delete on volume: %s"), volume_path)
# We pass sparse=False explicitly here so that zero blocks are not
# skipped in order to clear the volume.
if volume_clear == 'zero':
return copy_volume('/dev/zero', volume_path, volume_clear_size,
CONF.volume_dd_blocksize,
sync=True, execute=utils.execute,
ionice=volume_clear_ionice,
throttle=throttle, sparse=False)
else:
raise exception.InvalidConfigurationValue(
option='volume_clear',
value=volume_clear)
def supports_thin_provisioning():
return brick_lvm.LVM.supports_thin_provisioning(
utils.get_root_helper())
def get_all_physical_volumes(vg_name=None):
return brick_lvm.LVM.get_all_physical_volumes(
utils.get_root_helper(),
vg_name)
def get_all_volume_groups(vg_name=None):
return brick_lvm.LVM.get_all_volume_groups(
utils.get_root_helper(),
vg_name)
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
def generate_password(length=16, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [random.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
random.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([random.choice(symbols) for _i in range(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
random.shuffle(password)
return ''.join(password)
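# A minimal usage sketch (the output is random; the value shown is only
# illustrative):
#
#   generate_password(length=8)   # -> e.g. 'a7RkP3mZ'; when length >= the
#                                 # number of symbol groups, at least one
#                                 # character comes from each group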
def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
# Use the same implementation as the password generation.
return generate_password(length, symbolgroups)
DEFAULT_POOL_NAME = '_pool0'
def extract_host(host, level='backend', default_pool_name=False):
"""Extract Host, Backend or Pool information from host string.
:param host: String for host, which could include host@backend#pool info
:param level: Indicate which level of information should be extracted
from host string. Level can be 'host', 'backend' or 'pool',
default value is 'backend'
:param default_pool_name: this flag specify what to do if level == 'pool'
and there is no 'pool' info encoded in host
string. default_pool_name=True will return
DEFAULT_POOL_NAME, otherwise we return None.
Default value of this parameter is False.
:return: expected information, string or None
:raises: exception.InvalidVolume
For example:
host = 'HostA@BackendB#PoolC'
ret = extract_host(host, 'host')
# ret is 'HostA'
ret = extract_host(host, 'backend')
# ret is 'HostA@BackendB'
ret = extract_host(host, 'pool')
# ret is 'PoolC'
host = 'HostX@BackendY'
ret = extract_host(host, 'pool')
# ret is None
ret = extract_host(host, 'pool', True)
# ret is '_pool0'
"""
if host is None:
msg = _("volume is not assigned to a host")
raise exception.InvalidVolume(reason=msg)
if level == 'host':
# make sure pool is not included
hst = host.split('#')[0]
return hst.split('@')[0]
elif level == 'backend':
return host.split('#')[0]
elif level == 'pool':
lst = host.split('#')
if len(lst) == 2:
return lst[1]
elif default_pool_name is True:
return DEFAULT_POOL_NAME
else:
return None
def append_host(host, pool):
"""Encode pool into host info."""
if not host or not pool:
return host
new_host = "#".join([host, pool])
return new_host
def matching_backend_name(src_volume_type, volume_type):
if src_volume_type.get('volume_backend_name') and \
volume_type.get('volume_backend_name'):
return src_volume_type.get('volume_backend_name') == \
volume_type.get('volume_backend_name')
else:
return False
def hosts_are_equivalent(host_1, host_2):
# In case host_1 or host_2 are None
if not (host_1 and host_2):
return host_1 == host_2
return extract_host(host_1) == extract_host(host_2)
def read_proc_mounts():
"""Read the /proc/mounts file.
It's a dummy function but it eases the writing of unit tests as mocking
__builtin__open() for a specific file only is not trivial.
"""
with open('/proc/mounts') as mounts:
return mounts.readlines()
def extract_id_from_volume_name(vol_name):
regex = re.compile(
CONF.volume_name_template.replace('%s', '(?P<uuid>.+)'))
match = regex.match(vol_name)
return match.group('uuid') if match else None
def check_already_managed_volume(vol_id):
"""Check cinder db for already managed volume.
:param vol_id: volume id parameter
:returns: bool -- return True, if db entry with specified
volume id exists, otherwise return False
"""
try:
return (vol_id and isinstance(vol_id, six.string_types) and
uuid.UUID(vol_id, version=4) and
objects.Volume.exists(context.get_admin_context(), vol_id))
except ValueError:
return False
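# A minimal usage sketch (assumes CONF.volume_name_template keeps its default
# 'volume-%s'; the UUID is made up):
#
#   vol_id = extract_id_from_volume_name(
#       'volume-6f0df1f5-4f9a-4e39-9f63-4b105f4c2c8b')
#   # vol_id == '6f0df1f5-4f9a-4e39-9f63-4b105f4c2c8b'
#   check_already_managed_volume(vol_id)   # True only if a matching volume
#                                          # row exists in the cinder DB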
def extract_id_from_snapshot_name(snap_name):
"""Return a snapshot's ID from its name on the backend."""
regex = re.compile(
CONF.snapshot_name_template.replace('%s', '(?P<uuid>.+)'))
match = regex.match(snap_name)
return match.group('uuid') if match else None
def paginate_entries_list(entries, marker, limit, offset, sort_keys,
sort_dirs):
"""Paginate a list of entries.
:param entries: list of dictionaries
    :param marker: The last element previously returned
    :param limit: The maximum number of items to return
    :param offset: The number of items to skip from the marker or from the
                   first element.
    :param sort_keys: A list of keys in the dictionaries to sort by
    :param sort_dirs: A list of sort directions, where each is either 'asc'
                      or 'desc'
"""
comparers = [(operator.itemgetter(key.strip()), multiplier)
for (key, multiplier) in zip(sort_keys, sort_dirs)]
def comparer(left, right):
for fn, d in comparers:
left_val = fn(left)
right_val = fn(right)
if isinstance(left_val, dict):
left_val = sorted(left_val.values())[0]
if isinstance(right_val, dict):
right_val = sorted(right_val.values())[0]
if left_val == right_val:
continue
if d == 'asc':
return -1 if left_val < right_val else 1
else:
return -1 if left_val > right_val else 1
else:
return 0
sorted_entries = sorted(entries, key=functools.cmp_to_key(comparer))
start_index = 0
if offset is None:
offset = 0
if marker:
start_index = -1
for i, entry in enumerate(sorted_entries):
if entry['reference'] == marker:
start_index = i + 1
break
if start_index < 0:
msg = _('marker not found: %s') % marker
raise exception.InvalidInput(reason=msg)
range_end = start_index + limit
return sorted_entries[start_index + offset:range_end + offset]
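# A minimal usage sketch (the entries below are hypothetical manageable-volume
# records; 'reference' is the key a marker is matched against):
#
#   entries = [{'reference': {'name': 'vol-a'}, 'size': 1},
#              {'reference': {'name': 'vol-b'}, 'size': 3},
#              {'reference': {'name': 'vol-c'}, 'size': 2}]
#   paginate_entries_list(entries, marker=None, limit=2, offset=0,
#                         sort_keys=['size'], sort_dirs=['asc'])
#   # -> the 'vol-a' and 'vol-c' entries, sorted by size ascending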
def convert_config_string_to_dict(config_string):
"""Convert config file replication string to a dict.
The only supported form is as follows:
"{'key-1'='val-1' 'key-2'='val-2'...}"
:param config_string: Properly formatted string to convert to dict.
    :returns: dict of string values
"""
resultant_dict = {}
try:
st = config_string.replace("=", ":")
st = st.replace(" ", ", ")
resultant_dict = ast.literal_eval(st)
except Exception:
LOG.warning(_LW("Error encountered translating config_string: "
"%(config_string)s to dict"),
{'config_string': config_string})
return resultant_dict
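# A minimal usage sketch of the supported input form:
#
#   convert_config_string_to_dict("{'key-1'='val-1' 'key-2'='val-2'}")
#   # -> {'key-1': 'val-1', 'key-2': 'val-2'}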
def create_encryption_key(context, key_manager, volume_type_id):
encryption_key_id = None
if volume_types.is_encrypted(context, volume_type_id):
volume_type_encryption = (
volume_types.get_volume_type_encryption(context,
volume_type_id))
cipher = volume_type_encryption.cipher
length = volume_type_encryption.key_size
algorithm = cipher.split('-')[0] if cipher else None
encryption_key_id = key_manager.create_key(
context,
algorithm=algorithm,
length=length)
return encryption_key_id
def is_replicated_str(str):
spec = (str or '').split()
return (len(spec) == 2 and
spec[0] == '<is>' and strutils.bool_from_string(spec[1]))
def is_replicated_spec(extra_specs):
return (extra_specs and
is_replicated_str(extra_specs.get('replication_enabled')))
def group_get_by_id(group_id):
ctxt = context.get_admin_context()
group = db.group_get(ctxt, group_id)
return group
def is_group_a_cg_snapshot_type(group_or_snap):
LOG.debug("Checking if %s is a consistent snapshot group",
group_or_snap)
if group_or_snap["group_type_id"] is not None:
spec = group_types.get_group_type_specs(
group_or_snap["group_type_id"],
key="consistent_group_snapshot_enabled"
)
return spec == "<is> True"
return False
|
apache-2.0
| 8,789,788,171,055,562,000
| 33.016429
| 79
| 0.601314
| false
| 3.949262
| false
| false
| false
|
Astyan-42/skepticalscience
|
skepticalsciencewebsite/publications/forms.py
|
1
|
6022
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django_select2.forms import Select2MultipleWidget
from django.db.models import Q
from django.contrib.auth import get_user_model
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit
from crispy_forms.bootstrap import Field
from skepticalsciencewebsite.utils import NoLinkClearableFileInput
from publications.models import Publication, Comment, EstimatedImpactFactor, CommentReview
from publications.constants import BOOLEAN_CHOICES, ABORTED, CORRECTION
from sciences.forms import ScienceModelForm
User = get_user_model()
class UserModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.get_full_name()
class UserModelMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
return obj.get_full_name()
class PublicationCreateForm(ScienceModelForm):
"""
    create a publication form with restricted fields
TO ADD AUTHORS AND LAST AUTHOR
"""
first_author = UserModelChoiceField(queryset=User.objects.filter(~Q(first_name="") & ~Q(last_name="")))
last_author = UserModelChoiceField(queryset=User.objects.filter(~Q(first_name="") & ~Q(last_name="")),
required=False)
authors = UserModelMultipleChoiceField(queryset=User.objects.filter(~Q(first_name="") & ~Q(last_name="")),
required=False, widget=Select2MultipleWidget)
def __init__(self, *args, **kwargs):
super(PublicationCreateForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id-publicationcreateForm'
self.helper.add_input(Submit('submit', _('Submit')))
class Meta:
model = Publication
fields = ["title", "resume", "pdf_creation", "source_creation", "first_author", "authors", "last_author",
"sciences", "licence"]
widgets = {'sciences': Select2MultipleWidget,
'resume': forms.Textarea(),
'pdf_creation': NoLinkClearableFileInput,
'source_creation': NoLinkClearableFileInput,}
class PublicationCorrectForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(PublicationCorrectForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id_publicationcorrectionupdateForm'
self.helper.add_input(Submit('submit', _('Submit')))
class Meta:
model = Publication
fields = ["resume", "pdf_final", "source_final"]
widgets = {'resume': forms.Textarea(),
'pdf_final': NoLinkClearableFileInput,
'source_final': NoLinkClearableFileInput,}
class PublicationAbortForm(forms.ModelForm):
abort = forms.BooleanField(required=False)
def __init__(self, *args, **kwargs):
super(PublicationAbortForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id_publicationabortupdateForm'
self.helper.add_input(Submit('submit', _('Abort publication')))
def save(self, commit=True):
data = super(PublicationAbortForm, self).save(commit=False)
if self.cleaned_data["abort"] and data.status == CORRECTION:
data.status = ABORTED
data.update_status_date = timezone.now()
if commit:
data.save()
return data
class Meta:
model = Publication
fields = ["abort"]
class CommentForm(forms.ModelForm):
prefix = 'comment'
def __init__(self, *args, **kwargs):
super(CommentForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id-commentForm'
self.helper.add_input(Submit('submit', _('Submit')))
class Meta:
model = Comment
fields = ["author_fake_pseudo", "comment_type", "title", "content"]
widgets = {'content': forms.Textarea()}
class EstimatedImpactFactorForm(forms.ModelForm):
prefix = 'impact_factor'
def __init__(self, *args, **kwargs):
super(EstimatedImpactFactorForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_class = 'form-inline'
self.helper.field_template = 'bootstrap3/layout/inline_field.html'
self.helper.form_id = 'id-estimatedimpactfactorForm'
self.helper.layout = Layout(Field("estimated_impact_factor", min=0, max=1000, value="",
template=self.helper.field_template))
self.helper.add_input(Submit('submit', _('Evaluate')))
class Meta:
model = EstimatedImpactFactor
fields = ["estimated_impact_factor"]
class CommentReviewValidationForm(forms.ModelForm):
valid = forms.ChoiceField(choices=BOOLEAN_CHOICES, widget=forms.Select())
def __init__(self, *args, **kwargs):
super(CommentReviewValidationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id-commentreviewvalidationForm'
self.helper.add_input(Submit('submit', _('Validate')))
class Meta:
model = CommentReview
fields = ["valid", "seriousness", "reason_validation"]
widgets = {'reason_validation': forms.Textarea()}
class CommentReviewCorrectionForm(forms.ModelForm):
corrected = forms.ChoiceField(choices=BOOLEAN_CHOICES, widget=forms.Select())
def __init__(self, *args, **kwargs):
super(CommentReviewCorrectionForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_id = 'id-commentreviewvcorrectionForm'
self.helper.add_input(Submit('submit', _('Corrected')))
class Meta:
model = CommentReview
fields = ["corrected", "reason_correction"]
widgets = {'reason_correction': forms.Textarea()}
|
agpl-3.0
| 1,018,924,324,186,331,900
| 37.608974
| 113
| 0.65377
| false
| 4.0389
| false
| false
| false
|
encukou/freeipa
|
ipalib/x509.py
|
1
|
28653
|
# Authors:
# Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Certificates should be stored internally DER-encoded. We can be passed
# a certificate several ways: read if from LDAP, read it from a 3rd party
# app (dogtag, candlepin, etc) or as user input.
# Conventions
#
# Where possible the following naming conventions are used:
#
# cert: the certificate is a PEM-encoded certificate
# dercert: the certificate is DER-encoded
# rawcert: the cert is in an unknown format
from __future__ import print_function
import os
import binascii
import datetime
import enum
import ipaddress
import ssl
import base64
import re
from cryptography import x509 as crypto_x509
from cryptography import utils as crypto_utils
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import (
Encoding, PublicFormat, PrivateFormat, load_pem_private_key
)
import pyasn1
import pyasn1.error
from pyasn1.type import univ, char, namedtype, tag
from pyasn1.codec.der import decoder, encoder
from pyasn1_modules import rfc2315, rfc2459
import six
from ipalib import errors
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
PEM = 0
DER = 1
# The first group is the whole PEM datum and the second group is
# the base64 content (with newlines). For findall() the result is
# a list of 2-tuples of the PEM and base64 data.
PEM_CERT_REGEX = re.compile(
b'(-----BEGIN CERTIFICATE-----(.*?)-----END CERTIFICATE-----)',
re.DOTALL)
PEM_PRIV_REGEX = re.compile(
b'-----BEGIN(?: ENCRYPTED)?(?: (?:RSA|DSA|DH|EC))? PRIVATE KEY-----.*?'
b'-----END(?: ENCRYPTED)?(?: (?:RSA|DSA|DH|EC))? PRIVATE KEY-----',
re.DOTALL)
EKU_SERVER_AUTH = '1.3.6.1.5.5.7.3.1'
EKU_CLIENT_AUTH = '1.3.6.1.5.5.7.3.2'
EKU_CODE_SIGNING = '1.3.6.1.5.5.7.3.3'
EKU_EMAIL_PROTECTION = '1.3.6.1.5.5.7.3.4'
EKU_PKINIT_CLIENT_AUTH = '1.3.6.1.5.2.3.4'
EKU_PKINIT_KDC = '1.3.6.1.5.2.3.5'
EKU_ANY = '2.5.29.37.0'
EKU_PLACEHOLDER = '1.3.6.1.4.1.3319.6.10.16'
SAN_UPN = '1.3.6.1.4.1.311.20.2.3'
SAN_KRB5PRINCIPALNAME = '1.3.6.1.5.2.2'
@crypto_utils.register_interface(crypto_x509.Certificate)
class IPACertificate:
"""
A proxy class wrapping a python-cryptography certificate representation for
FreeIPA purposes
"""
def __init__(self, cert, backend=None):
"""
:param cert: A python-cryptography Certificate object
:param backend: A python-cryptography Backend object
"""
self._cert = cert
self.backend = default_backend() if backend is None else backend()
# initialize the certificate fields
        # we have to do it this way so that some systems don't explode,
        # since encoding/decoding of some field types is not strongly defined
self._subject = self.__get_der_field('subject')
self._issuer = self.__get_der_field('issuer')
self._serial_number = self.__get_der_field('serialNumber')
def __getstate__(self):
state = {
'_cert': self.public_bytes(Encoding.DER),
'_subject': self.subject_bytes,
'_issuer': self.issuer_bytes,
'_serial_number': self._serial_number,
}
return state
def __setstate__(self, state):
self._subject = state['_subject']
self._issuer = state['_issuer']
        self._serial_number = state['_serial_number']
self._cert = crypto_x509.load_der_x509_certificate(
state['_cert'], backend=default_backend())
def __eq__(self, other):
"""
Checks equality.
:param other: either cryptography.Certificate or IPACertificate or
bytes representing a DER-formatted certificate
"""
if (isinstance(other, (crypto_x509.Certificate, IPACertificate))):
return (self.public_bytes(Encoding.DER) ==
other.public_bytes(Encoding.DER))
elif isinstance(other, bytes):
return self.public_bytes(Encoding.DER) == other
else:
return False
def __ne__(self, other):
"""
Checks not equal.
"""
return not self.__eq__(other)
def __hash__(self):
"""
Computes a hash of the wrapped cryptography.Certificate.
"""
return hash(self._cert)
def __encode_extension(self, oid, critical, value):
# TODO: have another proxy for crypto_x509.Extension which would
# provide public_bytes on the top of what python-cryptography has
ext = rfc2459.Extension()
# TODO: this does not have to be so weird, pyasn1 now has codecs
# which are capable of providing python-native types
ext['extnID'] = univ.ObjectIdentifier(oid)
ext['critical'] = univ.Boolean(critical)
if pyasn1.__version__.startswith('0.3'):
# pyasn1 <= 0.3.7 needs explicit encoding
# see https://pagure.io/freeipa/issue/7685
value = encoder.encode(univ.OctetString(value))
ext['extnValue'] = univ.Any(value)
ext = encoder.encode(ext)
return ext
def __get_pyasn1_field(self, field):
"""
:returns: a field of the certificate in pyasn1 representation
"""
cert_bytes = self.tbs_certificate_bytes
cert = decoder.decode(cert_bytes, rfc2459.TBSCertificate())[0]
field = cert[field]
return field
def __get_der_field(self, field):
"""
:field: the name of the field of the certificate
:returns: bytes representing the value of a certificate field
"""
return encoder.encode(self.__get_pyasn1_field(field))
def public_bytes(self, encoding):
"""
Serializes the certificate to PEM or DER format.
"""
return self._cert.public_bytes(encoding)
def is_self_signed(self):
"""
:returns: True if this certificate is self-signed, False otherwise
"""
return self._cert.issuer == self._cert.subject
def fingerprint(self, algorithm):
"""
        Computes the fingerprint of the wrapped cryptography.Certificate.
"""
return self._cert.fingerprint(algorithm)
@property
def serial_number(self):
return self._cert.serial_number
@property
def serial_number_bytes(self):
return self._serial_number
@property
def version(self):
return self._cert.version
@property
def subject(self):
return self._cert.subject
@property
def subject_bytes(self):
return self._subject
@property
def signature_hash_algorithm(self):
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate.
"""
return self._cert.signature_hash_algorithm
@property
def signature_algorithm_oid(self):
"""
Returns the ObjectIdentifier of the signature algorithm.
"""
return self._cert.signature_algorithm_oid
@property
def signature(self):
"""
Returns the signature bytes.
"""
return self._cert.signature
@property
def issuer(self):
return self._cert.issuer
@property
def issuer_bytes(self):
return self._issuer
@property
def not_valid_before(self):
return self._cert.not_valid_before
@property
def not_valid_after(self):
return self._cert.not_valid_after
@property
def tbs_certificate_bytes(self):
return self._cert.tbs_certificate_bytes
@property
def extensions(self):
# TODO: own Extension and Extensions classes proxying
# python-cryptography
return self._cert.extensions
def public_key(self):
return self._cert.public_key()
@property
def public_key_info_bytes(self):
return self._cert.public_key().public_bytes(
encoding=Encoding.DER, format=PublicFormat.SubjectPublicKeyInfo)
@property
def extended_key_usage(self):
try:
ext_key_usage = self._cert.extensions.get_extension_for_oid(
crypto_x509.oid.ExtensionOID.EXTENDED_KEY_USAGE).value
except crypto_x509.ExtensionNotFound:
return None
return set(oid.dotted_string for oid in ext_key_usage)
@property
def extended_key_usage_bytes(self):
eku = self.extended_key_usage
if eku is None:
return None
ekurfc = rfc2459.ExtKeyUsageSyntax()
for i, oid in enumerate(sorted(eku)):
ekurfc[i] = univ.ObjectIdentifier(oid)
ekurfc = encoder.encode(ekurfc)
return self.__encode_extension('2.5.29.37', EKU_ANY not in eku, ekurfc)
@property
def san_general_names(self):
"""
Return SAN general names from a python-cryptography
certificate object. If the SAN extension is not present,
return an empty sequence.
Because python-cryptography does not yet provide a way to
handle unrecognised critical extensions (which may occur),
we must parse the certificate and extract the General Names.
For uniformity with other code, we manually construct values
        of python-cryptography GeneralName subtypes.
python-cryptography does not yet provide types for
ediPartyName or x400Address, so we drop these name types.
otherNames are NOT instantiated to more specific types where
the type is known. Use ``process_othernames`` to do that.
When python-cryptography can handle certs with unrecognised
critical extensions and implements ediPartyName and
x400Address, this function (and helpers) will be redundant
and should go away.
"""
gns = self.__pyasn1_get_san_general_names()
GENERAL_NAME_CONSTRUCTORS = {
'rfc822Name': lambda x: crypto_x509.RFC822Name(unicode(x)),
'dNSName': lambda x: crypto_x509.DNSName(unicode(x)),
'directoryName': _pyasn1_to_cryptography_directoryname,
'registeredID': _pyasn1_to_cryptography_registeredid,
'iPAddress': _pyasn1_to_cryptography_ipaddress,
'uniformResourceIdentifier':
lambda x: crypto_x509.UniformResourceIdentifier(unicode(x)),
'otherName': _pyasn1_to_cryptography_othername,
}
result = []
for gn in gns:
gn_type = gn.getName()
if gn_type in GENERAL_NAME_CONSTRUCTORS:
result.append(
GENERAL_NAME_CONSTRUCTORS[gn_type](gn.getComponent()))
return result
def __pyasn1_get_san_general_names(self):
# pyasn1 returns None when the key is not present in the certificate
# but we need an iterable
extensions = self.__get_pyasn1_field('extensions') or []
OID_SAN = univ.ObjectIdentifier('2.5.29.17')
gns = []
for ext in extensions:
if ext['extnID'] == OID_SAN:
der = ext['extnValue']
if pyasn1.__version__.startswith('0.3'):
# pyasn1 <= 0.3.7 needs explicit unwrap of ANY container
# see https://pagure.io/freeipa/issue/7685
der = decoder.decode(der, asn1Spec=univ.OctetString())[0]
gns = decoder.decode(der, asn1Spec=rfc2459.SubjectAltName())[0]
break
return gns
@property
def san_a_label_dns_names(self):
gns = self.__pyasn1_get_san_general_names()
result = []
for gn in gns:
if gn.getName() == 'dNSName':
result.append(unicode(gn.getComponent()))
return result
def match_hostname(self, hostname):
match_cert = {}
match_cert['subject'] = match_subject = []
for rdn in self._cert.subject.rdns:
match_rdn = []
for ava in rdn:
if ava.oid == crypto_x509.oid.NameOID.COMMON_NAME:
match_rdn.append(('commonName', ava.value))
match_subject.append(match_rdn)
values = self.san_a_label_dns_names
if values:
match_cert['subjectAltName'] = match_san = []
for value in values:
match_san.append(('DNS', value))
ssl.match_hostname(match_cert, DNSName(hostname).ToASCII())
def load_pem_x509_certificate(data):
"""
Load an X.509 certificate in PEM format.
    :returns: an ``IPACertificate`` object.
:raises: ``ValueError`` if unable to load the certificate.
"""
return IPACertificate(
crypto_x509.load_pem_x509_certificate(data, backend=default_backend())
)
def load_der_x509_certificate(data):
"""
Load an X.509 certificate in DER format.
    :returns: an ``IPACertificate`` object.
:raises: ``ValueError`` if unable to load the certificate.
"""
return IPACertificate(
crypto_x509.load_der_x509_certificate(data, backend=default_backend())
)
def load_unknown_x509_certificate(data):
"""
    Only use this function when you can't be sure what format your
    certificate is in, e.g. input certificate files in installers.
    :returns: an ``IPACertificate`` object.
:raises: ``ValueError`` if unable to load the certificate.
"""
try:
return load_pem_x509_certificate(data)
except ValueError:
return load_der_x509_certificate(data)
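# A minimal usage sketch (the path is hypothetical):
#
#   with open('/path/to/some-ca.crt', 'rb') as f:
#       cert = load_unknown_x509_certificate(f.read())
#   cert.san_a_label_dns_names        # -> list of dNSName values, if any
#   cert.public_bytes(Encoding.DER)   # -> DER bytes of the certificate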
def load_certificate_from_file(filename):
"""
Load a certificate from a PEM file.
    Return an ``IPACertificate`` object.
"""
with open(filename, mode='rb') as f:
return load_pem_x509_certificate(f.read())
def load_certificate_list(data):
"""
Load a certificate list from a sequence of concatenated PEMs.
    Return a list of ``IPACertificate`` objects.
"""
certs = PEM_CERT_REGEX.findall(data)
return [load_pem_x509_certificate(cert[0]) for cert in certs]
def load_certificate_list_from_file(filename):
"""
Load a certificate list from a PEM file.
    Return a list of ``IPACertificate`` objects.
"""
with open(filename, 'rb') as f:
return load_certificate_list(f.read())
def load_private_key_list(data, password=None):
"""
Load a private key list from a sequence of concatenated PEMs.
:param data: bytes containing the private keys
:param password: bytes, the password to encrypted keys in the bundle
:returns: List of python-cryptography ``PrivateKey`` objects
"""
crypto_backend = default_backend()
priv_keys = []
for match in re.finditer(PEM_PRIV_REGEX, data):
if re.search(b"ENCRYPTED", match.group()) is not None:
if password is None:
raise RuntimeError("Password is required for the encrypted "
"keys in the bundle.")
# Load private key as encrypted
priv_keys.append(
load_pem_private_key(match.group(), password,
backend=crypto_backend))
else:
priv_keys.append(
load_pem_private_key(match.group(), None,
backend=crypto_backend))
return priv_keys
def pkcs7_to_certs(data, datatype=PEM):
"""
Extract certificates from a PKCS #7 object.
:returns: a ``list`` of ``IPACertificate`` objects.
"""
if datatype == PEM:
match = re.match(
br'-----BEGIN PKCS7-----(.*?)-----END PKCS7-----',
data,
re.DOTALL)
if not match:
raise ValueError("not a valid PKCS#7 PEM")
data = base64.b64decode(match.group(1))
content_info, tail = decoder.decode(data, rfc2315.ContentInfo())
if tail:
raise ValueError("not a valid PKCS#7 message")
if content_info['contentType'] != rfc2315.signedData:
raise ValueError("not a PKCS#7 signed data message")
signed_data, tail = decoder.decode(bytes(content_info['content']),
rfc2315.SignedData())
if tail:
raise ValueError("not a valid PKCS#7 signed data message")
result = []
for certificate in signed_data['certificates']:
certificate = encoder.encode(certificate)
certificate = load_der_x509_certificate(certificate)
result.append(certificate)
return result
def validate_pem_x509_certificate(cert):
"""
Perform cert validation by trying to load it via python-cryptography.
"""
try:
load_pem_x509_certificate(cert)
except ValueError as e:
raise errors.CertificateFormatError(error=str(e))
def validate_der_x509_certificate(cert):
"""
Perform cert validation by trying to load it via python-cryptography.
"""
try:
load_der_x509_certificate(cert)
except ValueError as e:
raise errors.CertificateFormatError(error=str(e))
def write_certificate(cert, filename):
"""
Write the certificate to a file in PEM format.
:param cert: cryptograpy ``Certificate`` object
"""
try:
with open(filename, 'wb') as fp:
fp.write(cert.public_bytes(Encoding.PEM))
except (IOError, OSError) as e:
raise errors.FileError(reason=str(e))
def write_certificate_list(certs, filename, mode=None):
"""
Write a list of certificates to a file in PEM format.
:param certs: a list of IPACertificate objects to be written to a file
:param filename: a path to the file the certificates should be written into
"""
try:
with open(filename, 'wb') as f:
if mode is not None:
os.fchmod(f.fileno(), mode)
for cert in certs:
f.write(cert.public_bytes(Encoding.PEM))
except (IOError, OSError) as e:
raise errors.FileError(reason=str(e))
def write_pem_private_key(priv_key, filename, passwd=None):
"""
    Write a private key to a file in PEM format. Will force 0o600 permissions
    on the file.
:param priv_key: cryptography ``PrivateKey`` object
:param passwd: ``bytes`` representing the password to store the
private key with
"""
if passwd is not None:
enc_alg = serialization.BestAvailableEncryption(passwd)
else:
enc_alg = serialization.NoEncryption()
try:
with open(filename, 'wb') as fp:
os.fchmod(fp.fileno(), 0o600)
fp.write(priv_key.private_bytes(
Encoding.PEM,
PrivateFormat.PKCS8,
encryption_algorithm=enc_alg))
except (IOError, OSError) as e:
raise errors.FileError(reason=str(e))
class _PrincipalName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name-type', univ.Integer().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
namedtype.NamedType('name-string', univ.SequenceOf(char.GeneralString()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
),
)
class _KRB5PrincipalName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('realm', char.GeneralString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
namedtype.NamedType('principalName', _PrincipalName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
),
)
def _decode_krb5principalname(data):
principal = decoder.decode(data, asn1Spec=_KRB5PrincipalName())[0]
realm = (unicode(principal['realm']).replace('\\', '\\\\')
.replace('@', '\\@'))
name = principal['principalName']['name-string']
name = u'/'.join(unicode(n).replace('\\', '\\\\')
.replace('/', '\\/')
.replace('@', '\\@') for n in name)
name = u'%s@%s' % (name, realm)
return name
class KRB5PrincipalName(crypto_x509.general_name.OtherName):
def __init__(self, type_id, value):
super(KRB5PrincipalName, self).__init__(type_id, value)
self.name = _decode_krb5principalname(value)
class UPN(crypto_x509.general_name.OtherName):
def __init__(self, type_id, value):
super(UPN, self).__init__(type_id, value)
self.name = unicode(
decoder.decode(value, asn1Spec=char.UTF8String())[0])
OTHERNAME_CLASS_MAP = {
SAN_KRB5PRINCIPALNAME: KRB5PrincipalName,
SAN_UPN: UPN,
}
def process_othernames(gns):
"""
Process python-cryptography GeneralName values, yielding
OtherName values of more specific type if type is known.
"""
for gn in gns:
if isinstance(gn, crypto_x509.general_name.OtherName):
cls = OTHERNAME_CLASS_MAP.get(
gn.type_id.dotted_string,
crypto_x509.general_name.OtherName)
yield cls(gn.type_id, gn.value)
else:
yield gn
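# A minimal usage sketch (assumes 'cert' is an IPACertificate whose SAN
# contains a Kerberos principal name):
#
#   for gn in process_othernames(cert.san_general_names):
#       if isinstance(gn, KRB5PrincipalName):
#           print(gn.name)   # e.g. u'host/ipa.example.com@EXAMPLE.COM'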
def _pyasn1_to_cryptography_directoryname(dn):
attrs = []
# Name is CHOICE { RDNSequence } (only one possibility)
for rdn in dn.getComponent():
for ava in rdn:
attr = crypto_x509.NameAttribute(
_pyasn1_to_cryptography_oid(ava['type']),
unicode(decoder.decode(ava['value'])[0])
)
attrs.append(attr)
return crypto_x509.DirectoryName(crypto_x509.Name(attrs))
def _pyasn1_to_cryptography_registeredid(oid):
return crypto_x509.RegisteredID(_pyasn1_to_cryptography_oid(oid))
def _pyasn1_to_cryptography_ipaddress(octet_string):
return crypto_x509.IPAddress(
ipaddress.ip_address(bytes(octet_string)))
def _pyasn1_to_cryptography_othername(on):
return crypto_x509.OtherName(
_pyasn1_to_cryptography_oid(on['type-id']),
bytes(on['value'])
)
def _pyasn1_to_cryptography_oid(oid):
return crypto_x509.ObjectIdentifier(str(oid))
def chunk(size, s):
"""Yield chunks of the specified size from the given string.
The input must be a multiple of the chunk size (otherwise
trailing characters are dropped).
Works on character strings only.
"""
return (u''.join(span) for span in six.moves.zip(*[iter(s)] * size))
def add_colons(s):
"""Add colons between each nibble pair in a hex string."""
return u':'.join(chunk(2, s))
def to_hex_with_colons(bs):
"""Convert bytes to a hex string with colons."""
return add_colons(binascii.hexlify(bs).decode('utf-8'))
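# A minimal usage sketch:
#
#   add_colons(u'0123abcd')            # -> u'01:23:ab:cd'
#   to_hex_with_colons(b'\x01\x23')    # -> u'01:23'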
class UTC(datetime.tzinfo):
ZERO = datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
def format_datetime(t):
if t.tzinfo is None:
t = t.replace(tzinfo=UTC())
return unicode(t.strftime("%a %b %d %H:%M:%S %Y %Z"))
class ExternalCAType(enum.Enum):
GENERIC = 'generic'
MS_CS = 'ms-cs'
class ExternalCAProfile:
"""
An external CA profile configuration. Currently the only
subclasses are for Microsoft CAs, for providing data in the
"Certificate Template" extension.
Constructing this class will actually return an instance of a
subclass.
Subclasses MUST set ``valid_for``.
"""
def __init__(self, s=None):
self.unparsed_input = s
# Which external CA types is the data valid for?
# A set of VALUES of the ExternalCAType enum.
valid_for = set()
def __new__(cls, s=None):
"""Construct the ExternalCAProfile value.
Return an instance of a subclass determined by
the format of the argument.
"""
# we are directly constructing a subclass; instantiate
# it and be done
if cls is not ExternalCAProfile:
return super(ExternalCAProfile, cls).__new__(cls)
# construction via the base class; therefore the string
# argument is required, and is used to determine which
# subclass to construct
if s is None:
raise ValueError('string argument is required')
parts = s.split(':')
try:
            # Is the first part an OID?
_oid = univ.ObjectIdentifier(parts[0])
# It is; construct a V2 template
# pylint: disable=too-many-function-args
return MSCSTemplateV2.__new__(MSCSTemplateV2, s)
except pyasn1.error.PyAsn1Error:
# It is not an OID; treat as a template name
# pylint: disable=too-many-function-args
return MSCSTemplateV1.__new__(MSCSTemplateV1, s)
def __getstate__(self):
return self.unparsed_input
def __setstate__(self, state):
# explicitly call __init__ method to initialise object
self.__init__(state)
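# Editor's note (illustrative): the dispatch behaviour described in the
# ExternalCAProfile docstring above - a bare template name yields a V1
# specifier, while an OID-prefixed string yields a V2 specifier.
#
#   >>> type(ExternalCAProfile(u'SubCA')).__name__
#   'MSCSTemplateV1'
#   >>> type(ExternalCAProfile(u'1.2.3.4:100:2')).__name__
#   'MSCSTemplateV2'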
class MSCSTemplate(ExternalCAProfile):
"""
An Microsoft AD-CS Template specifier.
Subclasses MUST set ext_oid.
Subclass constructors MUST set asn1obj.
"""
valid_for = set([ExternalCAType.MS_CS.value])
ext_oid = None # extension OID, as a Python str
asn1obj = None # unencoded extension data
def get_ext_data(self):
"""Return DER-encoded extension data."""
return encoder.encode(self.asn1obj)
class MSCSTemplateV1(MSCSTemplate):
"""
A v1 template specifier, per
https://msdn.microsoft.com/en-us/library/cc250011.aspx.
::
CertificateTemplateName ::= SEQUENCE {
Name UTF8String
}
But note that a bare BMPString is used in practice.
"""
ext_oid = "1.3.6.1.4.1.311.20.2"
def __init__(self, s):
super(MSCSTemplateV1, self).__init__(s)
parts = s.split(':')
if len(parts) > 1:
raise ValueError(
"Cannot specify certificate template version when using name.")
self.asn1obj = char.BMPString(str(parts[0]))
class MSCSTemplateV2(MSCSTemplate):
"""
A v2 template specifier, per
https://msdn.microsoft.com/en-us/library/windows/desktop/aa378274(v=vs.85).aspx
::
CertificateTemplate ::= SEQUENCE {
templateID EncodedObjectID,
templateMajorVersion TemplateVersion,
templateMinorVersion TemplateVersion OPTIONAL
}
TemplateVersion ::= INTEGER (0..4294967295)
"""
ext_oid = "1.3.6.1.4.1.311.21.7"
@staticmethod
def check_version_in_range(desc, n):
if n < 0 or n >= 2**32:
raise ValueError(
"Template {} version must be in range 0..4294967295"
.format(desc))
def __init__(self, s):
super(MSCSTemplateV2, self).__init__(s)
parts = s.split(':')
obj = CertificateTemplateV2()
if len(parts) < 2 or len(parts) > 3:
raise ValueError(
"Incorrect template specification; required format is: "
"<oid>:<majorVersion>[:<minorVersion>]")
try:
obj['templateID'] = univ.ObjectIdentifier(parts[0])
major = int(parts[1])
self.check_version_in_range("major", major)
obj['templateMajorVersion'] = major
if len(parts) > 2:
minor = int(parts[2])
self.check_version_in_range("minor", minor)
obj['templateMinorVersion'] = int(parts[2])
except pyasn1.error.PyAsn1Error:
raise ValueError("Could not parse certificate template specifier.")
self.asn1obj = obj
class CertificateTemplateV2(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('templateID', univ.ObjectIdentifier()),
namedtype.NamedType('templateMajorVersion', univ.Integer()),
namedtype.OptionalNamedType('templateMinorVersion', univ.Integer())
)
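# Editor's note (illustrative): constructing a V2 specifier and the DER blob
# that would be placed in the ext_oid (1.3.6.1.4.1.311.21.7) extension:
#
#   profile = MSCSTemplateV2(u'1.2.3.4:100:2')
#   der = profile.get_ext_data()  # encoder.encode() of a CertificateTemplateV2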
|
gpl-3.0
| 5,514,516,305,728,191,000
| 30.077007
| 89
| 0.621191
| false
| 3.898898
| false
| false
| false
|
beiko-lab/gengis
|
bin/Lib/site-packages/numpy/core/tests/test_errstate.py
|
1
|
1930
|
# The following exec statement (or something like it) is needed to
# prevent SyntaxError on Python < 2.5. Even though this is a test,
# SyntaxErrors are not acceptable; on Debian systems, they block
# byte-compilation during install and thus cause the package to fail
# to install.
import sys
if sys.version_info[:2] >= (2, 5):
exec """
from __future__ import with_statement
import platform
from numpy.core import *
from numpy.random import rand, randint
from numpy.testing import *
class TestErrstate(TestCase):
@dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_invalid(self):
with errstate(all='raise', under='ignore'):
a = -arange(3)
# This should work
with errstate(invalid='ignore'):
sqrt(a)
# While this should fail!
try:
sqrt(a)
except FloatingPointError:
pass
else:
self.fail("Did not raise an invalid error")
def test_divide(self):
with errstate(all='raise', under='ignore'):
a = -arange(3)
# This should work
with errstate(divide='ignore'):
a // 0
# While this should fail!
try:
a // 0
except FloatingPointError:
pass
else:
self.fail("Did not raise divide by zero error")
def test_errcall(self):
def foo(*args):
print(args)
olderrcall = geterrcall()
with errstate(call=foo):
assert_(geterrcall() is foo, 'call is not foo')
with errstate(call=None):
assert_(geterrcall() is None, 'call is not None')
assert_(geterrcall() is olderrcall, 'call is not olderrcall')
"""
if __name__ == "__main__":
run_module_suite()
|
gpl-3.0
| 8,773,401,320,805,839,000
| 29.639344
| 69
| 0.542487
| false
| 4.177489
| true
| false
| false
|
BuzzFeedNews/nics-firearm-background-checks
|
scripts/chart-total-checks-36-months.py
|
1
|
1215
|
#!/usr/bin/env python
import sys, os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
from matplotlib.dates import MonthLocator
import seaborn as sb
sb.set()
checks = (
pd.read_csv(sys.stdin)
.assign(
month_dt = lambda df: pd.to_datetime(df["month"], format = "%Y-%m")
)
)
checks["year_int"] = checks["month"].apply(lambda x: int(x.split("-")[0]))
checks["month_int"] = checks["month"].apply(lambda x: int(x.split("-")[1]))
latest_month_count = (
checks
.iloc[0]
.pipe(lambda x: x["month_int"] + (x["year_int"] * 12))
)
totals = (
checks
.loc[lambda df: (df["month_int"] + (df["year_int"] * 12))
> (latest_month_count - 12*3)]
.groupby("month_dt")
["totals"]
.sum()
)
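# Editor's note (illustrative): month_int + year_int * 12 gives a running
# month index (e.g. 2019-03 -> 3 + 2019 * 12 = 24231); rows whose index is
# greater than the newest index minus 36 form the trailing 36 months. The
# first CSV row is assumed to hold the most recent month.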
ax = totals.plot(kind="area", figsize=(12, 8), color="#000000", alpha=0.5)
ax.figure.set_facecolor("#FFFFFF")
ax.set_title(
"NICS Background Check Totals — Past 36 Months",
fontsize=24
)
plt.setp(ax.get_yticklabels(), fontsize=12)
ax.yaxis.set_major_formatter(StrMethodFormatter("{x:,.0f}"))
ax.xaxis.set_minor_locator(MonthLocator(range(1, 13)))
ax.set_xlabel("")
plt.savefig(sys.stdout.buffer)
|
mit
| -1,528,021,792,744,747,500
| 23.734694
| 75
| 0.644389
| false
| 2.885714
| false
| true
| false
|
Universal-Model-Converter/UMC3.0a
|
dev tests and files/data backups/GUI_update.py
|
1
|
17618
|
#1
#v 0.001
#I don't have a TOC here yet as everything constantly changes
import COMMON #file vars and functions for import/export processing
import VIEWER #mainly for the toggles
from VIEWER import __GL,__GLU #GL functions
from VIEWER import __pyg
'''
from COMMON import Scripts
#Shapes (private)
#Widgets (private)
def Button(Text,X,Y,W,H,): pass
def Browser():
import os
Dir='C:/'; done=0
clicked = 0
while not done:
items = os.listdir(Dir)
cancel = Button('Cancel')
if not cancel:
if Button('..'):
Dir
else: #need a better RT method >_>
#TODO: parse the list and collect info first
for item in items:
if Button(item): #draw Clicked button
clicked=1
else: #draw unclicked button
if clicked: #action
clicked=0
if os.path.isdir(Dir+item):
Dir+=(item+'/')
else:
done=1
return Dir+item
else:
pass
else:
done=1
return None
'''
#the GL selection/feedback buffers are a bit complicated for me,
#so I've defined my own method derived from GL. (should be slightly faster than re-defining everything)
#this method compares the hitdefs with the current selection and changes the state of a valid hit
W_States = {} #this stores the mouse state for the current widget
#further state processing can be done by the widget itself.
# { name: [L,M,R,O] } #O - mouseOver
W_Info = {} #this stores the state info of each widget
#this determines whether a toggle is active, or a selection has yet to be made
__UpdateHits=True #allow for hit updates
W_HitDefs = {} #this stores the hit-area for each widget
#this is constantly cleared and updated during state changes
# { name: [X1,Y1,X2,Y2] }
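#editor's illustrative sketch (assumed values): after one frame with a single
#toggle button named 'EnWire' held under the pointer, the bookkeeping above
#could look like:
# W_HitDefs == {'EnWire': [0.1, 0.1, 0.125, 0.133]} #normalised screen rect
# W_States == {'EnWire': [1, 0, 0, True]} #left button held, mouse over
# W_Info == {'EnWire': False} #toggle not yet switched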
pw,ph = 1.0/800,1.0/600
#-----------------------------------
#I/O process functions
def __ImportModel():
pass
def __ExportModel():
pass
def __ImportAnim():
pass
def __ExportAnim():
pass
def __Browser(Scripts): #overlays GUI when activated (Clears hit-defs to avoid improper activation)
#return file_path, Module
pass
#-----------------------------------
#widget resources
FontSize=0
FontObj=None #cached font object, rebuilt only when the requested size changes
def __font(x,y,size,text,color=(0,0,0,255)):
    global pw,ph,FontSize,FontObj
#__GL.glEnable(__GL.GL_TEXTURE_2D)
#Create Font
    #to increase performance, only create a new font when changing the size
    if size != FontSize or FontObj is None: #don't use .fon files
        FontObj=__pyg.font.Font('fonts/tahoma.ttf',size)
        FontSize=size
    F=FontObj
w,h=F.size(text)
#_w,_h=1,1 #GL-modified width/height (binary multiple)
#while _w<w: _w<<=1
#while _h<h: _h<<=1
#fsurf=__pyg.Surface((w,h),__pyg.SRCALPHA)
#fsurf.blit(__pyg.transform.flip(F.render(text,True,color), False, True),(0,0)) #Create GL-Font Image
#w,h=fsurf.get_size()
image=__pyg.transform.flip(F.render(text,True,color), False, True).get_buffer().raw #get raw pixel data
# Create Texture __GL.glGenTextures(1)
'''__GL.glBindTexture(__GL.GL_TEXTURE_2D, 0) # 2d texture (x and y size)
__GL.glPixelStorei(__GL.GL_UNPACK_ALIGNMENT,1)
__GL.glTexImage2D(__GL.GL_TEXTURE_2D, 0, 3, _w, _h, 0, __GL.GL_BGRA, __GL.GL_UNSIGNED_BYTE, image)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_WRAP_S, __GL.GL_CLAMP)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_WRAP_T, __GL.GL_CLAMP)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_WRAP_S, __GL.GL_REPEAT)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_WRAP_T, __GL.GL_REPEAT)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_MAG_FILTER, __GL.GL_NEAREST)
__GL.glTexParameterf(__GL.GL_TEXTURE_2D, __GL.GL_TEXTURE_MIN_FILTER, __GL.GL_NEAREST)
__GL.glTexEnvf(__GL.GL_TEXTURE_ENV, __GL.GL_TEXTURE_ENV_MODE, __GL.GL_DECAL)
w*=pw; h*=ph
__GL.glBegin(__GL.GL_QUADS)
__GL.glColor4f(0.0,0.0,0.0,color[3]*(1.0/255))
__GL.glVertex2f(x,y); __GL.glTexCoord2f(0.0,0.0)
__GL.glVertex2f(x+w,y); __GL.glTexCoord2f(1.0,0.0)
__GL.glVertex2f(x+w,y+h); __GL.glTexCoord2f(1.0,1.0)
__GL.glVertex2f(x,y+h); __GL.glTexCoord2f(0.0,1.0)
__GL.glEnd()'''
__GL.glRasterPos2f(float(x)*pw if type(x)==int else x ,
float(y+h)*ph if type(y)==int else y+(h*ph) )
__GL.glDrawPixels(w,h,__GL.GL_BGRA,__GL.GL_UNSIGNED_BYTE,image)
del(image) #remove the old buffer
#__GL.glDisable(__GL.GL_TEXTURE_2D)
#-----------------------------------
#internal widgets (bound to change)
def __DropBox(X,Y,W,Na,Items,Def=0,Text=''):
global W_States,W_Info,W_HitDefs,__UpdateHits
global pw,ph
X2,Y2 = X+(pw*(W*10)),Y+(ph*20)
#Widget init info
try: W_States[Na]
except KeyError:
W_States.update({Na:[0,0,0,False]})
W_Info.update({Na:[Def,False]})
if __UpdateHits: W_HitDefs.update({Na:[X,Y,X2+(pw*15),Y2]})
#Widget logic
L,M,R,O = W_States[Na]
if L==2:
W_Info[Na][1]=True
W_States[Na][0]=0
State = W_Info[Na]
__GL.glBegin(__GL.GL_QUADS)
__GL.glColor4f(1.0,1.0,1.0,0.25)
__GL.glVertex2f(X,Y)
__GL.glVertex2f(X2,Y)
__GL.glVertex2f(X2,Y2)
__GL.glVertex2f(X,Y2)
__GL.glColor4f(0.0,0.0,0.0,0.1)
__GL.glVertex2f(X2,Y)
__GL.glVertex2f(X2+(pw*15),Y)
__GL.glVertex2f(X2+(pw*15),Y2)
__GL.glVertex2f(X2,Y2)
__GL.glEnd()
__font(X+(5*pw),Y+(2*ph),12,Na,(0,0,0,100))
if State[1]:
W_HitDefs={}
__UpdateHits=False #prevent hit updates from other widgets
#once we've made our selection, we can then allow hit updates
remove=False
for i,v in enumerate(Items):
#we have to create custom widget defs for each entry here
N = '%s_%s_Sel%i'%(Na,v,i) #Na+v+'_Sel'+str(i)
x1,y1,x2,y2=X,Y+((Y2-Y)*(i+1)),X2,Y2+((Y2-Y)*(i+1))
try: W_States[N]
except KeyError: W_States.update({N:[0,0,0,False]}) #mouse updates
W_HitDefs.update({N:[x1,y1,x2,y2]})
            #these should be the only hits available
l,m,r,o = W_States[N]
        #all we need to worry about here is the state and the hit-def
if o: __GL.glColor4f(0.375,0.375,0.375,0.75)
else: __GL.glColor4f(0.0,0.0,0.0,0.5)
__GL.glBegin(__GL.GL_QUADS)
__GL.glVertex2f(x1,y1)
__GL.glVertex2f(x2,y1)
__GL.glVertex2f(x2,y2)
__GL.glVertex2f(x1,y2)
__GL.glEnd()
__font(x1+(5*pw),y1+(2*ph),12,v,(200,200,200,100))
if l==2:
W_Info[Na]=[i,False] #State should not be an index
remove=True
if remove:
for i,v in enumerate(Items): #clear the buffers of these widgets
n = '%s_%s_Sel%i'%(Na,v,i)
W_States.pop(n)
W_HitDefs.pop(n)
__UpdateHits=True
return State[0]
def __TButton(X,Y,Na,St=False,Text=''):
global W_States,W_Info,W_HitDefs,__UpdateHits
global pw,ph
#Widget init info
try: W_States[Na]
except KeyError:
W_States.update({Na:[0,0,0,False]})
W_Info.update({Na:St})
if __UpdateHits: W_HitDefs.update({Na:[X,Y,X+(pw*20),Y+(ph*20)]})
#Widget logic
L,M,R,O = W_States[Na]
if L==2:
W_Info[Na]=(False if W_Info[Na] else True)
W_States[Na][0]=0
State = W_Info[Na]
if State: __GL.glColor4f(0.0,0.0,0.0,0.25)
else: __GL.glColor4f(0.0,0.0,0.0,0.1)
__GL.glBegin(__GL.GL_QUADS)
__GL.glVertex2f(X,Y)
__GL.glVertex2f(X+(pw*20),Y)
__GL.glVertex2f(X+(pw*20),Y+(ph*20))
__GL.glVertex2f(X,Y+(ph*20))
__GL.glEnd()
__font(X+(25*pw),Y+(2*ph),12,Text,(0,0,0,100))
return State
def __Button(X1,Y1,X2,Y2,Na,Text=''):
global pw,ph
def __BrowseBar(X1,Y1,W):
global pw,ph
#-----------------------------------
#panel drawing functions
def __ModelPanel():
global pw,ph
__BrowseBar(pw*10,ph*40,180)
def __AnimPanel():
global pw,ph
pass
def __DisplayPanel(X1,X2):
global pw,ph
VIEWER.TOGGLE_LIGHTING = __TButton(pw*(X1+11),ph*31,'EnLight',True,'Lighting')
VIEWER.TOGGLE_WIREFRAME = __TButton(pw*(X1+11),ph*56,'EnWire',False,'Wireframe')
VIEWER.TOGGLE_BONES = __DropBox(pw*(X1+11),ph*81,10,'Draw Bones',['None','Standard','Overlay (X-Ray)'],0)
#reversed drawing order here so fonts overlay properly
if VIEWER.TOGGLE_3D==2: VIEWER.TOGGLE_3D_MODE[1] = [1./60,1./120][__DropBox(pw*(X1+251),ph*81,5,'Freq (WIP)',['60hz','120hz'],0)]
if VIEWER.TOGGLE_3D==1: VIEWER.TOGGLE_3D_MODE[0] = __DropBox(pw*(X1+251),ph*81,5,'Colors',['R|GB','G|RB','B|RG'],0)
VIEWER.TOGGLE_3D = __DropBox(pw*(X1+131),ph*81,10,'3D Drawing',['Off','Analglyph','Shutter'],0)
VIEWER.TOGGLE_ORTHO = __DropBox(pw*(X1+131),ph*56,10,'Projection',['Perspective','Orthographic'],1)
VIEWER.TOGGLE_GRID = [2 if VIEWER.TOGGLE_GRID>2 else VIEWER.TOGGLE_GRID,3,4][
__DropBox(pw*(X1+131),ph*31,10,'Display',['Grid','Floor','Off'],0)]
#'''
def __ControlPanel(X1,X2):
global pw,ph
pass
#-----------------------------------
def __ExPanel(X1,Y1,X2,Y2,EB,Na,MX=0,MY=0,St=True): #returns current state for other panels
global W_States,W_Info,W_HitDefs,__UpdateHits
global pw,ph
#Widget init info
try: W_States[Na]
except KeyError:
W_States.update({Na:[0,0,0,False]})
W_Info.update({Na:St})
#Widget logic
L,M,R,O = W_States[Na]
if L==2:
W_Info[Na]=(False if W_Info[Na] else True)
W_States[Na][0]=0
State = W_Info[Na]
if State:
__GL.glBegin(__GL.GL_QUADS)
__GL.glColor4f(0.5,0.5,0.5,0.8) #model (left) panel
__GL.glVertex2f(X1,Y1)
__GL.glVertex2f(X1,Y2)
__GL.glVertex2f(X2,Y2)
__GL.glVertex2f(X2,Y1)
__GL.glEnd()
#60x15px rectangle
if EB==0: #top
EBX1,EBY1,EBX2,EBY2=(X1+((X2-X1)/2)-(pw*30)),Y1,(X1+((X2-X1)/2)+(pw*30)),Y1+(ph*15)
TPX1,TPY1 = EBX1+(pw*25),EBY1+(ph*5)
TPX2,TPY2 = EBX1+(pw*30),EBY1+(ph*10)
TPX3,TPY3 = EBX1+(pw*35),EBY1+(ph*5)
elif EB==1: #right
EBX1,EBY1,EBX2,EBY2=X2-(pw*15),((Y2-Y1)/2)-(ph*30),X2,((Y2-Y1)/2)+(ph*30)
TPX1,TPY1 = EBX1+(pw*10),EBY1+(ph*25)
TPX2,TPY2 = EBX1+(pw*5),EBY1+(ph*30)
TPX3,TPY3 = EBX1+(pw*10),EBY1+(ph*35)
elif EB==2: #bottom
EBX1,EBY1,EBX2,EBY2=(X1+((X2-X1)/2)-(pw*30)),Y2-(ph*15),(X1+((X2-X1)/2)+(pw*30)),Y2
TPX1,TPY1 = EBX1+(pw*25),EBY1+(ph*10)
TPX2,TPY2 = EBX1+(pw*30),EBY1+(ph*5)
TPX3,TPY3 = EBX1+(pw*35),EBY1+(ph*10)
elif EB==3: #left
EBX1,EBY1,EBX2,EBY2=X1,((Y2-Y1)/2)-(ph*30),X1+(pw*15),((Y2-Y1)/2)+(ph*30)
TPX1,TPY1 = EBX1+(pw*5),EBY1+(ph*25)
TPX2,TPY2 = EBX1+(pw*10),EBY1+(ph*30)
TPX3,TPY3 = EBX1+(pw*5),EBY1+(ph*35)
#is the panel expanded?
if not State:
if EB==0: #top
Eq=((Y2-Y1)-(ph*15))
EBY1,EBY2=EBY1+Eq,EBY2+Eq
TPY1,TPY2,TPY3=TPY1+(Eq+(ph*5)),TPY2+(Eq-(ph*5)),TPY3+(Eq+(ph*5))
elif EB==1: #right
Eq=((X2-X1)-(pw*15))
EBX1,EBX2=EBX1-Eq,EBX2-Eq
TPX1,TPX2,TPX3=TPX1-(Eq+(pw*5)),TPX2-(Eq-(pw*5)),TPX3-(Eq+(pw*5))
elif EB==2: #bottom
Eq=((Y2-Y1)-(ph*15))
EBY1,EBY2=EBY1-Eq,EBY2-Eq
TPY1,TPY2,TPY3=TPY1-(Eq+(ph*5)),TPY2-(Eq-(ph*5)),TPY3-(Eq+(ph*5))
elif EB==3: #left
Eq=((X2-X1)-(pw*15))
EBX1,EBX2=EBX1+Eq,EBX2+Eq
TPX1,TPX2,TPX3=TPX1+(Eq+(pw*5)),TPX2+(Eq-(pw*5)),TPX3+(Eq+(pw*5))
__GL.glColor4f(0.5,0.5,0.5,0.8)
__GL.glBegin(__GL.GL_QUADS) #(just the BG color behind the toggle button)
__GL.glVertex2f(EBX1+MX,EBY1+MY)
__GL.glVertex2f(EBX1+MX,EBY2+MY)
__GL.glVertex2f(EBX2+MX,EBY2+MY)
__GL.glVertex2f(EBX2+MX,EBY1+MY)
__GL.glEnd()
if __UpdateHits: W_HitDefs.update({Na:[EBX1+MX,EBY1+MY,EBX2+MX,EBY2+MY]})
__GL.glColor4f(0.0,0.0,0.0,0.2)
__GL.glBegin(__GL.GL_QUADS)
__GL.glVertex2f(EBX1+MX,EBY1+MY)
__GL.glVertex2f(EBX1+MX,EBY2+MY)
__GL.glVertex2f(EBX2+MX,EBY2+MY)
__GL.glVertex2f(EBX2+MX,EBY1+MY)
__GL.glEnd()
__GL.glBegin(__GL.GL_TRIANGLES)
__GL.glVertex2f(TPX1+MX,TPY1+MY)
__GL.glVertex2f(TPX2+MX,TPY2+MY)
__GL.glVertex2f(TPX3+MX,TPY3+MY)
__GL.glEnd()
return State
def __DrawGUI(w,h,RotMatrix): #called directly by the display function after drawing the scene
global pw,ph
#the GUI is drawn over the scene by clearing the depth buffer
pw,ph=1./w,1./h
global W_HitDefs
W_HitDefs = {} #clear the hitdefs to avoid improper activation
__GL.glMatrixMode(__GL.GL_PROJECTION)
__GL.glLoadIdentity()
#glOrtho(-2*P, 2*P, -2, 2, -100, 100)
    __GLU.gluOrtho2D(0.0, 1.0, 1.0, 0.0) #TODO update the viewport with the pixel range instead of 1.0 (fewer GUI calculations will be needed)
__GL.glMatrixMode(__GL.GL_MODELVIEW)
__GL.glClear( __GL.GL_DEPTH_BUFFER_BIT )
__GL.glPolygonMode(__GL.GL_FRONT_AND_BACK,__GL.GL_FILL)
__GL.glLoadIdentity()
__GL.glEnable(__GL.GL_BLEND)
__GL.glDisable(__GL.GL_DEPTH_TEST)
__GL.glDisable(__GL.GL_TEXTURE_2D)
__GL.glDisable(__GL.GL_LIGHTING)
__GL.glBegin(__GL.GL_QUADS)
__GL.glColor4f(0.4,0.4,0.4,0.8) #options toggle
__GL.glVertex2f(pw*0,ph*0)
__GL.glVertex2f(pw*w,ph*0)
__GL.glVertex2f(pw*w,ph*20)
__GL.glVertex2f(pw*0,ph*20)
__GL.glEnd()
__GL.glColor4f(0.0,0.0,0.0,0.2)
__GL.glBegin(__GL.GL_TRIANGLES)
__GL.glVertex2f(pw*((w/2)-10),ph*6)
__GL.glVertex2f(pw*((w/2)+10),ph*6)
__GL.glVertex2f(pw*(w/2),ph*15)
__GL.glEnd()
M = __ExPanel(pw*0,ph*21,pw*210,ph*h,1,'MODEL')
if M: __ModelPanel()
A = __ExPanel(pw*(w-210),ph*21,pw*w,ph*h,3,'ANIM')
if A: __AnimPanel()
D = __ExPanel(pw*(211 if M else 1),ph*21,pw*(w-(211 if A else 1)),ph*150,2,'DSPL',(0 if M else pw*105)+(0 if A else pw*-105))
if D: __DisplayPanel(210 if M else 0,-210 if A else 0)
C = __ExPanel(pw*(211 if M else 1),ph*(h-150),pw*(w-(211 if A else 1)),ph*h,0,'CTRL',(0 if M else pw*105)+(0 if A else pw*-105))
if C: __ControlPanel(210 if M else 0,-210 if A else 0)
#__font(40,40,14,"testing",(128,0,0,100))
__GL.glDisable(__GL.GL_BLEND)
__GL.glEnable(__GL.GL_DEPTH_TEST)
#axis
__GL.glLineWidth(1.0)
__GL.glPushMatrix()
__GL.glTranslatef(pw*(228 if M else 17),ph*(h-(167 if C else 17)),0)
__GL.glScalef(pw*600,ph*600,1)
__GL.glMultMatrixf(RotMatrix)
__GL.glColor3f(1.0,0.0,0.0)
__GL.glBegin(__GL.GL_LINES); __GL.glVertex3f(0.0,0.0,0.0); __GL.glVertex3f(0.02,0.0,0.0); __GL.glEnd() #X
__GL.glTranslatef(0.0145,0.0,0.0); __GL.glRotatef(90, 0.0, 1.0, 0.0)
#__GLUT.glutSolidCone(0.003, 0.011, 8, 1)
__GL.glRotatef(-90, 0.0, 1.0, 0.0); __GL.glTranslatef(-0.0145,0.0,0.0)
__GL.glColor3f(0.0,1.0,0.0)
__GL.glBegin(__GL.GL_LINES); __GL.glVertex3f(0.0,0.0,0.0); __GL.glVertex3f(0.0,-0.02,0.0); __GL.glEnd() #Y
__GL.glTranslatef(0.0,-0.0145,0.0); __GL.glRotatef(90, 1.0, 0.0, 0.0)
#__GLUT.glutSolidCone(0.003, 0.011, 8, 1)
__GL.glRotatef(-90, 1.0, 0.0, 0.0); __GL.glTranslatef(0.0,0.0145,0.0)
__GL.glColor3f(0.0,0.0,1.0)
__GL.glBegin(__GL.GL_LINES); __GL.glVertex3f(0.0,0.0,0.0); __GL.glVertex3f(0.0,0.0,0.02); __GL.glEnd() #Z
__GL.glTranslatef(0.0,0.0,0.0145)
#__GLUT.glutSolidCone(0.003, 0.011, 8, 1)
__GL.glTranslatef(0.0,0.0,-0.0145)
__GL.glColor3f(0.5,0.5,0.5) ; #__GLUT.glutSolidSphere(0.003, 8, 4)
__GL.glPopMatrix()
lastHit = [0,False] #last hit record to be compared with current hit record [ button, state ]
def __CheckHit(b,x,y,s): #checks if the hit (click) executes a command
L,M,R,U,D=range(1,6)
    for name in W_HitDefs: #we currently want to concentrate on whether we have a hit (o is not handled here)
X1,Y1,X2,Y2 = W_HitDefs[name] #Hit Area
l,m,r,o = W_States[name] #we only want the release states to last 1 frame
if X1<x<X2 and Y1<y<Y2: #are we in the hit area of this widget?
            #if we have our hit, then we can update the named widget's state for this hit
if b==L:
if s: W_States[name][0]=1 #we have clicked
else: W_States[name][0]=2 #we have released
if b==M:
if s: W_States[name][1]=1 #we have clicked
else: W_States[name][1]=2 #we have released
if b==R:
if s: W_States[name][2]=1 #we have clicked
else: W_States[name][2]=2 #we have released
else: #do we have any states to clean up?
            #this would happen if we click a widget, then move out of its area
if l==1: W_States[name][0]=0
if m==1: W_States[name][1]=0
if r==1: W_States[name][2]=0
#release states are to be taken care of by the widget.
def __CheckPos(x,y): #checks the new mouse position when moved
import sys
    for name in W_HitDefs: #we want to concentrate on whether we're over a hit area
X1,Y1,X2,Y2 = W_HitDefs[name] #Hit Area
#are we in the hit area of this widget?
if X1<x<X2 and Y1<y<Y2: W_States[name][3]=True
else: W_States[name][3]=False
def __initGUI():
__pyg.font.init()
|
mit
| 511,102,529,398,882,100
| 32.686424
| 142
| 0.564082
| false
| 2.511833
| false
| false
| false
|
CloudBrewery/duplicity-swiftkeys
|
duplicity/backends/swiftbackend.py
|
1
|
4654
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2013 Matthieu Huin <mhu@enovance.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import duplicity.backend
from duplicity import log
from duplicity import util
from duplicity.errors import BackendException
class SwiftBackend(duplicity.backend.Backend):
"""
Backend for Swift
"""
def __init__(self, parsed_url):
try:
from swiftclient import Connection
from swiftclient import ClientException
except ImportError:
raise BackendException("This backend requires "
"the python-swiftclient library.")
self.resp_exc = ClientException
conn_kwargs = {}
# if the user has already authenticated
if 'SWIFT_PREAUTHURL' in os.environ and 'SWIFT_PREAUTHTOKEN' in os.environ:
conn_kwargs['preauthurl'] = os.environ['SWIFT_PREAUTHURL']
conn_kwargs['preauthtoken'] = os.environ['SWIFT_PREAUTHTOKEN']
else:
if 'SWIFT_USERNAME' not in os.environ:
raise BackendException('SWIFT_USERNAME environment variable '
'not set.')
if 'SWIFT_PASSWORD' not in os.environ:
raise BackendException('SWIFT_PASSWORD environment variable '
'not set.')
if 'SWIFT_AUTHURL' not in os.environ:
raise BackendException('SWIFT_AUTHURL environment variable '
'not set.')
conn_kwargs['user'] = os.environ['SWIFT_USERNAME']
conn_kwargs['key'] = os.environ['SWIFT_PASSWORD']
conn_kwargs['authurl'] = os.environ['SWIFT_AUTHURL']
if 'SWIFT_AUTHVERSION' in os.environ:
conn_kwargs['auth_version'] = os.environ['SWIFT_AUTHVERSION']
else:
conn_kwargs['auth_version'] = '1'
if 'SWIFT_TENANTNAME' in os.environ:
conn_kwargs['tenant_name'] = os.environ['SWIFT_TENANTNAME']
self.container = parsed_url.path.lstrip('/')
container_metadata = None
try:
self.conn = Connection(**conn_kwargs)
container_metadata = self.conn.head_container(self.container)
except ClientException:
pass
except Exception as e:
log.FatalError("Connection failed: %s %s"
% (e.__class__.__name__, str(e)),
log.ErrorCode.connection_failed)
if container_metadata is None:
log.Info("Creating container %s" % self.container)
try:
self.conn.put_container(self.container)
except Exception as e:
log.FatalError("Container creation failed: %s %s"
% (e.__class__.__name__, str(e)),
log.ErrorCode.connection_failed)
def _error_code(self, operation, e):
if isinstance(e, self.resp_exc):
if e.http_status == 404:
return log.ErrorCode.backend_not_found
def _put(self, source_path, remote_filename):
self.conn.put_object(self.container, remote_filename,
file(source_path.name))
def _get(self, remote_filename, local_path):
headers, body = self.conn.get_object(self.container, remote_filename)
with open(local_path.name, 'wb') as f:
for chunk in body:
f.write(chunk)
def _list(self):
headers, objs = self.conn.get_container(self.container)
return [ o['name'] for o in objs ]
def _delete(self, filename):
self.conn.delete_object(self.container, filename)
def _query(self, filename):
sobject = self.conn.head_object(self.container, filename)
return {'size': int(sobject['content-length'])}
duplicity.backend.register_backend("swift", SwiftBackend)
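# Editor's note (illustrative, not from the original source): a minimal
# non-preauthenticated environment for this backend; the values shown are
# assumptions.
#
#   SWIFT_USERNAME=demo SWIFT_PASSWORD=secret \
#   SWIFT_AUTHURL=http://keystone.example.com:5000/v2.0 SWIFT_AUTHVERSION=2 \
#   duplicity /some/dir swift://backup_container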
|
gpl-2.0
| -4,472,042,862,085,545,000
| 37.46281
| 83
| 0.603997
| false
| 4.181491
| false
| false
| false
|
google/ci_edit
|
tools/checkSpelling.py
|
1
|
3269
|
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import io
import os
import pprint
import re
import sys
from fnmatch import fnmatch
ciEditDir = os.path.dirname(os.path.dirname(__file__))
sys.path.append(ciEditDir)
import app.regex
import app.spelling
print("checking spelling")
doValues = False
root = (len(sys.argv) > 1 and sys.argv[1]) or "."
filePattern = (len(sys.argv) > 2 and sys.argv[2]) or "*.*"
kReWords = re.compile(r"""(\w+)""")
# The first group is a hack to allow upper case pluralized, e.g. URLs.
kReSubwords = re.compile(
r"((?:[A-Z]{2,}s\b)|(?:[A-Z][a-z]+)|(?:[A-Z]+(?![a-z]))|(?:[a-z]+))"
)
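# Editor's note (illustrative): how kReSubwords splits mixed-case identifiers,
# including the pluralised-acronym case mentioned above.
#
#   >>> kReSubwords.findall("parseURLs fromCamelCase HTTPServer")
#   ['parse', 'URLs', 'from', 'Camel', 'Case', 'HTTP', 'Server']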
kReIgnoreDirs = re.compile(r"""/\.git/""")
kReIgnoreFiles = re.compile(
r"""\.(pyc|pyo|png|a|jpg|tif|mp3|mp4|cpuperf|dylib|avi|so|plist|raw|webm)$"""
)
kReIncludeFiles = re.compile(r"""\.(cc)$""")
assert kReIgnoreDirs.search("/apple/.git/orange")
assert kReIgnoreFiles.search("/apple.pyc")
dictionaryList = glob.glob(os.path.join(ciEditDir, "app/dictionary.*.words"))
dictionaryList = [os.path.basename(i)[11:-6] for i in dictionaryList]
pprint.pprint(dictionaryList)
pathPrefs = []
dictionary = app.spelling.Dictionary(dictionaryList, pathPrefs)
assert dictionary.is_correct(u"has", "cpp")
def handle_file(fileName, unrecognizedWords):
# print(fileName, end="")
try:
with io.open(fileName, "r") as f:
data = f.read()
if not data:
return
for sre in kReSubwords.finditer(data):
# print(repr(sre.groups()))
word = sre.groups()[0].lower()
if not dictionary.is_correct(word, "cpp"):
if word not in unrecognizedWords:
print(word, end=",")
unrecognizedWords.add(word)
except UnicodeDecodeError:
print("Error decoding:", fileName)
def walk_tree(root):
unrecognizedWords = set()
for (dirPath, dirNames, fileNames) in os.walk(root):
if kReIgnoreDirs.search(dirPath):
continue
for fileName in filter(lambda x: fnmatch(x, filePattern), fileNames):
if kReIgnoreFiles.search(fileName):
continue
if kReIncludeFiles.search(fileName):
handle_file(os.path.join(dirPath, fileName), unrecognizedWords)
if unrecognizedWords:
print("found", fileName)
print(unrecognizedWords)
print()
return unrecognizedWords
if os.path.isfile(root):
print(handle_file(root))
elif os.path.isdir(root):
words = sorted(walk_tree(root))
for i in words:
print(i)
else:
print("root is not a file or directory")
print("---- end ----")
|
apache-2.0
| 4,742,552,453,476,556,000
| 30.432692
| 81
| 0.649128
| false
| 3.496257
| false
| false
| false
|
nathanielksmith/prosaicweb
|
prosaicweb/__init__.py
|
1
|
2338
|
# prosaicweb
# Copyright (C) 2016 nathaniel smith
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from . import views
from .models import Base, engine
from .views.auth import login, logout, register
from .app import app, bcrypt
routes = [
# TODO
# because html is dumb and forms can only use post/get, that's all we take
# here. However, within each view function, we check for a _method on a
# POST and treat that as the method. This should really be handled by a
    # middleware (see the illustrative sketch after the route table below).
('/', 'index', views.index, {}),
('/generate', 'generate', views.generate, {'methods': ['GET', 'POST']}),
('/corpora', 'corpora', views.corpora, {'methods': ['GET', 'POST',]}),
('/sources', 'sources', views.sources, {'methods': ['GET', 'POST',]}),
('/sources/<source_id>', 'source', views.source,
{'methods': ['GET', 'POST']}),
('/corpora/<corpus_id>', 'corpus', views.corpus,
{'methods': ['GET', 'POST']}),
('/phrases', 'phrases', views.phrases, {'methods': ['POST']}),
('/templates', 'templates', views.templates, {'methods': ['GET', 'POST']}),
('/templates/<template_id>', 'template', views.template,
{'methods': ['GET', 'POST']}),
('/auth/login', 'login', login, {'methods': ['POST']}),
('/auth/register', 'register', register, {'methods':['GET', 'POST']}),
('/auth/logout', 'logout', logout, {}),
]
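# Editor's illustrative sketch (assumption, not part of the original app): the
# middleware hinted at in the TODO above. This variant keys off an override
# header rather than the _method form field, so the request body is not
# consumed before the view runs; wiring it up might look like:
#
#   class HTTPMethodOverrideMiddleware(object):
#       allowed = frozenset(['PUT', 'PATCH', 'DELETE'])
#
#       def __init__(self, wsgi_app):
#           self.wsgi_app = wsgi_app
#
#       def __call__(self, environ, start_response):
#           method = environ.get('HTTP_X_HTTP_METHOD_OVERRIDE', '').upper()
#           if method in self.allowed:
#               environ['REQUEST_METHOD'] = method
#           return self.wsgi_app(environ, start_response)
#
#   app.wsgi_app = HTTPMethodOverrideMiddleware(app.wsgi_app)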
for [route, name, fn, opts] in routes:
app.add_url_rule(route, name, fn, **opts)
def main() -> None:
if len(sys.argv) > 1 and sys.argv[1] == 'dbinit':
print('initializing prosaic and prosaicweb database state...')
Base.metadata.create_all(bind=engine)
exit(0)
app.run()
if __name__ == '__main__':
main()
|
agpl-3.0
| 1,900,292,323,592,605,700
| 39.310345
| 79
| 0.640719
| false
| 3.608025
| false
| false
| false
|
liqin75/vse-vpnaas-plugin
|
quantum/plugins/vmware/vshield/plugin.py
|
1
|
14836
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from quantum.db import api as qdbapi
from quantum.db import model_base
from quantum.db.loadbalancer import loadbalancer_db
from quantum.db import firewall_db as fw_db
from quantum.extensions import loadbalancer
from quantum.openstack.common import log as logging
from quantum.plugins.common import constants
from vseapi import VseAPI
from lbapi import LoadBalancerAPI
from fwapi import FirewallAPI
LOG = logging.getLogger(__name__)
edgeUri = 'https://fank-dev2.eng.vmware.com'
edgeId = 'edge-27'
edgeUser = 'admin'
edgePasswd = 'default'
class VShieldEdgeLBPlugin(loadbalancer_db.LoadBalancerPluginDb):
"""
Implementation of the Quantum Loadbalancer Service Plugin.
This class manages the workflow of LBaaS request/response.
Most DB related works are implemented in class
loadbalancer_db.LoadBalancerPluginDb.
"""
supported_extension_aliases = ["lbaas"]
def __init__(self):
"""
Do the initialization for the loadbalancer service plugin here.
"""
# Hard coded for now
vseapi = VseAPI(edgeUri, edgeUser, edgePasswd, edgeId)
self.vselb = LoadBalancerAPI(vseapi)
qdbapi.register_models(base=model_base.BASEV2)
def get_plugin_type(self):
return constants.LOADBALANCER
def get_plugin_description(self):
return "Quantum LoadBalancer Service Plugin"
def create_vip(self, context, vip):
with context.session.begin(subtransactions=True):
v = super(VShieldEdgeLBPlugin, self).create_vip(context, vip)
self.update_status(context, loadbalancer_db.Vip, v['id'],
constants.PENDING_CREATE)
LOG.debug(_("Create vip: %s") % v['id'])
self.vselb.create_vip(context, v)
self.update_status(context, loadbalancer_db.Vip, v['id'],
constants.ACTIVE)
        # If we adopt asynchronous mode, this method should return immediately
        # and let the client query the object status. The plugin will listen
        # for the event from the device and update the object status by
        # calling self.update_status(context, Vip, id, ACTIVE/ERROR)
        #
        # In synchronous mode, send the request to the device here and wait
        # for the response, then update the object status before returning.
v_query = self.get_vip(context, v['id'])
return v_query
def update_vip(self, context, id, vip):
with context.session.begin(subtransactions=True):
v_query = self.get_vip(
context, id, fields=["status"])
if v_query['status'] in [
constants.PENDING_DELETE, constants.ERROR]:
raise loadbalancer.StateInvalid(id=id,
state=v_query['status'])
v = super(VShieldEdgeLBPlugin, self).update_vip(context, id, vip)
self.update_status(context, loadbalancer_db.Vip, id,
constants.PENDING_UPDATE)
LOG.debug(_("Update vip: %s"), id)
self.vselb.update_vip(context, v)
self.update_status(context, loadbalancer_db.Vip, id,
constants.ACTIVE)
v_rt = self.get_vip(context, id)
return v_rt
def delete_vip(self, context, id):
with context.session.begin(subtransactions=True):
vip = self.get_vip(context, id)
uuid2vseid = self.vselb.get_vip_vseid(context, vip['id'])
self.update_status(context, loadbalancer_db.Vip, id,
constants.PENDING_DELETE)
LOG.debug(_("Delete vip: %s"), id)
super(VShieldEdgeLBPlugin, self).delete_vip(context, id)
vip['vseid'] = uuid2vseid
self.vselb.delete_vip(context, vip)
def get_vip(self, context, id, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_vip(context, id, fields)
LOG.debug(_("Get vip: %s"), id)
return res
def get_vips(self, context, filters=None, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_vips(
context, filters, fields)
LOG.debug(_("Get vips"))
return res
def create_pool(self, context, pool):
with context.session.begin(subtransactions=True):
p = super(VShieldEdgeLBPlugin, self).create_pool(context, pool)
self.update_status(context, loadbalancer_db.Pool, p['id'],
constants.PENDING_CREATE)
LOG.debug(_("Create pool: %s"), p['id'])
self.vselb.create_pool(context, p)
            # The pool may not be created on the device if no member is
            # specified; however, we still update the status to ACTIVE in case
            # the client is waiting for the pool to become ACTIVE before
            # pushing a create-member request.
self.update_status(context, loadbalancer_db.Pool, p['id'],
constants.ACTIVE)
p_rt = self.get_pool(context, p['id'])
return p_rt
def update_pool(self, context, id, pool):
with context.session.begin(subtransactions=True):
p_query = self.get_pool(context, id, fields=["status"])
if p_query['status'] in [
constants.PENDING_DELETE, constants.ERROR]:
raise loadbalancer.StateInvalid(id=id,
state=p_query['status'])
p = super(VShieldEdgeLBPlugin, self).update_pool(context, id, pool)
LOG.debug(_("Update pool: %s"), p['id'])
self.vselb.update_pool(context, pool)
p_rt = self.get_pool(context, id)
return p_rt
def delete_pool(self, context, id):
with context.session.begin(subtransactions=True):
pool = self.get_pool(context, id)
self.update_status(context, loadbalancer_db.Pool, id,
constants.PENDING_DELETE)
self.vselb.delete_pool(context, pool)
super(VShieldEdgeLBPlugin, self).delete_pool(context, id)
LOG.debug(_("Delete pool: %s"), id)
def get_pool(self, context, id, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_pool(context, id, fields)
LOG.debug(_("Get pool: %s"), id)
return res
def get_pools(self, context, filters=None, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_pools(
context, filters, fields)
LOG.debug(_("Get Pools"))
return res
def stats(self, context, pool_id):
res = super(VShieldEdgeLBPlugin, self).get_stats(context, pool_id)
LOG.debug(_("Get stats of Pool: %s"), pool_id)
return res
def create_pool_health_monitor(self, context, health_monitor, pool_id):
m = super(VShieldEdgeLBPlugin, self).create_pool_health_monitor(
context, health_monitor, pool_id)
LOG.debug(_("Create health_monitor of pool: %s"), pool_id)
return m
def get_pool_health_monitor(self, context, id, pool_id, fields=None):
m = super(VShieldEdgeLBPlugin, self).get_pool_health_monitor(
context, id, pool_id, fields)
LOG.debug(_("Get health_monitor of pool: %s"), pool_id)
return m
def delete_pool_health_monitor(self, context, id, pool_id):
super(VShieldEdgeLBPlugin, self).delete_pool_health_monitor(
context, id, pool_id)
LOG.debug(_("Delete health_monitor %(id)s of pool: %(pool_id)s"),
{"id": id, "pool_id": pool_id})
def get_member(self, context, id, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_member(
context, id, fields)
LOG.debug(_("Get member: %s"), id)
return res
def get_members(self, context, filters=None, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_members(
context, filters, fields)
LOG.debug(_("Get members"))
return res
def create_member(self, context, member):
with context.session.begin(subtransactions=True):
m = super(VShieldEdgeLBPlugin, self).create_member(context, member)
self.update_status(context, loadbalancer_db.Member, m['id'],
constants.PENDING_CREATE)
LOG.debug(_("Create member: %s"), m['id'])
self.vselb.create_member(context, m)
self.update_status(context, loadbalancer_db.Member, m['id'],
constants.ACTIVE)
m_rt = self.get_member(context, m['id'])
return m_rt
def update_member(self, context, id, member):
with context.session.begin(subtransactions=True):
m_query = self.get_member(context, id, fields=["status"])
if m_query['status'] in [
constants.PENDING_DELETE, constants.ERROR]:
raise loadbalancer.StateInvalid(id=id,
state=m_query['status'])
m = super(VShieldEdgeLBPlugin, self).update_member(
context, id, member)
self.update_status(context, loadbalancer_db.Member, id,
constants.PENDING_UPDATE)
LOG.debug(_("Update member: %s"), m['id'])
self.vselb.update_member(context, m)
self.update_status(context, loadbalancer_db.Member, id,
constants.ACTIVE)
m_rt = self.get_member(context, id)
return m_rt
def delete_member(self, context, id):
with context.session.begin(subtransactions=True):
m = self.get_member(context, id)
self.update_status(context, loadbalancer_db.Member, id,
constants.PENDING_DELETE)
LOG.debug(_("Delete member: %s"), id)
super(VShieldEdgeLBPlugin, self).delete_member(context, id)
self.vselb.delete_member(context, m)
def get_health_monitor(self, context, id, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_health_monitor(
context, id, fields)
LOG.debug(_("Get health_monitor: %s"), id)
return res
def get_health_monitors(self, context, filters=None, fields=None):
res = super(VShieldEdgeLBPlugin, self).get_health_monitors(
context, filters, fields)
LOG.debug(_("Get health_monitors"))
return res
def create_health_monitor(self, context, health_monitor):
h = super(VShieldEdgeLBPlugin, self).create_health_monitor(
context, health_monitor)
self.update_status(context, loadbalancer_db.HealthMonitor, h['id'],
constants.PENDING_CREATE)
LOG.debug(_("Create health_monitor: %s"), h['id'])
# TODO: notify lbagent
h_rt = self.get_health_monitor(context, h['id'])
return h_rt
def update_health_monitor(self, context, id, health_monitor):
h_query = self.get_health_monitor(context, id, fields=["status"])
if h_query['status'] in [
constants.PENDING_DELETE, constants.ERROR]:
raise loadbalancer.StateInvalid(id=id,
state=h_query['status'])
h = super(VShieldEdgeLBPlugin, self).update_health_monitor(
context, id, health_monitor)
self.update_status(context, loadbalancer_db.HealthMonitor, id,
constants.PENDING_UPDATE)
LOG.debug(_("Update health_monitor: %s"), h['id'])
# TODO notify lbagent
h_rt = self.get_health_monitor(context, id)
return h_rt
def delete_health_monitor(self, context, id):
self.update_status(context, loadbalancer_db.HealthMonitor, id,
constants.PENDING_DELETE)
LOG.debug(_("Delete health_monitor: %s"), id)
super(VShieldEdgeLBPlugin, self).delete_health_monitor(context, id)
class VShieldEdgeFWPlugin(fw_db.FirewallPluginDb):
supported_extension_aliases = ["fwaas"]
def __init__(self):
"""
Do the initialization for the firewall service plugin here.
"""
# Hard coded for now
vseapi = VseAPI(edgeUri, edgeUser, edgePasswd, edgeId)
self.vsefw = FirewallAPI(vseapi)
qdbapi.register_models(base=model_base.BASEV2)
def get_plugin_type(self):
return constants.FIREWALL
def get_plugin_description(self):
return "Quantum Firewall Service Plugin"
def create_rule(self, context, rule):
with context.session.begin(subtransactions=True):
print rule
rule = super(VShieldEdgeFWPlugin, self).create_rule(context, rule)
print rule
self.vsefw.create_rule(context, rule)
return rule
def delete_rule(self, context, id):
with context.session.begin(subtransactions=True):
rule = self.get_rule(context, id)
self.vsefw.delete_rule(context, rule)
super(VShieldEdgeFWPlugin, self).delete_rule(context, id)
def create_ipobj(self, context, ipobj):
with context.session.begin(subtransactions=True):
ipobj = super(VShieldEdgeFWPlugin, self).create_ipobj(context, ipobj)
self.vsefw.create_ipset(context, ipobj)
return ipobj
def delete_ipobj(self, context, id):
with context.session.begin(subtransactions=True):
ipobj = self.get_ipobj(context, id)
self.vsefw.delete_ipset(context, ipobj)
super(VShieldEdgeFWPlugin, self).delete_ipobj(context, id)
def create_serviceobj(self, context, serviceobj):
with context.session.begin(subtransactions=True):
svcobj = super(VShieldEdgeFWPlugin, self).create_serviceobj(context, serviceobj)
self.vsefw.create_application(context, svcobj)
return svcobj
def delete_serviceobj(self, context, id):
with context.session.begin(subtransactions=True):
svcobj = self.get_serviceobj(context, id)
self.vsefw.delete_application(context, svcobj)
super(VShieldEdgeFWPlugin, self).delete_serviceobj(context, id)
|
apache-2.0
| -8,121,185,046,052,741,000
| 41.267806
| 92
| 0.611957
| false
| 3.872618
| false
| false
| false
|
pohzhiee/ghetto_omr
|
edcamcam_testfile_imgproc7.py
|
1
|
5239
|
import numpy as np
import cv2
from edcamcam_testfile_shape import shapedetector
import matplotlib.pyplot as plt
#Part 1: Image Loading
#-------------------------------------------------------------------
#load image
img = cv2.imread('img_data/omstest2.jpg',cv2.IMREAD_GRAYSCALE)
img2= cv2.imread('img_data/omstest2.jpg')
#bilateral filter, sharpen, thresh
biblur=cv2.bilateralFilter(img,20,175,175)
sharp=cv2.addWeighted(img,1.55,biblur,-0.5,0)
ret1,thresh1 = cv2.threshold(sharp,127,255,cv2.THRESH_OTSU)
#negative image
inv=cv2.bitwise_not(thresh1)
#closed image
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
closed = cv2.morphologyEx(inv, cv2.MORPH_CLOSE, kernel)
#Part 2: Finding Valid Contours
#-------------------------------------------------------------------
#find countours
im2, contours, hierarchy = cv2.findContours(closed, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
#prepare empty canvas
height, width = img.shape[:2]
emptycanvas=np.zeros((height,width),dtype=np.uint8)
#drop contours with area A < 100
kcounter = 0
for c in contours:
A = cv2.contourArea(c)
if A<100:
contours=np.delete(contours,kcounter,0)
kcounter=kcounter-1
kcounter=kcounter+1
#find length of contour array
clen=len(contours)
#create match_array [dimension = len x len] with 0s
match_array=np.zeros((clen,clen),np.uint8)
#loop over the contours and compare two by two
icounter = 0
for i in contours:
jcounter = 0
for j in contours:
        #if the matchShapes difference is < 0.01, regard the pair as a match (TRUE)
ret=cv2.matchShapes(i,j,1,0.0)
if ret<0.01:
match_array[icounter,jcounter]=1
else:
match_array[icounter,jcounter]=0
jcounter=jcounter+1
icounter=icounter+1
#sum each row of the array (for TRUEs and FALSEs)
sum_array=np.sum(match_array,axis=1,dtype=np.int32)
#finding mean of the comparison value
sum_array2=np.sum(sum_array,axis=0,dtype=np.int32)
sum_array_len=len(sum_array)
ave_sim_val=sum_array2/sum_array_len
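#editor's note (illustrative): with, say, 20 repeated answer-mark contours and
#5 stray shapes, each repeated-shape row of match_array sums to about 20 while
#a stray row sums to about 1, so ave_sim_val ~= (20*20 + 5*1)/25 = 16.2 falls
#between the two groups and only rows with sum > ave_sim_val are kept below.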
#Assumption: there are a lot of 1s
#counters
#creation of new array to store centre point
#variables
counter_a=0
counter_s=0
counter_m=0
valid_counter =0
centpt_array = np.array([[0,0,0]])
hor_dist_acc=0
ver_dist_acc=0
#Area array
area_arr=np.array([])
#find valid mean area and SD
for k in sum_array:
if k>ave_sim_val:
A = cv2.contourArea(contours[counter_s])
area_arr=np.append(area_arr,[A],0)
counter_a=counter_a+1
counter_s=counter_s +1
sum_area_array=np.array([])
sum_area_array=np.sum(area_arr,axis=0,dtype=np.uint32)
mean_valid_A=sum_area_array/counter_a
sum_dif=0
for a in area_arr:
dif = (mean_valid_A - a)**2
sum_dif=sum_dif+dif
SD_valid=(sum_dif/counter_a)**0.5
print area_arr
#find midpoints of contours that fulfil 1) high similarity 2) occurrence greater than average 3) least deviation from valid mean area
for i in sum_array:
if i>ave_sim_val:
cv2.drawContours(img2, contours, counter_m, (0, 255, 0), 2)
        #keep only contours whose area is within 2 SD of the valid mean area
condition = cv2.contourArea(contours[counter_m])>mean_valid_A-2*SD_valid and cv2.contourArea(contours[counter_m])<mean_valid_A+2*SD_valid
if condition:
# obtain centre point of each contour
M = cv2.moments(contours[counter_m])
cX = int(M['m10'] / M['m00'])
cY = int(M['m01'] / M['m00'])
# store in it arrays
new_centpt_array=np.array([[cX,cY,counter_m]])
centpt_array=np.concatenate((centpt_array,new_centpt_array),axis=0)
#determine horizontal point and vertical point
c=contours[counter_m]
Xt_right=np.asarray(tuple(c[c[:,:,0].argmax()][0]))
Xt_bot=np.asarray(tuple(c[c[:,:,1].argmax()][0]))
hor_dist=Xt_right[0]-cX
ver_dist=Xt_bot[1]-cY
hor_dist_acc=hor_dist_acc+hor_dist
ver_dist_acc=ver_dist_acc+ver_dist
valid_counter = valid_counter +1
counter_m = counter_m+1
mean_hor_dist=hor_dist_acc/valid_counter
mean_ver_dist=ver_dist_acc/valid_counter
#delete 1st row
centpt_array=np.delete(centpt_array,0,0)
#checkpoint for adding array
centpt_array=np.append(centpt_array,[[48,185,1000]],0)
centpt_array=np.append(centpt_array,[[40,290,1001]],0)
centpt_array=np.append(centpt_array,[[500,500,1002]],0)
centpt_array=np.append(centpt_array,[[300,300,1003]],0)
centpt_array=np.append(centpt_array,[[0,0,1004]],0)
#Removing Duplicates
g=0
arr_len=len(centpt_array)
while arr_len>g:
target_arr1 = centpt_array[g]
h=1+g
while arr_len>h and h>g:
target_arr2 = centpt_array[h]
if abs(target_arr1[0]-target_arr2[0])<mean_hor_dist:
if abs(target_arr1[1]-target_arr2[1])<mean_ver_dist:
centpt_array=np.delete(centpt_array,h,0)
h=h-1
arr_len=arr_len-1
h = h + 1
g=g+1
#checkpoint
#print centpt_array
print '-----------'
#print valid_counter
#print len(centpt_array)
#print mean_hor_dist
#print mean_ver_dist
#initialise plot
plt.subplot(111),plt.imshow(img2)
plt.title('dilate1 Image'), plt.xticks([]), plt.yticks([])
for k in centpt_array:
plt.plot(k[0],k[1],'ro')
plt.show()
cv2.waitKey(0)
|
gpl-3.0
| 6,262,305,167,322,656,000
| 24.807882
| 145
| 0.649361
| false
| 2.766103
| false
| false
| false
|
zorna/zorna
|
zorna/calendars/forms.py
|
1
|
2212
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from schedule.models import Event, Occurrence
import datetime
import time
from zorna.calendars.models import ZornaCalendar, EventDetails, ZornaResourceCalendar
FREQUENCIES_CHOICES = (
("", _("-----")),
("WEEKLY", _("Weekly")),
("DAILY", _("Daily")),
("MONTHLY", _("Monthly")),
("YEARLY", _("Yearly")))
DAYS_CHOICES = (
(6, _("Sun")),
(0, _("Mon")),
(1, _("Tue")),
(2, _("Wed")),
(3, _("Thu")),
(4, _("Fri")),
(5, _("Sat")),
)
class EditEventForm(forms.ModelForm):
interval_choices = [(i, i) for i in range(1, 31)]
title = forms.CharField()
description = forms.Textarea()
start = forms.DateTimeField(label=_(
"Start date"), widget=forms.SplitDateTimeWidget)
end = forms.DateTimeField(label=_("End date"), widget=forms.SplitDateTimeWidget, help_text=_(
"The end time must be later than start time."))
end_recurring_period = forms.DateField(label=_("Until date"), help_text=_(
"This date is ignored for one time only events."), required=False)
rule = forms.ChoiceField(label=_("Rule"), choices=FREQUENCIES_CHOICES, help_text=_(
"Select '----' for a one time only event."), required=False)
weekdays = forms.MultipleChoiceField(label=_(
"Repeat on"), choices=DAYS_CHOICES, widget=forms.CheckboxSelectMultiple, required=False)
interval = forms.ChoiceField(label=_(
"Repeat every"), choices=interval_choices, required=False)
class Meta:
model = Event
exclude = ('creator', 'created_on', 'calendar', 'rule')
def clean(self):
start = self.cleaned_data.get("start")
end = self.cleaned_data.get("end")
if start >= end:
raise forms.ValidationError(_(
u'The end time must be later than start time.'))
return self.cleaned_data
class EditEventDetailsForm(forms.ModelForm):
class Meta:
model = EventDetails
class ResourceCalendarForm(forms.ModelForm):
class Meta:
model = ZornaResourceCalendar
class ZornaCalendarSettingsForm(forms.ModelForm):
class Meta:
model = ZornaCalendar
|
bsd-3-clause
| 1,895,415,933,786,963,200
| 29.30137
| 97
| 0.631555
| false
| 3.880702
| false
| false
| false
|
gregcorbett/SciBot
|
src/Board.py
|
1
|
4163
|
"""This file defines the Board class."""
import pygame
from src.Component import Component
from src.Point import Point
class Board(Component):
"""This class defines the board (a.k.a. map)."""
def __init__(self, scenario):
"""Create the board."""
self.step = scenario.get_board_step()
# Call superclass constructor
super().__init__(scenario.get_background(),
Point(0, 0), # This is the window topleft corner
self.step)
        # Work out (and check) screen size; also store it for
        # checking the BeeBot has not fallen off the edge
self.logical_board_height = scenario.get_logical_height()
self.logical_board_width = scenario.get_logical_width()
# Board dimensions in terms of pixels
self.board_height = self.logical_board_height * self.step
self.board_width = self.logical_board_width * self.step
self.border_colour = scenario.get_border_colour()
self.obstacle_group = scenario.get_obstacle_group()
self.goal_group = scenario.get_goal_group()
# Need to check the Board pixel height matches the image pixel height
if self.board_height != self.sprite.get_height():
raise ValueError(("Error 1: board height does "
"not match image height.\n"
"Board Height = %s\n"
"Image Height = %s"
% (self.board_height,
self.sprite.get_height())))
# Need to check the Board pixel width matches the image pixel width
if self.board_width != self.sprite.get_width():
raise ValueError(("Error 2: board width does "
"not match image width.\n"
"Board Width = %s\n"
"Image Width = %s"
% (self.board_width,
self.sprite.get_width())))
# Need to check the pixel height is a multiple of step
if self.board_height % self.step != 0:
raise ValueError(("Error 3: height mod step != 0.\n"
"Height = %s\n"
"Step = %s" % (self.board_height,
self.step)))
# Need to check the pixel height is a multiple of step
if self.board_width % self.step != 0:
raise ValueError(("Error 4: width mod step != 0.\n"
"Width = %s\n"
"Step = %s" % (self.board_width,
self.step)))
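        # Editor's note (illustrative): e.g. with step = 100 and a 5 x 4
        # logical board, the background image must be exactly 500 x 400
        # pixels for the four checks above to pass.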
def display(self, screen):
"""Display the board on screen."""
# Call the superclass display method
super().display(screen)
self.obstacle_group.display(screen)
self.goal_group.display(screen)
# Draw lines over Board background image
if self.border_colour is not None:
for iter_width in range(0, self.board_width + 1, self.step):
line_start = Point(iter_width, 0)
line_end = Point(iter_width, self.board_height)
# Draw a line from line_start to line_end.
pygame.draw.line(screen, self.border_colour,
line_start, line_end, 5)
for iter_height in range(0, self.board_height + 1, self.step):
line_start = Point(0, iter_height)
line_end = Point(self.board_width, iter_height)
# Draw a line from line_start to line_end.
pygame.draw.line(screen, self.border_colour,
line_start, line_end, 5)
def is_equal_to(self, other_component):
"""Compare this Board for equality with other_component."""
if not isinstance(other_component, Board):
            # A Board can obviously never be equal to a non-Board
return False
# Comparing a Board to another Board has not yet been implemented
raise NotImplementedError()
|
gpl-2.0
| 5,462,970,932,799,122,000
| 41.050505
| 77
| 0.528705
| false
| 4.476344
| false
| false
| false
|
fretsonfire/fof-python
|
src/Credits.py
|
1
|
9490
|
#####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
import pygame
from OpenGL.GL import *
from OpenGL.GLU import *
import math
from View import Layer
from Input import KeyListener
from Language import _
import MainMenu
import Song
import Version
import Player
class Element:
"""A basic element in the credits scroller."""
def getHeight(self):
"""@return: The height of this element in fractions of the screen height"""
return 0
def render(self, offset):
"""
Render this element.
@param offset: Offset in the Y direction in fractions of the screen height
"""
pass
class Text(Element):
def __init__(self, font, scale, color, alignment, text):
self.text = text
self.font = font
self.color = color
self.alignment = alignment
self.scale = scale
self.size = self.font.getStringSize(self.text, scale = scale)
def getHeight(self):
return self.size[1]
def render(self, offset):
if self.alignment == "left":
x = .1
elif self.alignment == "right":
x = .9 - self.size[0]
elif self.alignment == "center":
x = .5 - self.size[0] / 2
glColor4f(*self.color)
self.font.render(self.text, (x, offset), scale = self.scale)
class Picture(Element):
def __init__(self, engine, fileName, height):
self.height = height
self.engine = engine
engine.loadSvgDrawing(self, "drawing", fileName)
def getHeight(self):
return self.height
def render(self, offset):
self.drawing.transform.reset()
w, h = self.engine.view.geometry[2:4]
self.drawing.transform.translate(.5 * w, h - (.5 * self.height + offset) * h * float(w) / float(h))
self.drawing.transform.scale(1, -1)
self.drawing.draw()
class Credits(Layer, KeyListener):
"""Credits scroller."""
def __init__(self, engine, songName = None):
self.engine = engine
self.time = 0.0
self.offset = 1.0
self.songLoader = self.engine.resource.load(self, "song", lambda: Song.loadSong(self.engine, "defy", playbackOnly = True),
onLoad = self.songLoaded)
self.engine.loadSvgDrawing(self, "background1", "editor.svg")
self.engine.loadSvgDrawing(self, "background2", "keyboard.svg")
self.engine.loadSvgDrawing(self, "background3", "cassette.svg")
self.engine.boostBackgroundThreads(True)
nf = self.engine.data.font
bf = self.engine.data.bigFont
ns = 0.002
bs = 0.001
hs = 0.003
c1 = (1, 1, .5, 1)
c2 = (1, .75, 0, 1)
space = Text(nf, hs, c1, "center", " ")
self.credits = [
Text(nf, ns, c2, "center", _("Unreal Voodoo")),
Text(nf, ns, c1, "center", _("presents")),
Text(nf, bs, c2, "center", " "),
Picture(self.engine, "logo.svg", .25),
Text(nf, bs, c2, "center", " "),
Text(nf, bs, c2, "center", _("Version %s") % Version.version()),
space,
Text(nf, ns, c1, "left", _("Game Design,")),
Text(nf, ns, c1, "left", _("Programming:")),
Text(nf, ns, c2, "right", "Sami Kyostila"),
space,
Text(nf, ns, c1, "left", _("Music,")),
Text(nf, ns, c1, "left", _("Sound Effects:")),
Text(nf, ns, c2, "right", "Tommi Inkila"),
space,
Text(nf, ns, c1, "left", _("Graphics:")),
Text(nf, ns, c2, "right", "Joonas Kerttula"),
space,
Text(nf, ns, c1, "left", _("Introducing:")),
Text(nf, ns, c2, "right", "Mikko Korkiakoski"),
Text(nf, ns, c2, "right", _("as Jurgen, Your New God")),
space,
Text(nf, ns, c2, "right", "Marjo Hakkinen"),
Text(nf, ns, c2, "right", _("as Groupie")),
space,
Text(nf, ns, c1, "left", _("Song Credits:")),
Text(nf, ns, c2, "right", _("Bang Bang, Mystery Man")),
Text(nf, bs, c2, "right", _("music by Mary Jo and Tommi Inkila")),
Text(nf, bs, c2, "right", _("lyrics by Mary Jo")),
space,
Text(nf, ns, c2, "right", _("Defy The Machine")),
Text(nf, bs, c2, "right", _("music by Tommi Inkila")),
space,
Text(nf, ns, c2, "right", _("This Week I've Been")),
Text(nf, ns, c2, "right", _("Mostly Playing Guitar")),
Text(nf, bs, c2, "right", _("composed and performed by Tommi Inkila")),
space,
Text(nf, ns, c1, "left", _("Testing:")),
Text(nf, ns, c2, "right", "Mikko Korkiakoski"),
Text(nf, ns, c2, "right", "Tomi Kyostila"),
Text(nf, ns, c2, "right", "Jani Vaarala"),
Text(nf, ns, c2, "right", "Juho Jamsa"),
Text(nf, ns, c2, "right", "Olli Jakola"),
space,
Text(nf, ns, c1, "left", _("Mac OS X port:")),
Text(nf, ns, c2, "right", "Tero Pihlajakoski"),
space,
Text(nf, ns, c1, "left", _("Special thanks to:")),
Text(nf, ns, c2, "right", "Tutorial inspired by adam02"),
space,
Text(nf, ns, c1, "left", _("Made with:")),
Text(nf, ns, c2, "right", "Python"),
Text(nf, bs, c2, "right", "http://www.python.org"),
space,
Text(nf, ns, c2, "right", "PyGame"),
Text(nf, bs, c2, "right", "http://www.pygame.org"),
space,
Text(nf, ns, c2, "right", "PyOpenGL"),
Text(nf, bs, c2, "right", "http://pyopengl.sourceforge.net"),
space,
Text(nf, ns, c2, "right", "Amanith Framework"),
Text(nf, bs, c2, "right", "http://www.amanith.org"),
space,
Text(nf, ns, c2, "right", "Illusoft Collada module 1.4"),
Text(nf, bs, c2, "right", "http://colladablender.illusoft.com"),
space,
Text(nf, ns, c2, "right", "Psyco specializing compiler"),
Text(nf, bs, c2, "right", "http://psyco.sourceforge.net"),
space,
Text(nf, ns, c2, "right", "MXM Python Midi Package 0.1.4"),
Text(nf, bs, c2, "right", "http://www.mxm.dk/products/public/pythonmidi"),
space,
space,
Text(nf, bs, c1, "center", _("Source Code available under the GNU General Public License")),
Text(nf, bs, c2, "center", "http://www.unrealvoodoo.org"),
space,
space,
space,
space,
Text(nf, bs, c1, "center", _("Copyright 2006-2008 by Unreal Voodoo")),
]
def songLoaded(self, song):
self.engine.boostBackgroundThreads(False)
song.play()
def shown(self):
self.engine.input.addKeyListener(self)
def hidden(self):
if self.song:
self.song.fadeout(1000)
self.engine.input.removeKeyListener(self)
self.engine.view.pushLayer(MainMenu.MainMenu(self.engine))
def quit(self):
self.engine.view.popLayer(self)
def keyPressed(self, key, unicode):
if self.engine.input.controls.getMapping(key) in [Player.CANCEL, Player.KEY1, Player.KEY2] or key == pygame.K_RETURN:
self.songLoader.cancel()
self.quit()
return True
def run(self, ticks):
self.time += ticks / 50.0
if self.song:
self.offset -= ticks / 5000.0
if self.offset < -6.1:
self.quit()
def render(self, visibility, topMost):
v = 1.0 - ((1 - visibility) ** 2)
# render the background
t = self.time / 100 + 34
w, h, = self.engine.view.geometry[2:4]
r = .5
for i, background in [(0, self.background1), (1, self.background2), (2, self.background3)]:
background.transform.reset()
background.transform.translate((1 - v) * 2 * w + w / 2 + math.cos(t / 2) * w / 2 * r, h / 2 + math.sin(t) * h / 2 * r)
background.transform.translate(0, -h * (((self.offset + i * 2) % 6.0) - 3.0))
background.transform.rotate(math.sin(t * 4 + i) / 2)
background.transform.scale(math.sin(t / 8) + 3, math.sin(t / 8) + 3)
background.draw()
self.engine.view.setOrthogonalProjection(normalize = True)
font = self.engine.data.font
# render the scroller elements
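    # Each element is stacked below the previous one: y starts at self.offset
    # (which run() slowly decreases) and grows by each element's height, so an
    # element is only drawn while it lies inside the visible window 0..1.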
y = self.offset
glTranslatef(-(1 - v), 0, 0)
try:
for element in self.credits:
h = element.getHeight()
if y + h > 0.0 and y < 1.0:
element.render(y)
y += h
if y > 1.0:
break
finally:
self.engine.view.resetProjection()
|
mit
| -1,735,713,498,330,326,300
| 36.509881
| 127
| 0.546891
| false
| 3.255575
| false
| false
| false
|
cggh/DQXServer
|
_CreateFilterBankData.py
|
1
|
1099
|
# This file is part of DQXServer - (C) Copyright 2014, Paul Vauterin, Ben Jeffery, Alistair Miles <info@cggh.org>
# This program is free software licensed under the GNU Affero General Public License.
# You can find a copy of this license in LICENSE in the top directory of the source code or at <http://opensource.org/licenses/AGPL-3.0>
import math
import random
import SummCreate
import sys
basedir='.'
#============= FAKE STUFF FOR DEBUGGING; REMOVE FOR PRODUCTION ==============
#basedir='C:\Data\Test\Genome'
#sys.argv=['','/home/pvaut/Documents/Genome/Tracks-PfPopGen3.1/Coverage2','Summ01']
#============= END OF FAKE STUFF ============================================
if len(sys.argv)<3:
print('Usage: COMMAND DataFolder ConfigFilename')
print(' DataFolder= folder containing the source data, relative to the current path')
print(' ConfigFilename= name of the source configuration file (do not provide the extension ".cnf").')
sys.exit()
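# Example invocation (paths are illustrative only):
#   python _CreateFilterBankData.py Tracks-PfPopGen3.1/Coverage2 Summ01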
dataFolder=sys.argv[1]
summaryFile=sys.argv[2]
creat=SummCreate.Creator(basedir,dataFolder,summaryFile)
creat.Summarise()
|
agpl-3.0
| -1,543,107,310,729,181,400
| 32.30303
| 136
| 0.690628
| false
| 3.522436
| false
| false
| false
|
GammaC0de/pyload
|
src/pyload/plugins/downloaders/ShareonlineBiz.py
|
1
|
5833
|
# -*- coding: utf-8 -*-
import base64
import re
import time
from datetime import timedelta
from pyload.core.network.request_factory import get_url
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..base.simple_downloader import SimpleDownloader
class ShareonlineBiz(SimpleDownloader):
__name__ = "ShareonlineBiz"
__type__ = "downloader"
__version__ = "0.67"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"https?://(?:www\.)?(share-online\.biz|egoshare\.com)/(download\.php\?id=|dl/)(?P<ID>\w+)"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """Shareonline.biz downloader plugin"""
__license__ = "GPLv3"
__authors__ = [
("spoob", "spoob@pyload.net"),
("mkaay", "mkaay@mkaay.de"),
("zoidberg", "zoidberg@mujmail.cz"),
("Walter Purcaro", "vuolter@gmail.com"),
]
URL_REPLACEMENTS = [(__pattern__ + ".*", r"http://www.share-online.biz/dl/\g<ID>")]
CHECK_TRAFFIC = True
ERROR_PATTERN = r'<p class="b">Information:</p>\s*<div>\s*<strong>(.*?)</strong>'
@classmethod
def api_info(cls, url):
info = {}
field = get_url(
"http://api.share-online.biz/linkcheck.php",
get={"md5": "1", "links": re.match(cls.__pattern__, url).group("ID")},
).split(";")
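        # The linkcheck response is a single semicolon-separated record; judging
        # from the parsing below: field[0] = file id, field[1] = status ("OK",
        # "DELETED" or "NOTFOUND"), field[2] = name, field[3] = size in bytes,
        # field[4] = md5. The exact wire format is an assumption based on this code.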
try:
if field[1] == "OK":
info["fileid"] = field[0]
info["status"] = 2
info["name"] = field[2]
info["size"] = field[3] #: In bytes
info["md5"] = field[4].strip().lower().replace("\n\n", "") #: md5
elif field[1] in ("DELETED", "NOTFOUND"):
info["status"] = 1
except IndexError:
pass
return info
def setup(self):
self.resume_download = self.premium
self.multi_dl = False
def handle_captcha(self):
self.captcha = ReCaptcha(self.pyfile)
response, challenge = self.captcha.challenge()
m = re.search(r"var wait=(\d+);", self.data)
self.set_wait(int(m.group(1)) if m else 30)
res = self.load(
"{}/free/captcha/{}".format(self.pyfile.url, int(time.time() * 1000)),
post={
"dl_free": "1",
"recaptcha_challenge_field": challenge,
"recaptcha_response_field": response,
},
)
if res != "0":
self.captcha.correct()
return res
else:
self.retry_captcha()
def handle_free(self, pyfile):
self.wait(3)
self.data = self.load(
"{}/free/".format(pyfile.url), post={"dl_free": "1", "choice": "free"}
)
self.check_errors()
res = self.handle_captcha()
        # b64decode() returns bytes on Python 3; decode it so the scheme check
        # and the downloader get a str URL.
        self.link = base64.b64decode(res).decode("utf-8")
        if not self.link.startswith("http://"):
            self.error(self._("Invalid url"))
self.wait()
def check_download(self):
check = self.scan_download(
{
"cookie": re.compile(r'<div id="dl_failure"'),
"fail": re.compile(r"<title>Share-Online"),
}
)
if check == "cookie":
self.retry_captcha(5, 60, self._("Cookie failure"))
elif check == "fail":
self.retry_captcha(
5, timedelta(minutes=5).seconds, self._("Download failed")
)
return SimpleDownloader.check_download(self)
    #: Would work better by loading the (account) api internally
def handle_premium(self, pyfile):
self.api_data = dlinfo = {}
html = self.load(
"https://api.share-online.biz/account.php",
get={
"username": self.account.user,
"password": self.account.get_login("password"),
"act": "download",
"lid": self.info["fileid"],
},
)
self.log_debug(html)
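        # account.php answers with one "key: value" pair per line (e.g. status,
        # name, size, url); lines that do not split cleanly are ignored below.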
for line in html.splitlines():
try:
key, value = line.split(": ")
dlinfo[key.lower()] = value
except ValueError:
pass
if dlinfo["status"] != "online":
self.offline()
else:
pyfile.name = dlinfo["name"]
pyfile.size = int(dlinfo["size"])
self.link = dlinfo["url"]
if self.link == "server_under_maintenance":
self.temp_offline()
else:
self.multi_dl = True
def check_errors(self):
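        # Share-online encodes the failure reason in the final redirect URL as
        # /failure/<reason>/; no match means the last request did not fail.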
m = re.search(r"/failure/(.*?)/", self.req.last_effective_url)
if m is None:
self.info.pop("error", None)
return
errmsg = m.group(1).lower()
try:
self.log_error(errmsg, re.search(self.ERROR_PATTERN, self.data).group(1))
except Exception:
self.log_error(self._("Unknown error occurred"), errmsg)
if errmsg == "invalid":
self.fail(self._("File not available"))
elif errmsg in ("freelimit", "size", "proxy"):
self.fail(self._("Premium account needed"))
elif errmsg in ("expired", "server"):
self.retry(wait=600, msg=errmsg)
elif errmsg == "full":
self.fail(self._("Server is full"))
elif "slot" in errmsg:
self.wait(3600, reconnect=True)
self.restart(errmsg)
else:
self.wait(60, reconnect=True)
self.restart(errmsg)
|
agpl-3.0
| 814,808,045,400,288,100
| 28.459596
| 109
| 0.513286
| false
| 3.755956
| false
| false
| false
|
yossan4343434/TK_15
|
src/yamashita/preprocessing/capture_face.py
|
1
|
1798
|
# -*- coding: utf-8 -*-
import os
import shutil
import cv2
import glob
ROOT = os.path.abspath(os.path.dirname(__file__))
SRCDIR = ROOT.replace("src/yamashita/preprocessing", "data/rugby/goromaru/raw_goromaru/")
TARDIR = ROOT.replace("src/yamashita/preprocessing", "data/rugby/goromaru/pre_goromaru/")
NODIR = ROOT.replace("src/yamashita/preprocessing", "data/rugby/goromaru/no_goromaru/")
cascade = cv2.CascadeClassifier("/usr//local/Cellar/opencv/2.4.11_1/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml")
#cascade = cv2.CascadeClassifier("/usr//local/Cellar/opencv/2.4.11_1/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml")
#cascade = cv2.CascadeClassifier("/usr//local/Cellar/opencv/2.4.11_1/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml")
#cascade = cv2.CascadeClassifier("/usr//local/Cellar/opencv/2.4.11_1/share/OpenCV/haarcascades/haarcascade_mcs_mouth.xml")
#cascade = cv2.CascadeClassifier("/usr//local/Cellar/opencv/2.4.11_1/share/OpenCV/haarcascades/haarcascade_mcs_nose.xml")
def cap_face(paths):
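    # For every input image: run the Haar cascade (scale factor 1.3, minimum
    # 3 neighbours), crop each detected face, resize it to 100x100 and write it
    # to TARDIR; images with no detection are copied unchanged to NODIR.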
i = 0
for path in paths:
i += 1
img = cv2.imread(path)
face = cascade.detectMultiScale(img, 1.3, 3)
r_name = TARDIR + "goromaru_" + str(i)
if len(face) != 0:
j = 0
for (x, y, w, h) in face:
j += 1
name = r_name + "_" + str(j) + ".jpg"
tmp = img[y:y+h, x:x+w]
tmp = cv2.resize(tmp, (100, 100))
cv2.imwrite(name, tmp)
else:
nogoro = NODIR + path.split("/")[-1]
shutil.copy(path, nogoro)
def getlist():
filelist = glob.glob(SRCDIR+"*")
return filelist
if __name__ == '__main__':
imgpaths = getlist()
cap_face(imgpaths)
|
mit
| 5,616,299,162,888,946,000
| 27.539683
| 132
| 0.626808
| false
| 2.724242
| false
| false
| false
|
indashnet/InDashNet.Open.UN2000
|
android/external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py
|
1
|
73915
|
# Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2009 Apple Inc. All rights reserved.
# Copyright (C) 2011 Daniel Bates (dbates@intudata.com). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import atexit
import base64
import codecs
import getpass
import os
import os.path
import re
import stat
import sys
import subprocess
import tempfile
import time
import webkitpy.thirdparty.unittest2 as unittest
import urllib
import shutil
from datetime import date
from webkitpy.common.checkout.checkout import Checkout
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.executive_mock import MockExecutive
from .git import Git, AmbiguousCommitError
from .detection import detect_scm_system
from .scm import SCM, CheckoutNeedsUpdate, commit_error_handler, AuthenticationError
from .svn import SVN
# Needed by SCMTest._create_patch below; import paths follow webkitpy's layout.
from webkitpy.common.config.committers import Committer
from webkitpy.common.net.bugzilla import Attachment
# We cache the mock SVN repo so that we don't create it again for each call to an SVNTest or GitTest test_ method.
# We store it in a global variable so that we can delete this cached repo on exit(3).
# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
cached_svn_repo_path = None
def remove_dir(path):
# Change directory to / to ensure that we aren't in the directory we want to delete.
os.chdir('/')
shutil.rmtree(path)
# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
@atexit.register
def delete_cached_mock_repo_at_exit():
if cached_svn_repo_path:
remove_dir(cached_svn_repo_path)
# Eventually we will want to write tests which work for both scms. (like update_webkit, changed_files, etc.)
# Perhaps through some SCMTest base-class which both SVNTest and GitTest inherit from.
def run_command(*args, **kwargs):
# FIXME: This should not be a global static.
# New code should use Executive.run_command directly instead
return Executive().run_command(*args, **kwargs)
# FIXME: This should be unified into one of the executive.py commands!
# Callers could use run_and_throw_if_fail(args, cwd=cwd, quiet=True)
def run_silent(args, cwd=None):
# Note: Not thread safe: http://bugs.python.org/issue2320
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
process.communicate() # ignore output
exit_code = process.wait()
if exit_code:
raise ScriptError('Failed to run "%s" exit_code: %d cwd: %s' % (args, exit_code, cwd))
def write_into_file_at_path(file_path, contents, encoding="utf-8"):
if encoding:
with codecs.open(file_path, "w", encoding) as file:
file.write(contents)
else:
with open(file_path, "w") as file:
file.write(contents)
def read_from_path(file_path, encoding="utf-8"):
with codecs.open(file_path, "r", encoding) as file:
return file.read()
def _make_diff(command, *args):
# We use this wrapper to disable output decoding. diffs should be treated as
    # binary files since they may include text files of multiple different encodings.
# FIXME: This should use an Executive.
return run_command([command, "diff"] + list(args), decode_output=False)
def _svn_diff(*args):
return _make_diff("svn", *args)
def _git_diff(*args):
return _make_diff("git", *args)
# Exists to share svn repository creation code between the git and svn tests
class SVNTestRepository(object):
@classmethod
def _svn_add(cls, path):
run_command(["svn", "add", path])
@classmethod
def _svn_commit(cls, message):
run_command(["svn", "commit", "--quiet", "--message", message])
@classmethod
def _setup_test_commits(cls, svn_repo_url):
svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
# Add some test commits
os.chdir(svn_checkout_path)
write_into_file_at_path("test_file", "test1")
cls._svn_add("test_file")
cls._svn_commit("initial commit")
write_into_file_at_path("test_file", "test1test2")
# This used to be the last commit, but doing so broke
        # GitTest.test_apply_git_patch which uses the inverse diff of the last commit.
# svn-apply fails to remove directories in Git, see:
# https://bugs.webkit.org/show_bug.cgi?id=34871
os.mkdir("test_dir")
# Slash should always be the right path separator since we use cygwin on Windows.
test_file3_path = "test_dir/test_file3"
write_into_file_at_path(test_file3_path, "third file")
cls._svn_add("test_dir")
cls._svn_commit("second commit")
write_into_file_at_path("test_file", "test1test2test3\n")
write_into_file_at_path("test_file2", "second file")
cls._svn_add("test_file2")
cls._svn_commit("third commit")
# This 4th commit is used to make sure that our patch file handling
# code correctly treats patches as binary and does not attempt to
# decode them assuming they're utf-8.
write_into_file_at_path("test_file", u"latin1 test: \u00A0\n", "latin1")
write_into_file_at_path("test_file2", u"utf-8 test: \u00A0\n", "utf-8")
cls._svn_commit("fourth commit")
# svn does not seem to update after commit as I would expect.
run_command(['svn', 'update'])
remove_dir(svn_checkout_path)
# This is a hot function since it's invoked by unittest before calling each test_ method in SVNTest and
# GitTest. We create a mock SVN repo once and then perform an SVN checkout from a filesystem copy of
# it since it's expensive to create the mock repo.
@classmethod
def setup(cls, test_object):
global cached_svn_repo_path
if not cached_svn_repo_path:
cached_svn_repo_path = cls._setup_mock_repo()
test_object.temp_directory = tempfile.mkdtemp(suffix="svn_test")
test_object.svn_repo_path = os.path.join(test_object.temp_directory, "repo")
test_object.svn_repo_url = "file://%s" % test_object.svn_repo_path
test_object.svn_checkout_path = os.path.join(test_object.temp_directory, "checkout")
shutil.copytree(cached_svn_repo_path, test_object.svn_repo_path)
run_command(['svn', 'checkout', '--quiet', test_object.svn_repo_url + "/trunk", test_object.svn_checkout_path])
@classmethod
def _setup_mock_repo(cls):
        # Create a test SVN repository
svn_repo_path = tempfile.mkdtemp(suffix="svn_test_repo")
svn_repo_url = "file://%s" % svn_repo_path # Not sure this will work on windows
# git svn complains if we don't pass --pre-1.5-compatible, not sure why:
# Expected FS format '2'; found format '3' at /usr/local/libexec/git-core//git-svn line 1477
run_command(['svnadmin', 'create', '--pre-1.5-compatible', svn_repo_path])
# Create a test svn checkout
svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
run_command(['svn', 'checkout', '--quiet', svn_repo_url, svn_checkout_path])
        # Create and check out a trunk dir to match the standard svn configuration and git-svn's expectations
os.chdir(svn_checkout_path)
os.mkdir('trunk')
cls._svn_add('trunk')
# We can add tags and branches as well if we ever need to test those.
cls._svn_commit('add trunk')
# Change directory out of the svn checkout so we can delete the checkout directory.
remove_dir(svn_checkout_path)
cls._setup_test_commits(svn_repo_url + "/trunk")
return svn_repo_path
@classmethod
def tear_down(cls, test_object):
remove_dir(test_object.temp_directory)
        # Now that we've deleted the checkout paths, the current working directory may be invalid
# Change back to a valid directory so that later calls to os.getcwd() do not fail.
if os.path.isabs(__file__):
path = os.path.dirname(__file__)
else:
path = sys.path[0]
os.chdir(detect_scm_system(path).checkout_root)
# For testing the SCM baseclass directly.
class SCMClassTests(unittest.TestCase):
def setUp(self):
self.dev_null = open(os.devnull, "w") # Used to make our Popen calls quiet.
def tearDown(self):
self.dev_null.close()
def test_run_command_with_pipe(self):
input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)
self.assertEqual(run_command(['grep', 'bar'], input=input_process.stdout), "bar\n")
# Test the non-pipe case too:
self.assertEqual(run_command(['grep', 'bar'], input="foo\nbar"), "bar\n")
command_returns_non_zero = ['/bin/sh', '--invalid-option']
# Test when the input pipe process fails.
input_process = subprocess.Popen(command_returns_non_zero, stdout=subprocess.PIPE, stderr=self.dev_null)
self.assertNotEqual(input_process.poll(), 0)
self.assertRaises(ScriptError, run_command, ['grep', 'bar'], input=input_process.stdout)
# Test when the run_command process fails.
input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null) # grep shows usage and calls exit(2) when called w/o arguments.
self.assertRaises(ScriptError, run_command, command_returns_non_zero, input=input_process.stdout)
def test_error_handlers(self):
git_failure_message="Merge conflict during commit: Your file or directory 'WebCore/ChangeLog' is probably out-of-date: resource out of date; try updating at /usr/local/libexec/git-core//git-svn line 469"
svn_failure_message="""svn: Commit failed (details follow):
svn: File or directory 'ChangeLog' is out of date; try updating
svn: resource out of date; try updating
"""
command_does_not_exist = ['does_not_exist', 'invalid_option']
self.assertRaises(OSError, run_command, command_does_not_exist)
self.assertRaises(OSError, run_command, command_does_not_exist, error_handler=Executive.ignore_error)
command_returns_non_zero = ['/bin/sh', '--invalid-option']
self.assertRaises(ScriptError, run_command, command_returns_non_zero)
# Check if returns error text:
self.assertTrue(run_command(command_returns_non_zero, error_handler=Executive.ignore_error))
self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=git_failure_message))
self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=svn_failure_message))
self.assertRaises(ScriptError, commit_error_handler, ScriptError(output='blah blah blah'))
# GitTest and SVNTest inherit from this so any test_ methods here will be run once for this class and then once for each subclass.
class SCMTest(unittest.TestCase):
def _create_patch(self, patch_contents):
# FIXME: This code is brittle if the Attachment API changes.
attachment = Attachment({"bug_id": 12345}, None)
attachment.contents = lambda: patch_contents
joe_cool = Committer("Joe Cool", "joe@cool.com")
attachment.reviewer = lambda: joe_cool
return attachment
def _setup_webkittools_scripts_symlink(self, local_scm):
webkit_scm = detect_scm_system(os.path.dirname(os.path.abspath(__file__)))
webkit_scripts_directory = webkit_scm.scripts_directory()
local_scripts_directory = local_scm.scripts_directory()
os.mkdir(os.path.dirname(local_scripts_directory))
os.symlink(webkit_scripts_directory, local_scripts_directory)
# Tests which both GitTest and SVNTest should run.
# FIXME: There must be a simpler way to add these w/o adding a wrapper method to both subclasses
def _shared_test_changed_files(self):
write_into_file_at_path("test_file", "changed content")
self.assertItemsEqual(self.scm.changed_files(), ["test_file"])
write_into_file_at_path("test_dir/test_file3", "new stuff")
self.assertItemsEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
old_cwd = os.getcwd()
os.chdir("test_dir")
# Validate that changed_files does not change with our cwd, see bug 37015.
self.assertItemsEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
os.chdir(old_cwd)
def _shared_test_added_files(self):
write_into_file_at_path("test_file", "changed content")
self.assertItemsEqual(self.scm.added_files(), [])
write_into_file_at_path("added_file", "new stuff")
self.scm.add("added_file")
write_into_file_at_path("added_file3", "more new stuff")
write_into_file_at_path("added_file4", "more new stuff")
self.scm.add_list(["added_file3", "added_file4"])
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file2", "new stuff")
self.scm.add("added_dir")
# SVN reports directory changes, Git does not.
added_files = self.scm.added_files()
if "added_dir" in added_files:
added_files.remove("added_dir")
self.assertItemsEqual(added_files, ["added_dir/added_file2", "added_file", "added_file3", "added_file4"])
# Test also to make sure discard_working_directory_changes removes added files
self.scm.discard_working_directory_changes()
self.assertItemsEqual(self.scm.added_files(), [])
self.assertFalse(os.path.exists("added_file"))
self.assertFalse(os.path.exists("added_file3"))
self.assertFalse(os.path.exists("added_file4"))
self.assertFalse(os.path.exists("added_dir"))
def _shared_test_changed_files_for_revision(self):
# SVN reports directory changes, Git does not.
changed_files = self.scm.changed_files_for_revision(3)
if "test_dir" in changed_files:
changed_files.remove("test_dir")
self.assertItemsEqual(changed_files, ["test_dir/test_file3", "test_file"])
self.assertItemsEqual(self.scm.changed_files_for_revision(4), ["test_file", "test_file2"]) # Git and SVN return different orders.
self.assertItemsEqual(self.scm.changed_files_for_revision(2), ["test_file"])
def _shared_test_contents_at_revision(self):
self.assertEqual(self.scm.contents_at_revision("test_file", 3), "test1test2")
self.assertEqual(self.scm.contents_at_revision("test_file", 4), "test1test2test3\n")
# Verify that contents_at_revision returns a byte array, aka str():
self.assertEqual(self.scm.contents_at_revision("test_file", 5), u"latin1 test: \u00A0\n".encode("latin1"))
self.assertEqual(self.scm.contents_at_revision("test_file2", 5), u"utf-8 test: \u00A0\n".encode("utf-8"))
self.assertEqual(self.scm.contents_at_revision("test_file2", 4), "second file")
# Files which don't exist:
# Currently we raise instead of returning None because detecting the difference between
# "file not found" and any other error seems impossible with svn (git seems to expose such through the return code).
self.assertRaises(ScriptError, self.scm.contents_at_revision, "test_file2", 2)
self.assertRaises(ScriptError, self.scm.contents_at_revision, "does_not_exist", 2)
def _shared_test_revisions_changing_file(self):
self.assertItemsEqual(self.scm.revisions_changing_file("test_file"), [5, 4, 3, 2])
self.assertRaises(ScriptError, self.scm.revisions_changing_file, "non_existent_file")
def _shared_test_committer_email_for_revision(self):
self.assertEqual(self.scm.committer_email_for_revision(3), getpass.getuser()) # Committer "email" will be the current user
def _shared_test_reverse_diff(self):
self._setup_webkittools_scripts_symlink(self.scm) # Git's apply_reverse_diff uses resolve-ChangeLogs
# Only test the simple case, as any other will end up with conflict markers.
self.scm.apply_reverse_diff('5')
self.assertEqual(read_from_path('test_file'), "test1test2test3\n")
def _shared_test_diff_for_revision(self):
# Patch formats are slightly different between svn and git, so just regexp for things we know should be there.
r3_patch = self.scm.diff_for_revision(4)
self.assertRegexpMatches(r3_patch, 'test3')
self.assertNotRegexpMatches(r3_patch, 'test4')
self.assertRegexpMatches(r3_patch, 'test2')
self.assertRegexpMatches(self.scm.diff_for_revision(3), 'test2')
def _shared_test_svn_apply_git_patch(self):
self._setup_webkittools_scripts_symlink(self.scm)
git_binary_addition = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
new file mode 100644
index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d90
60151690
GIT binary patch
literal 512
zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
literal 0
HcmV?d00001
"""
self.checkout.apply_patch(self._create_patch(git_binary_addition))
added = read_from_path('fizzbuzz7.gif', encoding=None)
self.assertEqual(512, len(added))
self.assertTrue(added.startswith('GIF89a'))
self.assertIn('fizzbuzz7.gif', self.scm.changed_files())
# The file already exists.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_addition))
git_binary_modification = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
index 64a9532e7794fcd791f6f12157406d9060151690..323fae03f4606ea9991df8befbb2fca7
GIT binary patch
literal 7
OcmYex&reD$;sO8*F9L)B
literal 512
zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
"""
self.checkout.apply_patch(self._create_patch(git_binary_modification))
modified = read_from_path('fizzbuzz7.gif', encoding=None)
self.assertEqual('foobar\n', modified)
self.assertIn('fizzbuzz7.gif', self.scm.changed_files())
# Applying the same modification should fail.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_modification))
git_binary_deletion = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
deleted file mode 100644
index 323fae0..0000000
GIT binary patch
literal 0
HcmV?d00001
literal 7
OcmYex&reD$;sO8*F9L)B
"""
self.checkout.apply_patch(self._create_patch(git_binary_deletion))
self.assertFalse(os.path.exists('fizzbuzz7.gif'))
self.assertNotIn('fizzbuzz7.gif', self.scm.changed_files())
# Cannot delete again.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_deletion))
def _shared_test_add_recursively(self):
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file", "new stuff")
self.scm.add("added_dir/added_file")
self.assertIn("added_dir/added_file", self.scm.added_files())
def _shared_test_delete_recursively(self):
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file", "new stuff")
self.scm.add("added_dir/added_file")
self.assertIn("added_dir/added_file", self.scm.added_files())
self.scm.delete("added_dir/added_file")
self.assertNotIn("added_dir", self.scm.added_files())
def _shared_test_delete_recursively_or_not(self):
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file", "new stuff")
write_into_file_at_path("added_dir/another_added_file", "more new stuff")
self.scm.add("added_dir/added_file")
self.scm.add("added_dir/another_added_file")
self.assertIn("added_dir/added_file", self.scm.added_files())
self.assertIn("added_dir/another_added_file", self.scm.added_files())
self.scm.delete("added_dir/added_file")
self.assertIn("added_dir/another_added_file", self.scm.added_files())
def _shared_test_exists(self, scm, commit_function):
os.chdir(scm.checkout_root)
self.assertFalse(scm.exists('foo.txt'))
write_into_file_at_path('foo.txt', 'some stuff')
self.assertFalse(scm.exists('foo.txt'))
scm.add('foo.txt')
commit_function('adding foo')
self.assertTrue(scm.exists('foo.txt'))
scm.delete('foo.txt')
commit_function('deleting foo')
self.assertFalse(scm.exists('foo.txt'))
def _shared_test_head_svn_revision(self):
self.assertEqual(self.scm.head_svn_revision(), '5')
def _shared_test_move(self):
write_into_file_at_path('added_file', 'new stuff')
self.scm.add('added_file')
self.scm.move('added_file', 'moved_file')
self.assertIn('moved_file', self.scm.added_files())
def _shared_test_move_recursive(self):
os.mkdir("added_dir")
write_into_file_at_path('added_dir/added_file', 'new stuff')
write_into_file_at_path('added_dir/another_added_file', 'more new stuff')
self.scm.add('added_dir')
self.scm.move('added_dir', 'moved_dir')
self.assertIn('moved_dir/added_file', self.scm.added_files())
self.assertIn('moved_dir/another_added_file', self.scm.added_files())
# Context manager that overrides the current timezone.
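# Typical use (see _set_date_and_reviewer below): code run inside
#   with TimezoneOverride('PST8PDT'): ...
# sees TZ temporarily overridden; the previous value is restored on exit.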
class TimezoneOverride(object):
def __init__(self, timezone_string):
self._timezone_string = timezone_string
def __enter__(self):
if hasattr(time, 'tzset'):
self._saved_timezone = os.environ.get('TZ', None)
os.environ['TZ'] = self._timezone_string
time.tzset()
def __exit__(self, type, value, traceback):
if hasattr(time, 'tzset'):
if self._saved_timezone:
os.environ['TZ'] = self._saved_timezone
else:
del os.environ['TZ']
time.tzset()
class SVNTest(SCMTest):
@staticmethod
def _set_date_and_reviewer(changelog_entry):
# Joe Cool matches the reviewer set in SCMTest._create_patch
changelog_entry = changelog_entry.replace('REVIEWER_HERE', 'Joe Cool')
# svn-apply will update ChangeLog entries with today's date (as in Cupertino, CA, US)
with TimezoneOverride('PST8PDT'):
return changelog_entry.replace('DATE_HERE', date.today().isoformat())
def test_svn_apply(self):
first_entry = """2009-10-26 Eric Seidel <eric@webkit.org>
Reviewed by Foo Bar.
Most awesome change ever.
* scm_unittest.py:
"""
intermediate_entry = """2009-10-27 Eric Seidel <eric@webkit.org>
Reviewed by Baz Bar.
A more awesomer change yet!
* scm_unittest.py:
"""
one_line_overlap_patch = """Index: ChangeLog
===================================================================
--- ChangeLog (revision 5)
+++ ChangeLog (working copy)
@@ -1,5 +1,13 @@
2009-10-26 Eric Seidel <eric@webkit.org>
%(whitespace)s
+ Reviewed by NOBODY (OOPS!).
+
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+
+2009-10-26 Eric Seidel <eric@webkit.org>
+
Reviewed by Foo Bar.
%(whitespace)s
Most awesome change ever.
""" % {'whitespace': ' '}
one_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org>
Reviewed by REVIEWER_HERE.
Second most awesome change ever.
* scm_unittest.py:
"""
two_line_overlap_patch = """Index: ChangeLog
===================================================================
--- ChangeLog (revision 5)
+++ ChangeLog (working copy)
@@ -2,6 +2,14 @@
%(whitespace)s
Reviewed by Foo Bar.
%(whitespace)s
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+
+2009-10-26 Eric Seidel <eric@webkit.org>
+
+ Reviewed by Foo Bar.
+
Most awesome change ever.
%(whitespace)s
* scm_unittest.py:
""" % {'whitespace': ' '}
two_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org>
Reviewed by Foo Bar.
Second most awesome change ever.
* scm_unittest.py:
"""
write_into_file_at_path('ChangeLog', first_entry)
run_command(['svn', 'add', 'ChangeLog'])
run_command(['svn', 'commit', '--quiet', '--message', 'ChangeLog commit'])
# Patch files were created against just 'first_entry'.
# Add a second commit to make svn-apply have to apply the patches with fuzz.
changelog_contents = "%s\n%s" % (intermediate_entry, first_entry)
write_into_file_at_path('ChangeLog', changelog_contents)
run_command(['svn', 'commit', '--quiet', '--message', 'Intermediate commit'])
self._setup_webkittools_scripts_symlink(self.scm)
self.checkout.apply_patch(self._create_patch(one_line_overlap_patch))
expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(one_line_overlap_entry), changelog_contents)
self.assertEqual(read_from_path('ChangeLog'), expected_changelog_contents)
self.scm.revert_files(['ChangeLog'])
self.checkout.apply_patch(self._create_patch(two_line_overlap_patch))
expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(two_line_overlap_entry), changelog_contents)
self.assertEqual(read_from_path('ChangeLog'), expected_changelog_contents)
def setUp(self):
SVNTestRepository.setup(self)
os.chdir(self.svn_checkout_path)
self.scm = detect_scm_system(self.svn_checkout_path)
self.scm.svn_server_realm = None
# For historical reasons, we test some checkout code here too.
self.checkout = Checkout(self.scm)
def tearDown(self):
SVNTestRepository.tear_down(self)
def test_detect_scm_system_relative_url(self):
scm = detect_scm_system(".")
# I wanted to assert that we got the right path, but there was some
# crazy magic with temp folder names that I couldn't figure out.
self.assertTrue(scm.checkout_root)
def test_create_patch_is_full_patch(self):
test_dir_path = os.path.join(self.svn_checkout_path, "test_dir2")
os.mkdir(test_dir_path)
test_file_path = os.path.join(test_dir_path, 'test_file2')
write_into_file_at_path(test_file_path, 'test content')
run_command(['svn', 'add', 'test_dir2'])
# create_patch depends on 'svn-create-patch', so make a dummy version.
scripts_path = os.path.join(self.svn_checkout_path, 'Tools', 'Scripts')
os.makedirs(scripts_path)
create_patch_path = os.path.join(scripts_path, 'svn-create-patch')
write_into_file_at_path(create_patch_path, '#!/bin/sh\necho $PWD') # We could pass -n to prevent the \n, but not all echo accept -n.
os.chmod(create_patch_path, stat.S_IXUSR | stat.S_IRUSR)
# Change into our test directory and run the create_patch command.
os.chdir(test_dir_path)
scm = detect_scm_system(test_dir_path)
self.assertEqual(scm.checkout_root, self.svn_checkout_path) # Sanity check that detection worked right.
patch_contents = scm.create_patch()
# Our fake 'svn-create-patch' returns $PWD instead of a patch, check that it was executed from the root of the repo.
self.assertEqual("%s\n" % os.path.realpath(scm.checkout_root), patch_contents) # Add a \n because echo adds a \n.
def test_detection(self):
self.assertEqual(self.scm.display_name(), "svn")
self.assertEqual(self.scm.supports_local_commits(), False)
def test_apply_small_binary_patch(self):
patch_contents = """Index: test_file.swf
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes on: test_file.swf
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
"""
expected_contents = base64.b64decode("Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==")
self._setup_webkittools_scripts_symlink(self.scm)
patch_file = self._create_patch(patch_contents)
self.checkout.apply_patch(patch_file)
actual_contents = read_from_path("test_file.swf", encoding=None)
self.assertEqual(actual_contents, expected_contents)
def test_apply_svn_patch(self):
patch = self._create_patch(_svn_diff("-r5:4"))
self._setup_webkittools_scripts_symlink(self.scm)
Checkout(self.scm).apply_patch(patch)
def test_commit_logs(self):
# Commits have dates and usernames in them, so we can't just direct compare.
self.assertRegexpMatches(self.scm.last_svn_commit_log(), 'fourth commit')
self.assertRegexpMatches(self.scm.svn_commit_log(3), 'second commit')
def _shared_test_commit_with_message(self, username=None):
write_into_file_at_path('test_file', 'more test content')
commit_text = self.scm.commit_with_message("another test commit", username)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
def test_commit_in_subdir(self, username=None):
write_into_file_at_path('test_dir/test_file3', 'more test content')
os.chdir("test_dir")
commit_text = self.scm.commit_with_message("another test commit", username)
os.chdir("..")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
def test_commit_text_parsing(self):
self._shared_test_commit_with_message()
def test_commit_with_username(self):
self._shared_test_commit_with_message("dbates@webkit.org")
def test_commit_without_authorization(self):
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=111669
        # This test ends up looking in the actual $HOME/.subversion for authorization,
# which makes it fragile. For now, set it to use a realm that won't be authorized,
# but we should really plumb through a fake_home_dir here like we do in
# test_has_authorization_for_realm.
self.scm.svn_server_realm = '<http://svn.example.com:80> Example'
self.assertRaises(AuthenticationError, self._shared_test_commit_with_message)
def test_has_authorization_for_realm_using_credentials_with_passtype(self):
credentials = """
K 8
passtype
V 8
keychain
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
dbates@webkit.org
END
"""
self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
def test_has_authorization_for_realm_using_credentials_with_password(self):
credentials = """
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
dbates@webkit.org
K 8
password
V 4
blah
END
"""
self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
def _test_has_authorization_for_realm_using_credentials(self, realm, credentials):
fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
os.mkdir(svn_config_dir_path)
fake_webkit_auth_file = os.path.join(svn_config_dir_path, "fake_webkit_auth_file")
write_into_file_at_path(fake_webkit_auth_file, credentials)
result = self.scm.has_authorization_for_realm(realm, home_directory=fake_home_dir)
os.remove(fake_webkit_auth_file)
os.rmdir(svn_config_dir_path)
os.rmdir(fake_home_dir)
return result
def test_not_have_authorization_for_realm_with_credentials_missing_password_and_passtype(self):
credentials = """
K 15
svn:realmstring
V 39
<http://svn.webkit.org:80> Mac OS Forge
K 8
username
V 17
dbates@webkit.org
END
"""
self.assertFalse(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
def test_not_have_authorization_for_realm_when_missing_credentials_file(self):
fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
os.mkdir(svn_config_dir_path)
self.assertFalse(self.scm.has_authorization_for_realm(SVN.svn_server_realm, home_directory=fake_home_dir))
os.rmdir(svn_config_dir_path)
os.rmdir(fake_home_dir)
def test_reverse_diff(self):
self._shared_test_reverse_diff()
def test_diff_for_revision(self):
self._shared_test_diff_for_revision()
def test_svn_apply_git_patch(self):
self._shared_test_svn_apply_git_patch()
def test_changed_files(self):
self._shared_test_changed_files()
def test_changed_files_for_revision(self):
self._shared_test_changed_files_for_revision()
def test_added_files(self):
self._shared_test_added_files()
def test_contents_at_revision(self):
self._shared_test_contents_at_revision()
def test_revisions_changing_file(self):
self._shared_test_revisions_changing_file()
def test_committer_email_for_revision(self):
self._shared_test_committer_email_for_revision()
def test_add_recursively(self):
self._shared_test_add_recursively()
def test_delete(self):
os.chdir(self.svn_checkout_path)
self.scm.delete("test_file")
self.assertIn("test_file", self.scm.deleted_files())
def test_delete_list(self):
os.chdir(self.svn_checkout_path)
self.scm.delete_list(["test_file", "test_file2"])
self.assertIn("test_file", self.scm.deleted_files())
self.assertIn("test_file2", self.scm.deleted_files())
def test_delete_recursively(self):
self._shared_test_delete_recursively()
def test_delete_recursively_or_not(self):
self._shared_test_delete_recursively_or_not()
def test_head_svn_revision(self):
self._shared_test_head_svn_revision()
def test_move(self):
self._shared_test_move()
def test_move_recursive(self):
self._shared_test_move_recursive()
def test_propset_propget(self):
filepath = os.path.join(self.svn_checkout_path, "test_file")
expected_mime_type = "x-application/foo-bar"
self.scm.propset("svn:mime-type", expected_mime_type, filepath)
self.assertEqual(expected_mime_type, self.scm.propget("svn:mime-type", filepath))
def test_show_head(self):
write_into_file_at_path("test_file", u"Hello!", "utf-8")
SVNTestRepository._svn_commit("fourth commit")
self.assertEqual("Hello!", self.scm.show_head('test_file'))
def test_show_head_binary(self):
data = "\244"
write_into_file_at_path("binary_file", data, encoding=None)
self.scm.add("binary_file")
self.scm.commit_with_message("a test commit")
self.assertEqual(data, self.scm.show_head('binary_file'))
def do_test_diff_for_file(self):
write_into_file_at_path('test_file', 'some content')
self.scm.commit_with_message("a test commit")
diff = self.scm.diff_for_file('test_file')
self.assertEqual(diff, "")
write_into_file_at_path("test_file", "changed content")
diff = self.scm.diff_for_file('test_file')
self.assertIn("-some content", diff)
self.assertIn("+changed content", diff)
def clean_bogus_dir(self):
self.bogus_dir = self.scm._bogus_dir_name()
if os.path.exists(self.bogus_dir):
shutil.rmtree(self.bogus_dir)
def test_diff_for_file_with_existing_bogus_dir(self):
self.clean_bogus_dir()
os.mkdir(self.bogus_dir)
self.do_test_diff_for_file()
self.assertTrue(os.path.exists(self.bogus_dir))
shutil.rmtree(self.bogus_dir)
def test_diff_for_file_with_missing_bogus_dir(self):
self.clean_bogus_dir()
self.do_test_diff_for_file()
self.assertFalse(os.path.exists(self.bogus_dir))
def test_svn_lock(self):
svn_root_lock_path = ".svn/lock"
write_into_file_at_path(svn_root_lock_path, "", "utf-8")
# webkit-patch uses a Checkout object and runs update-webkit, just use svn update here.
self.assertRaises(ScriptError, run_command, ['svn', 'update'])
self.scm.discard_working_directory_changes()
self.assertFalse(os.path.exists(svn_root_lock_path))
run_command(['svn', 'update']) # Should succeed and not raise.
def test_exists(self):
self._shared_test_exists(self.scm, self.scm.commit_with_message)
class GitTest(SCMTest):
def setUp(self):
"""Sets up fresh git repository with one commit. Then setups a second git
repo that tracks the first one."""
# FIXME: We should instead clone a git repo that is tracking an SVN repo.
# That better matches what we do with WebKit.
self.original_dir = os.getcwd()
self.untracking_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout2")
run_command(['git', 'init', self.untracking_checkout_path])
os.chdir(self.untracking_checkout_path)
write_into_file_at_path('foo_file', 'foo')
run_command(['git', 'add', 'foo_file'])
run_command(['git', 'commit', '-am', 'dummy commit'])
self.untracking_scm = detect_scm_system(self.untracking_checkout_path)
self.tracking_git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
run_command(['git', 'clone', '--quiet', self.untracking_checkout_path, self.tracking_git_checkout_path])
os.chdir(self.tracking_git_checkout_path)
self.tracking_scm = detect_scm_system(self.tracking_git_checkout_path)
def tearDown(self):
# Change back to a valid directory so that later calls to os.getcwd() do not fail.
os.chdir(self.original_dir)
run_command(['rm', '-rf', self.tracking_git_checkout_path])
run_command(['rm', '-rf', self.untracking_checkout_path])
def test_remote_branch_ref(self):
self.assertEqual(self.tracking_scm.remote_branch_ref(), 'refs/remotes/origin/master')
os.chdir(self.untracking_checkout_path)
self.assertRaises(ScriptError, self.untracking_scm.remote_branch_ref)
def test_multiple_remotes(self):
run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote1'])
run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote2'])
self.assertEqual(self.tracking_scm.remote_branch_ref(), 'remote1')
def test_create_patch(self):
write_into_file_at_path('test_file_commit1', 'contents')
run_command(['git', 'add', 'test_file_commit1'])
scm = self.tracking_scm
scm.commit_locally_with_message('message')
patch = scm.create_patch()
self.assertNotRegexpMatches(patch, r'Subversion Revision:')
def test_orderfile(self):
os.mkdir("Tools")
os.mkdir("Source")
os.mkdir("LayoutTests")
os.mkdir("Websites")
# Slash should always be the right path separator since we use cygwin on Windows.
Tools_ChangeLog = "Tools/ChangeLog"
write_into_file_at_path(Tools_ChangeLog, "contents")
Source_ChangeLog = "Source/ChangeLog"
write_into_file_at_path(Source_ChangeLog, "contents")
LayoutTests_ChangeLog = "LayoutTests/ChangeLog"
write_into_file_at_path(LayoutTests_ChangeLog, "contents")
Websites_ChangeLog = "Websites/ChangeLog"
write_into_file_at_path(Websites_ChangeLog, "contents")
Tools_ChangeFile = "Tools/ChangeFile"
write_into_file_at_path(Tools_ChangeFile, "contents")
Source_ChangeFile = "Source/ChangeFile"
write_into_file_at_path(Source_ChangeFile, "contents")
LayoutTests_ChangeFile = "LayoutTests/ChangeFile"
write_into_file_at_path(LayoutTests_ChangeFile, "contents")
Websites_ChangeFile = "Websites/ChangeFile"
write_into_file_at_path(Websites_ChangeFile, "contents")
run_command(['git', 'add', 'Tools/ChangeLog'])
run_command(['git', 'add', 'LayoutTests/ChangeLog'])
run_command(['git', 'add', 'Source/ChangeLog'])
run_command(['git', 'add', 'Websites/ChangeLog'])
run_command(['git', 'add', 'Tools/ChangeFile'])
run_command(['git', 'add', 'LayoutTests/ChangeFile'])
run_command(['git', 'add', 'Source/ChangeFile'])
run_command(['git', 'add', 'Websites/ChangeFile'])
scm = self.tracking_scm
scm.commit_locally_with_message('message')
patch = scm.create_patch()
self.assertTrue(re.search(r'Tools/ChangeLog', patch).start() < re.search(r'Tools/ChangeFile', patch).start())
self.assertTrue(re.search(r'Websites/ChangeLog', patch).start() < re.search(r'Websites/ChangeFile', patch).start())
self.assertTrue(re.search(r'Source/ChangeLog', patch).start() < re.search(r'Source/ChangeFile', patch).start())
self.assertTrue(re.search(r'LayoutTests/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
self.assertTrue(re.search(r'Source/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Tools/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Websites/ChangeLog', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Source/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Tools/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Websites/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeLog', patch).start())
self.assertTrue(re.search(r'Source/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
self.assertTrue(re.search(r'Tools/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
self.assertTrue(re.search(r'Websites/ChangeFile', patch).start() < re.search(r'LayoutTests/ChangeFile', patch).start())
def test_exists(self):
scm = self.untracking_scm
self._shared_test_exists(scm, scm.commit_locally_with_message)
def test_head_svn_revision(self):
scm = detect_scm_system(self.untracking_checkout_path)
# If we cloned a git repo tracking an SVN repo, this would give the same result as
# self._shared_test_head_svn_revision().
self.assertEqual(scm.head_svn_revision(), '')
def test_rename_files(self):
scm = self.tracking_scm
scm.move('foo_file', 'bar_file')
scm.commit_locally_with_message('message')
patch = scm.create_patch()
self.assertNotRegexpMatches(patch, r'rename from ')
self.assertNotRegexpMatches(patch, r'rename to ')
class GitSVNTest(SCMTest):
def _setup_git_checkout(self):
self.git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
# --quiet doesn't make git svn silent, so we use run_silent to redirect output
run_silent(['git', 'svn', 'clone', '-T', 'trunk', self.svn_repo_url, self.git_checkout_path])
os.chdir(self.git_checkout_path)
def _tear_down_git_checkout(self):
# Change back to a valid directory so that later calls to os.getcwd() do not fail.
os.chdir(self.original_dir)
run_command(['rm', '-rf', self.git_checkout_path])
def setUp(self):
self.original_dir = os.getcwd()
SVNTestRepository.setup(self)
self._setup_git_checkout()
self.scm = detect_scm_system(self.git_checkout_path)
self.scm.svn_server_realm = None
# For historical reasons, we test some checkout code here too.
self.checkout = Checkout(self.scm)
def tearDown(self):
SVNTestRepository.tear_down(self)
self._tear_down_git_checkout()
def test_detection(self):
self.assertEqual(self.scm.display_name(), "git")
self.assertEqual(self.scm.supports_local_commits(), True)
def test_read_git_config(self):
key = 'test.git-config'
value = 'git-config value'
run_command(['git', 'config', key, value])
self.assertEqual(self.scm.read_git_config(key), value)
def test_local_commits(self):
test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(test_file, 'foo')
run_command(['git', 'commit', '-a', '-m', 'local commit'])
self.assertEqual(len(self.scm.local_commits()), 1)
def test_discard_local_commits(self):
test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(test_file, 'foo')
run_command(['git', 'commit', '-a', '-m', 'local commit'])
self.assertEqual(len(self.scm.local_commits()), 1)
self.scm.discard_local_commits()
self.assertEqual(len(self.scm.local_commits()), 0)
def test_delete_branch(self):
new_branch = 'foo'
run_command(['git', 'checkout', '-b', new_branch])
self.assertEqual(run_command(['git', 'symbolic-ref', 'HEAD']).strip(), 'refs/heads/' + new_branch)
run_command(['git', 'checkout', '-b', 'bar'])
self.scm.delete_branch(new_branch)
self.assertNotRegexpMatches(run_command(['git', 'branch']), r'foo')
def test_remote_merge_base(self):
# Diff to merge-base should include working-copy changes,
# which the diff to svn_branch.. doesn't.
test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(test_file, 'foo')
diff_to_common_base = _git_diff(self.scm.remote_branch_ref() + '..')
diff_to_merge_base = _git_diff(self.scm.remote_merge_base())
self.assertNotRegexpMatches(diff_to_common_base, r'foo')
self.assertRegexpMatches(diff_to_merge_base, r'foo')
def test_rebase_in_progress(self):
svn_test_file = os.path.join(self.svn_checkout_path, 'test_file')
write_into_file_at_path(svn_test_file, "svn_checkout")
run_command(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
git_test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(git_test_file, "git_checkout")
run_command(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
# --quiet doesn't make git svn silent, so use run_silent to redirect output
self.assertRaises(ScriptError, run_silent, ['git', 'svn', '--quiet', 'rebase']) # Will fail due to a conflict leaving us mid-rebase.
self.assertTrue(self.scm.rebase_in_progress())
# Make sure our cleanup works.
self.scm.discard_working_directory_changes()
self.assertFalse(self.scm.rebase_in_progress())
# Make sure cleanup doesn't throw when no rebase is in progress.
self.scm.discard_working_directory_changes()
def test_commitish_parsing(self):
# Multiple revisions are cherry-picked.
self.assertEqual(len(self.scm.commit_ids_from_commitish_arguments(['HEAD~2'])), 1)
self.assertEqual(len(self.scm.commit_ids_from_commitish_arguments(['HEAD', 'HEAD~2'])), 2)
# ... is an invalid range specifier
self.assertRaises(ScriptError, self.scm.commit_ids_from_commitish_arguments, ['trunk...HEAD'])
def test_commitish_order(self):
commit_range = 'HEAD~3..HEAD'
actual_commits = self.scm.commit_ids_from_commitish_arguments([commit_range])
expected_commits = []
expected_commits += reversed(run_command(['git', 'rev-list', commit_range]).splitlines())
self.assertEqual(actual_commits, expected_commits)
def test_apply_git_patch(self):
# We carefully pick a diff which does not have a directory addition
# as currently svn-apply will error out when trying to remove directories
# in Git: https://bugs.webkit.org/show_bug.cgi?id=34871
patch = self._create_patch(_git_diff('HEAD..HEAD^'))
self._setup_webkittools_scripts_symlink(self.scm)
Checkout(self.scm).apply_patch(patch)
def test_commit_text_parsing(self):
write_into_file_at_path('test_file', 'more test content')
commit_text = self.scm.commit_with_message("another test commit")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
def test_commit_with_message_working_copy_only(self):
write_into_file_at_path('test_file_commit1', 'more test content')
run_command(['git', 'add', 'test_file_commit1'])
commit_text = self.scm.commit_with_message("yet another test commit")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def _local_commit(self, filename, contents, message):
write_into_file_at_path(filename, contents)
run_command(['git', 'add', filename])
self.scm.commit_locally_with_message(message)
def _one_local_commit(self):
self._local_commit('test_file_commit1', 'more test content', 'another test commit')
def _one_local_commit_plus_working_copy_changes(self):
self._one_local_commit()
write_into_file_at_path('test_file_commit2', 'still more test content')
run_command(['git', 'add', 'test_file_commit2'])
def _second_local_commit(self):
self._local_commit('test_file_commit2', 'still more test content', 'yet another test commit')
def _two_local_commits(self):
self._one_local_commit()
self._second_local_commit()
def _three_local_commits(self):
self._local_commit('test_file_commit0', 'more test content', 'another test commit')
self._two_local_commits()
def test_locally_commit_all_working_copy_changes(self):
self._local_commit('test_file', 'test content', 'test commit')
write_into_file_at_path('test_file', 'changed test content')
self.assertTrue(self.scm.has_working_directory_changes())
self.scm.commit_locally_with_message('all working copy changes')
self.assertFalse(self.scm.has_working_directory_changes())
def test_locally_commit_no_working_copy_changes(self):
self._local_commit('test_file', 'test content', 'test commit')
write_into_file_at_path('test_file', 'changed test content')
self.assertTrue(self.scm.has_working_directory_changes())
self.assertRaises(ScriptError, self.scm.commit_locally_with_message, 'no working copy changes', False)
def test_locally_commit_selected_working_copy_changes(self):
self._local_commit('test_file_1', 'test content 1', 'test commit 1')
self._local_commit('test_file_2', 'test content 2', 'test commit 2')
write_into_file_at_path('test_file_1', 'changed test content 1')
write_into_file_at_path('test_file_2', 'changed test content 2')
self.assertTrue(self.scm.has_working_directory_changes())
run_command(['git', 'add', 'test_file_1'])
self.scm.commit_locally_with_message('selected working copy changes', commit_all_working_directory_changes=False)
self.assertTrue(self.scm.has_working_directory_changes())
self.assertTrue(self.scm.diff_for_file('test_file_1') == '')
self.assertFalse(self.scm.diff_for_file('test_file_2') == '')
def test_revisions_changing_files_with_local_commit(self):
self._one_local_commit()
self.assertItemsEqual(self.scm.revisions_changing_file('test_file_commit1'), [])
def test_commit_with_message(self):
self._one_local_commit_plus_working_copy_changes()
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "yet another test commit")
commit_text = self.scm.commit_with_message("yet another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_git_commit(self):
self._two_local_commits()
commit_text = self.scm.commit_with_message("another test commit", git_commit="HEAD^")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit1')
self.assertNotRegexpMatches(svn_log, r'test_file_commit2')
def test_commit_with_message_git_commit_range(self):
self._three_local_commits()
commit_text = self.scm.commit_with_message("another test commit", git_commit="HEAD~2..HEAD")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertNotRegexpMatches(svn_log, r'test_file_commit0')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
self.assertRegexpMatches(svn_log, r'test_file_commit2')
def test_commit_with_message_only_local_commit(self):
self._one_local_commit()
commit_text = self.scm.commit_with_message("another test commit")
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_multiple_local_commits_and_working_copy(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', 'working copy change')
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "another test commit")
commit_text = self.scm.commit_with_message("another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_git_commit_and_working_copy(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', 'working copy change')
self.assertRaises(ScriptError, self.scm.commit_with_message, "another test commit", git_commit="HEAD^")
def test_commit_with_message_multiple_local_commits_always_squash(self):
run_command(['git', 'config', 'webkit-patch.commit-should-always-squash', 'true'])
self._two_local_commits()
commit_text = self.scm.commit_with_message("yet another test commit")
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_multiple_local_commits(self):
self._two_local_commits()
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "yet another test commit")
commit_text = self.scm.commit_with_message("yet another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "another test commit")
commit_text = self.scm.commit_with_message("another test commit", force_squash=True)
self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
self.assertNotRegexpMatches(svn_log, r'test_file2')
self.assertRegexpMatches(svn_log, r'test_file_commit2')
self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_not_synced_with_conflict(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._local_commit('test_file2', 'asdf', 'asdf commit')
# There's a conflict between trunk and the test_file2 modification.
self.assertRaises(ScriptError, self.scm.commit_with_message, "another test commit", force_squash=True)
def test_upstream_branch(self):
run_command(['git', 'checkout', '-t', '-b', 'my-branch'])
run_command(['git', 'checkout', '-t', '-b', 'my-second-branch'])
self.assertEqual(self.scm._upstream_branch(), 'my-branch')
def test_remote_branch_ref(self):
self.assertEqual(self.scm.remote_branch_ref(), 'refs/remotes/trunk')
def test_reverse_diff(self):
self._shared_test_reverse_diff()
def test_diff_for_revision(self):
self._shared_test_diff_for_revision()
def test_svn_apply_git_patch(self):
self._shared_test_svn_apply_git_patch()
def test_create_patch_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'Subversion Revision: 5')
def test_create_patch_after_merge(self):
run_command(['git', 'checkout', '-b', 'dummy-branch', 'trunk~3'])
self._one_local_commit()
run_command(['git', 'merge', 'trunk'])
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'Subversion Revision: 5')
def test_create_patch_with_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch(changed_files=['test_file_commit2'])
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_with_rm_and_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
os.remove('test_file_commit1')
patch = self.scm.create_patch()
patch_with_changed_files = self.scm.create_patch(changed_files=['test_file_commit1', 'test_file_commit2'])
self.assertEqual(patch, patch_with_changed_files)
def test_create_patch_git_commit(self):
self._two_local_commits()
patch = self.scm.create_patch(git_commit="HEAD^")
self.assertRegexpMatches(patch, r'test_file_commit1')
self.assertNotRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_git_commit_range(self):
self._three_local_commits()
patch = self.scm.create_patch(git_commit="HEAD~2..HEAD")
self.assertNotRegexpMatches(patch, r'test_file_commit0')
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_patch_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
patch = self.scm.create_patch(git_commit="HEAD....")
self.assertNotRegexpMatches(patch, r'test_file_commit1')
self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_multiple_local_commits(self):
self._two_local_commits()
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_patch_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
patch = self.scm.create_patch()
self.assertNotRegexpMatches(patch, r'test_file2')
self.assertRegexpMatches(patch, r'test_file_commit2')
self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_binary_patch(self):
# Create a git binary patch and check the contents.
test_file_name = 'binary_file'
test_file_path = os.path.join(self.git_checkout_path, test_file_name)
file_contents = ''.join(map(chr, range(256)))
write_into_file_at_path(test_file_path, file_contents, encoding=None)
run_command(['git', 'add', test_file_name])
patch = self.scm.create_patch()
self.assertRegexpMatches(patch, r'\nliteral 0\n')
self.assertRegexpMatches(patch, r'\nliteral 256\n')
# Check if we can apply the created patch.
run_command(['git', 'rm', '-f', test_file_name])
self._setup_webkittools_scripts_symlink(self.scm)
self.checkout.apply_patch(self._create_patch(patch))
self.assertEqual(file_contents, read_from_path(test_file_path, encoding=None))
# Check if we can create a patch from a local commit.
write_into_file_at_path(test_file_path, file_contents, encoding=None)
run_command(['git', 'add', test_file_name])
run_command(['git', 'commit', '-m', 'binary diff'])
patch_from_local_commit = self.scm.create_patch('HEAD')
self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 0\n')
self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 256\n')
def test_changed_files_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
files = self.scm.changed_files()
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
# working copy should *not* be in the list.
files = self.scm.changed_files('trunk..')
self.assertIn('test_file_commit1', files)
self.assertNotIn('test_file_commit2', files)
# working copy *should* be in the list.
files = self.scm.changed_files('trunk....')
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_git_commit(self):
self._two_local_commits()
files = self.scm.changed_files(git_commit="HEAD^")
self.assertIn('test_file_commit1', files)
self.assertNotIn('test_file_commit2', files)
def test_changed_files_git_commit_range(self):
self._three_local_commits()
files = self.scm.changed_files(git_commit="HEAD~2..HEAD")
self.assertNotIn('test_file_commit0', files)
self.assertIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
files = self.scm.changed_files(git_commit="HEAD....")
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
def test_changed_files_multiple_local_commits(self):
self._two_local_commits()
files = self.scm.changed_files()
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit1', files)
def test_changed_files_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
files = self.scm.changed_files()
self.assertNotIn('test_file2', files)
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit1', files)
def test_changed_files(self):
self._shared_test_changed_files()
def test_changed_files_for_revision(self):
self._shared_test_changed_files_for_revision()
def test_changed_files_upstream(self):
run_command(['git', 'checkout', '-t', '-b', 'my-branch'])
self._one_local_commit()
run_command(['git', 'checkout', '-t', '-b', 'my-second-branch'])
self._second_local_commit()
write_into_file_at_path('test_file_commit0', 'more test content')
run_command(['git', 'add', 'test_file_commit0'])
# equivalent to 'git diff my-branch..HEAD'; should not include working changes
files = self.scm.changed_files(git_commit='UPSTREAM..')
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
self.assertNotIn('test_file_commit0', files)
# equivalent to 'git diff my-branch', *should* include working changes
files = self.scm.changed_files(git_commit='UPSTREAM....')
self.assertNotIn('test_file_commit1', files)
self.assertIn('test_file_commit2', files)
self.assertIn('test_file_commit0', files)
def test_contents_at_revision(self):
self._shared_test_contents_at_revision()
def test_revisions_changing_file(self):
self._shared_test_revisions_changing_file()
def test_added_files(self):
self._shared_test_added_files()
def test_committer_email_for_revision(self):
self._shared_test_committer_email_for_revision()
def test_add_recursively(self):
self._shared_test_add_recursively()
def test_delete(self):
self._two_local_commits()
self.scm.delete('test_file_commit1')
self.assertIn("test_file_commit1", self.scm.deleted_files())
def test_delete_list(self):
self._two_local_commits()
self.scm.delete_list(["test_file_commit1", "test_file_commit2"])
self.assertIn("test_file_commit1", self.scm.deleted_files())
self.assertIn("test_file_commit2", self.scm.deleted_files())
def test_delete_recursively(self):
self._shared_test_delete_recursively()
def test_delete_recursively_or_not(self):
self._shared_test_delete_recursively_or_not()
def test_head_svn_revision(self):
self._shared_test_head_svn_revision()
def test_move(self):
self._shared_test_move()
def test_move_recursive(self):
self._shared_test_move_recursive()
def test_to_object_name(self):
relpath = 'test_file_commit1'
fullpath = os.path.realpath(os.path.join(self.git_checkout_path, relpath))
self.assertEqual(relpath, self.scm.to_object_name(fullpath))
def test_show_head(self):
self._two_local_commits()
self.assertEqual("more test content", self.scm.show_head('test_file_commit1'))
def test_show_head_binary(self):
self._two_local_commits()
data = "\244"
write_into_file_at_path("binary_file", data, encoding=None)
self.scm.add("binary_file")
self.scm.commit_locally_with_message("a test commit")
self.assertEqual(data, self.scm.show_head('binary_file'))
def test_diff_for_file(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', "Updated", encoding=None)
diff = self.scm.diff_for_file('test_file_commit1')
cached_diff = self.scm.diff_for_file('test_file_commit1')
self.assertIn("+Updated", diff)
self.assertIn("-more test content", diff)
self.scm.add('test_file_commit1')
cached_diff = self.scm.diff_for_file('test_file_commit1')
self.assertIn("+Updated", cached_diff)
self.assertIn("-more test content", cached_diff)
def test_exists(self):
self._shared_test_exists(self.scm, self.scm.commit_locally_with_message)
# We need to split off more of these SCM tests to use mocks instead of the filesystem.
# This class is the first part of that.
class GitTestWithMock(unittest.TestCase):
maxDiff = None
def make_scm(self, logging_executive=False):
# We do this should_log dance to avoid logging when Git.__init__ runs sysctl on mac to check for 64-bit support.
scm = Git(cwd=".", executive=MockExecutive(), filesystem=MockFileSystem())
scm.read_git_config = lambda *args, **kw: "MOCKKEY:MOCKVALUE"
scm._executive._should_log = logging_executive
return scm
def test_create_patch(self):
scm = self.make_scm(logging_executive=True)
expected_stderr = """\
MOCK run_command: ['git', 'merge-base', 'MOCKVALUE', 'HEAD'], cwd=%(checkout)s
MOCK run_command: ['git', 'diff', '--binary', '--no-color', '--no-ext-diff', '--full-index', '--no-renames', '', 'MOCK output of child process', '--'], cwd=%(checkout)s
MOCK run_command: ['git', 'rev-parse', '--show-toplevel'], cwd=%(checkout)s
MOCK run_command: ['git', 'log', '-1', '--grep=git-svn-id:', '--date=iso', './MOCK output of child process/MOCK output of child process'], cwd=%(checkout)s
""" % {'checkout': scm.checkout_root}
OutputCapture().assert_outputs(self, scm.create_patch, expected_logs=expected_stderr)
def test_push_local_commits_to_server_with_username_and_password(self):
self.assertEqual(self.make_scm().push_local_commits_to_server(username='dbates@webkit.org', password='blah'), "MOCK output of child process")
def test_push_local_commits_to_server_without_username_and_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server)
def test_push_local_commits_to_server_with_username_and_without_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'username': 'dbates@webkit.org'})
def test_push_local_commits_to_server_without_username_and_with_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'password': 'blah'})
def test_timestamp_of_revision(self):
scm = self.make_scm()
scm.find_checkout_root = lambda path: ''
scm._run_git = lambda args: 'Date: 2013-02-08 08:05:49 +0000'
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T08:05:49Z')
scm._run_git = lambda args: 'Date: 2013-02-08 01:02:03 +0130'
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-07T23:32:03Z')
scm._run_git = lambda args: 'Date: 2013-02-08 01:55:21 -0800'
self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T09:55:21Z')
|
apache-2.0
| -2,000,034,110,292,578,300
| 43.905832
| 211
| 0.663235
| false
| 3.409521
| true
| false
| false
|
geoffsmiller/RetroTechClub
|
retrotechclub/views/game_views.py
|
1
|
5726
|
from flask import flash, redirect, render_template, url_for
from flask_login import login_required
from retrotechclub import app, db
from retrotechclub.models import Company, GameMaster, GameRelease, Platform
from retrotechclub.forms import GameMasterForm, GameReleaseForm
@app.route('/games')
def game_masters_list():
game_masters = GameMaster.query.all()
return render_template('game_masters_list.html', game_masters=game_masters)
@app.route('/games/<master_id>')
def game_master_view(master_id):
game_master = GameMaster.query.get_or_404(master_id)
game_releases = GameRelease.query.filter_by(game_master_id=master_id)
return render_template('game_master_view.html', game_master=game_master,
game_releases=game_releases)
@app.route('/games/add', methods=['POST', 'GET'])
@login_required
def game_master_add():
form = GameMasterForm()
if form.validate_on_submit():
game_master = GameMaster(
form.name.data,
form.release_date.data,
form.description.data
)
db.session.add(game_master)
db.session.commit()
flash('New game master added', 'alert-success')
return render_template('game_master_add.html', form=form)
@app.route('/games/<master_id>/edit', methods=['POST', 'GET'])
@login_required
def game_master_edit(master_id):
form = GameMasterForm()
game_master = GameMaster.query.get_or_404(master_id)
game_releases = GameRelease.query.filter_by(game_master_id=master_id)
if not form.is_submitted():
form.name.data = game_master.name
form.release_date.data = game_master.release_date
form.description.data = game_master.description
if form.validate_on_submit():
game_master.name = form.name.data
game_master.release_date = form.release_date.data
game_master.description = form.description.data
db.session.commit()
flash('Game master edited', 'alert-success')
return render_template(
'game_master_edit.html',
form=form,
game_master=game_master,
game_releases=game_releases
)
@app.route('/games/<master_id>/delete')
@login_required
def game_master_delete(master_id):
game_master = GameMaster.query.get_or_404(master_id)
db.session.delete(game_master)
db.session.commit()
flash('Game master deleted', 'alert-success')
return redirect(url_for('game_masters_list'))
@app.route('/games/<master_id>/release/<release_id>')
def game_release_view(master_id, release_id):
game_master = GameMaster.query.get_or_404(master_id)
game_release = GameRelease.query.get_or_404(release_id)
return render_template('game_release_view.html', game_master=game_master,
game_release=game_release)
@app.route('/games/<master_id>/release/add', methods=['POST', 'GET'])
@login_required
def game_release_add(master_id):
game_master = GameMaster.query.get_or_404(master_id)
platforms = Platform.query.all()
companies = Company.query.all()
form = GameReleaseForm()
company_choices = [(c.id, c.name) for c in companies]
form.publisher.choices = company_choices
form.developer.choices = company_choices
form.platform.choices = [(p.id, p.name) for p in platforms]
if form.validate_on_submit():
game_release = GameRelease(
form.name.data,
form.release_date.data,
master_id,
form.publisher.data,
form.developer.data,
form.platform.data,
form.description.data
)
db.session.add(game_release)
db.session.commit()
flash('New game release added', 'alert-success')
return render_template('game_release_edit.html', form=form,
game_master=game_master)
@app.route('/games/<master_id>/release/<release_id>/edit',
methods=['POST', 'GET'])
@login_required
def game_release_edit(master_id, release_id):
game_master = GameMaster.query.get_or_404(master_id)
platforms = Platform.query.all()
companies = Company.query.all()
game_release = GameRelease.query.get_or_404(release_id)
form = GameReleaseForm()
company_choices = [(c.id, c.name) for c in companies]
form.publisher.choices = company_choices
form.developer.choices = company_choices
form.platform.choices = [(p.id, p.name) for p in platforms]
if not form.is_submitted():
form.name.data = game_release.name
form.release_date.data = game_release.release_date
form.publisher.data = game_release.publisher_id
form.developer.data = game_release.developer_id
form.platform.data = game_release.platform_id
form.description.data = game_release.description
if form.validate_on_submit():
game_release.name = form.name.data
game_release.release_date = form.release_date.data
game_release.publisher_id = form.publisher.data
game_release.developer_id = form.developer.data
game_release.description = form.description.data
game_release.platform_id = form.platform.data
db.session.commit()
flash('Game release edited', 'alert-success')
return render_template('game_release_edit.html', form=form,
game_release=game_release, game_master=game_master)
@app.route('/games/<master_id>/release/<release_id>/delete')
@login_required
def game_release_delete(master_id, release_id):
game_release = GameRelease.query.get_or_404(release_id)
db.session.delete(game_release)
db.session.commit()
flash('Game release deleted', 'alert-success')
return redirect(url_for('game_master_view', master_id=master_id))
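# Illustrative sketch added for clarity (not part of the original module); the
# helper name is hypothetical. game_release_add and game_release_edit populate
# the same three select fields from the database, so that duplication could be
# centralised like this:
def _populate_release_choices(form):
    company_choices = [(c.id, c.name) for c in Company.query.all()]
    form.publisher.choices = company_choices
    form.developer.choices = company_choices
    form.platform.choices = [(p.id, p.name) for p in Platform.query.all()]
    return form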
|
mit
| -1,783,189,908,252,373,000
| 37.689189
| 79
| 0.66853
| false
| 3.558732
| false
| false
| false
|
Grumpy-Mike/Mikes-Pi-Bakery
|
Santa's-Run/software/santa's_run.py
|
1
|
8982
|
# Santa's Run - a Christmas game
# By Mike Cook - October 2020
import pygame
import time
import os
import random
import RPi.GPIO as io
def main():
global restart, santaState, coverTrack, santaDistance, targetRect, disInc
global santaL_R, delivered, santaHeight, drop, lastDistance, throwCount, distance
init()
initGPIO()
print("Santa's Run")
while True:
if restart:
distance = 0 ; lastDistance = 0 ; santaDistance = 0
santaState = 0 ; coverTrack=[] ; drop = False ; throwCount = 0
delivered = 0 ; santaL_R = 0 ; santaHeight = rigel - 150
targetRect = []
setUpGround()
restart = False
showPicture(distance)
waitNextFrame()
distance = santaDistance * 4
showPicture(distance)
santaHeight += 0.5 # normal loss of height
if santaHeight >= rigel : santaHeight = rigel # peg the lowest he can get
if santaHeight <= 0 : santaHeight = 0.0 # peg the highest he can get
if santaDistance >= 1150 : santaL_R = 1 # reverse run at end of screen
if santaDistance < 0 or throwCount >= 100: # stop at end of screen or when magazines run out
gameSound[3].play() # end
drawWords("Finished "+str(delivered)+" MagPi magazines delivered ",400,258)
drawWords("Type return for another run", 467, 300)
pygame.display.update()
while not restart:
checkForEvent()
def init():
global textHeight, font, restart, santaState, screen
global soundEffects, santaFrames, background, chimney
global cover, gameSound, snowLine, snowLineShort, drop
global groundPlotType, groundPlotY, groundTypeW, coverTrack
global targetRect, coverDrop, santaL_R, groundSpeed, rigel
global dropVel, groundLine, frame
pygame.init() # initialise graphics interface
pygame.mixer.quit()
pygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=512)
os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'
pygame.display.set_caption("Santa's Run")
pygame.event.set_allowed(None)
pygame.event.set_allowed([pygame.KEYDOWN, pygame.QUIT, pygame.MOUSEBUTTONDOWN])
screen = pygame.display.set_mode([1250,526],0,32)
textHeight = 36
font = pygame.font.Font(None, textHeight)
random.seed()
restart = True ; santaState = 0 ; drop = False
santaFrames = [[0,0] for _ in range(9) ]
frames1 = [ pygame.image.load("images/Santa/Santa"+str(frame)+".png").convert_alpha()
for frame in range(1,10)]
frames2 = [ pygame.transform.flip (pygame.image.load("images/Santa/Santa"+str(frame)+".png").convert_alpha(), True, False)
for frame in range(1,10)]
santaL_R = 0 # santa image flip 0 l to r, 1 r to l
frame = 0
for i in range(9) :
santaFrames[i][0] = frames1[i]
santaFrames[i][1] = frames2[i]
background = pygame.image.load("images/stars.png").convert_alpha()
chimney = pygame.image.load("images/chimney.png").convert_alpha()
cover = [pygame.image.load("images/covers/"+str(cov)+"-Cover1.png").convert_alpha()
for cov in range(1,101) ]
soundEffects = ["throw","hit","miss","end"]
gameSound = [ pygame.mixer.Sound("sounds/"+soundEffects[sound]+".wav")
for sound in range(0,4)]
snowLine = pygame.image.load("images/snow_line.png").convert_alpha()
snowLineShort = pygame.image.load("images/snow_line_short.png").convert_alpha()
groundSpeed = 4
groundPlotType = [chimney, snowLine, snowLineShort]
groundPlotY = [466, 517, 517]
groundTypeW = [130, 130, 65] # width of each type of ground
coverTrack = []
targetRect = []
coverDrop = [0, 0, 0]
rigel = 312
dropVel = 0
# define what we fly over 0 = double chimney 1 = long snow line 2 = short snow line
groundLine = [1, 1, 0, 2, 0, 2, 0, 2, 1, 2, 0, 0, 2, 0, 2, 0, 0, 0, 0, 2, 2, 0, 2, 0,
1, 0, 1, 0, 0, 0, 1, 2, 0, 1, 1, 0, 2, 0, 2, 0, 1, 0, 2, 0, 0, 1, 2, 0,
2, 0, 0, 1, 1, 1, 1]
def setUpGround():
global coverTrack, targetRect
targetRect = []
coverTrack = []
length = 0
for i in range(len(groundLine)) :
part = groundLine[i]
if part == 0 :
targetRect.append(pygame.Rect(length + 18, 481, 93, 32))
if part == 2 :
length += 65
else :
length += 130
#print("ground line length",length)
def initGPIO():
io.setwarnings(False)
io.setmode(io.BCM)
io.setup(2, io.IN)
io.add_event_detect(2, io.FALLING, callback = shakeDetect, bouncetime = 30)
def showPicture(distance):
global coverDrop, drop, dropVel, santaDistance
screen.blit(background,[0,0])
showGround(distance)
''' # uncomment to see catching rectangles
for t in range(len(targetRect)) :
pygame.draw.rect(screen, (0,128,0), targetRect[t], 0)
'''
if drop :
if dropVel != 0 :
dropVel += 1
else :
dropVel = 2
screen.blit(cover[coverDrop[0]], [ coverDrop[1], coverDrop[2] ])
if santaL_R :
coverDrop[1] -= 4
else:
coverDrop[1] += 4
coverDrop[2] += dropVel
if coverDrop[2] > 526: gameSound[2].play() ; drop = False ; dropVel = 0
if catch(distance) :
gameSound[1].play()
drop = False
dropVel = 0
santaDistance += disInc * 8 # give a little kick
screen.blit(santaFrames[frame][santaL_R],[santaDistance, santaHeight])
pygame.display.update()
def showGround(scroll):
global lastDistance
if scroll != 0:
delta = scroll - lastDistance
for t in range(len(targetRect)):
targetRect[t] = targetRect[t].move(-delta, 0)
lastDistance = scroll
length = - scroll
chunk = 0
while length < 1250 :
if length > -130 :
screen.blit(groundPlotType[groundLine[chunk]],[length, groundPlotY[groundLine[chunk]]])
length += groundTypeW[groundLine[chunk]]
chunk += 1
for coverCount in range(len(coverTrack)) :
screen.blit(cover[coverTrack[coverCount][0]], [coverTrack[coverCount][1] - scroll,
413] )
def catch(offset) : # does the dropping cover collide with a chimney catch rectangle?
global coverTrack, delivered
caught = False
for r in range(len(targetRect)):
if targetRect[r].collidepoint((coverDrop[1], coverDrop[2] + 66)) or targetRect[r].collidepoint((coverDrop[1] + 50, coverDrop[2] + 66)):
caught = True ; delivered += 1
coverTrack.append([coverDrop[0], coverDrop[1] + offset, coverDrop[2]])
#print("coverTrack list",coverTrack)
return caught
def drawWords(words,x,y) :
textSurface = pygame.Surface((14,textHeight))
textRect = textSurface.get_rect()
textRect.left = x
textRect.top = y
pygame.draw.rect(screen,(102,204,255), (x,y,14,textHeight-10), 0)
textSurface = font.render(words, True, (255,255,255), (102,204,255))
screen.blit(textSurface, textRect)
def shakeDetect(pin):
global frame, coverDrop, throwCount, disInc, santaDistance
global santaHeight
frame = frame + 1
if frame >= 9: frame = 0 # frame of animation
disInc = 2
if santaL_R : disInc = -2
if drop :
santaHeight -= 2 # go up
else :
santaDistance += disInc
def throw():
global santaHeight, drop, coverDrop, throwCount
if drop : return
else:
if santaHeight >= rigel : # boost up if too low
santaHeight = 30.0
else :
drop = True
if drop:
if santaL_R :
coverDrop = [throwCount, 100 + santaDistance, int(santaHeight)]
else :
coverDrop = [throwCount, santaDistance, int(santaHeight)]
throwCount += 1 # number of covers thrown for next time
gameSound[0].play() # throw
def waitNextFrame():
autoTime = time.time()
while time.time() - autoTime < 0.04:
checkForEvent()
def terminate(): # close down the program
print("Closing down")
io.remove_event_detect(2)
pygame.mixer.quit()
pygame.quit() # close pygame
os._exit(1)
def checkForEvent(): # see if we need to quit
global restart
event = pygame.event.poll()
if event.type == pygame.QUIT :
terminate()
if event.type == pygame.KEYDOWN :
if event.key == pygame.K_ESCAPE :
terminate()
if event.key == pygame.K_SPACE :
throw()
if event.key == pygame.K_RETURN :
restart = True
print("New Run")
if event.type == pygame.MOUSEBUTTONDOWN :
pass
#print(pygame.mouse.get_pos())
#os.system("scrot")
# Main program logic:
if __name__ == '__main__':
main()
|
gpl-2.0
| 4,538,330,641,443,869,000
| 36.739496
| 143
| 0.5943
| false
| 3.496302
| false
| false
| false
|
kubeflow/kfp-tekton
|
sdk/python/tests/compiler/testdata/withparam_global.py
|
1
|
1802
|
# Copyright 2021 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
from kfp_tekton.compiler import TektonCompiler
class Coder:
def empty(self):
return ""
TektonCompiler._get_unique_id_code = Coder.empty
@dsl.pipeline(name='withparam-global')
def pipeline(loopidy_doop: list = [3, 5, 7, 9]):
op0 = dsl.ContainerOp(
name="my-out-cop0",
image='python:alpine3.6',
command=["sh", "-c"],
arguments=[
'python -c "import json; import sys; json.dump([i for i in range(20, 31)], open(\'/tmp/out.json\', \'w\'))"'],
file_outputs={'out': '/tmp/out.json'},
)
with dsl.ParallelFor(loopidy_doop) as item:
op1 = dsl.ContainerOp(
name="my-in-cop1",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo no output global op1, item: %s" % item],
).after(op0)
op_out = dsl.ContainerOp(
name="my-out-cop2",
image="library/bash:4.4.23",
command=["sh", "-c"],
arguments=["echo no output global op2, outp: %s" % op0.output],
).after(op1)
if __name__ == '__main__':
from kfp_tekton.compiler import TektonCompiler
TektonCompiler().compile(pipeline, __file__.replace('.py', '.yaml'))
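# Added note (not part of the original sample): 'loopidy_doop' is a pipeline
# parameter, so dsl.ParallelFor fans "my-in-cop1" out once per element of the
# list supplied at run time ([3, 5, 7, 9] by default). op0 writes the list
# [20, 21, ..., 30] to /tmp/out.json, and that file's contents are wired into
# "my-out-cop2" via op0.output.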
|
apache-2.0
| -853,971,105,150,103,200
| 31.178571
| 122
| 0.629301
| false
| 3.35568
| false
| false
| false
|
Scitator/rl-course-experiments
|
GEN/genetic_gym.py
|
1
|
5386
|
#!/usr/bin/python
import gym
from gym import wrappers
import argparse
import numpy as np
import random
from tqdm import trange
def get_random_policy(env):
"""
Build a numpy array representing agent policy.
This array must have one element per each of 16 environment states.
Element must be an integer from 0 to 3, representing action
to take from that state.
"""
return np.random.randint(0, int(env.action_space.n), int(env.observation_space.n))
def sample_reward(env, policy, t_max=100):
"""
Interact with an environment, return sum of all rewards.
If game doesn't end on t_max (e.g. agent walks into a wall),
force end the game and return whatever reward you got so far.
Tip: see signature of env.step(...) method above.
"""
s = env.reset()
total_reward = 0
for _ in range(t_max):
action = policy[s]
s, reward, done, info = env.step(action)
total_reward += reward
if done:
break
return total_reward
def evaluate(sample_func, env, policy, n_times=100):
"""Run several evaluations and average the score the policy gets."""
rewards = [sample_func(env, policy) for _ in range(n_times)]
return float(np.mean(rewards))
def crossover(env, policy1, policy2, p=0.5, prioritize_func=None):
"""
for each state, with probability p take action from policy1, else policy2
"""
if prioritize_func is not None:
p = prioritize_func(env, policy1, policy2, p)
return np.choose(
(np.random.random_sample(policy1.shape[0]) <= p).astype(int), [policy1, policy2])
def mutation(env, policy, p=0.1):
"""
for each state, with probability p replace action with random action
Tip: mutation can be written as crossover with random policy
"""
return crossover(env, get_random_policy(env), policy, p)
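# Illustrative sketch added for clarity (not part of the original script). The
# crossover above picks each gene from policy2 with probability p (np.choose
# index 1), so crossing an all-zeros and an all-ones "policy" yields a child
# with roughly p * n ones. The helper name below is hypothetical.
def _demo_crossover(p=0.5, n=1000):
    parent1 = np.zeros(n, dtype=int)
    parent2 = np.ones(n, dtype=int)
    child = crossover(None, parent1, parent2, p=p) # env is unused when prioritize_func is None
    return int(child.sum()) # roughly p * n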
def run(env, n_episodes, max_steps,
pool_size, n_crossovers, n_mutations,
seed=42, verbose=False, api_key=None):
random.seed(seed)
np.random.seed(seed)
env_name = env
env = gym.make(env).env
env.reset()
if api_key is not None:
env = gym.wrappers.Monitor(env, "/tmp/" + env_name, force=True)
if verbose:
print("initializing...")
pool = [get_random_policy(env) for _ in range(pool_size)]
rewards = np.zeros(n_episodes)
tr = trange(
n_episodes,
desc="best score: {:.4}".format(0.0),
leave=True)
def sample_func(env, policy):
return sample_reward(
env, policy, t_max=max_steps if api_key is None else int(1e10))
def prioritize_func(env, policy1, policy2, p):
return min(
p * evaluate(sample_func, env, policy1) / (evaluate(sample_func, env, policy2) + 0.001),
1.0)
for i_epoch in tr:
crossovered = [
crossover(env, random.choice(pool), random.choice(pool),
prioritize_func=prioritize_func)
for _ in range(n_crossovers)]
mutated = [mutation(env, random.choice(pool)) for _ in range(n_mutations)]
assert type(crossovered) == type(mutated) == list
# add new policies to the pool
pool = pool + crossovered + mutated
pool_scores = list(map(lambda x: evaluate(sample_func, env, x), pool))
# select pool_size best policies
selected_indices = np.argsort(pool_scores)[-pool_size:]
pool = [pool[i] for i in selected_indices]
pool_scores = [pool_scores[i] for i in selected_indices]
# print the best policy so far (last in ascending score order)
tr.set_description("best score: {:.4}".format(pool_scores[-1]))
rewards[i_epoch] = pool_scores[-1]
print("Avg rewards over {} episodes: {:.4f} +/-{:.4f}".format(
n_episodes, np.mean(rewards), np.std(rewards)))
if api_key is not None:
env.close()
gym.upload("/tmp/" + env_name, api_key=api_key)
def _parse_args():
parser = argparse.ArgumentParser(description='Policy iteration example')
parser.add_argument(
'--env',
type=str,
default='FrozenLake8x8-v0',
help='The environment to use')
parser.add_argument(
'--num_episodes',
type=int,
default=200,
help='Number of episodes')
parser.add_argument(
'--max_steps',
type=int,
default=200,
help='Max number per episode')
parser.add_argument(
'--pool_size',
type=int,
default=200,
help='Population size')
parser.add_argument(
'--n_crossovers',
type=int,
default=100,
help='Number of crossovers per episode')
parser.add_argument(
'--n_mutations',
type=int,
default=100,
help='Number of mutations per episode')
parser.add_argument(
'--seed',
type=int,
default=42)
parser.add_argument(
'--verbose',
action='store_true',
default=False)
parser.add_argument(
'--api_key',
type=str,
default=None)
args, _ = parser.parse_known_args()
return args
def main():
args = _parse_args()
run(args.env, args.num_episodes, args.max_steps,
args.pool_size, args.n_crossovers, args.n_mutations,
args.seed, args.verbose, args.api_key)
if __name__ == '__main__':
main()
|
mit
| 801,298,826,288,472,000
| 28.431694
| 100
| 0.604716
| false
| 3.543421
| false
| false
| false
|
Adrianacmy/Classic-Interesting-CS-Mini-Programs
|
old/reverse_dict.py
|
1
|
2014
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Sun May 15
@author: Adrianacmy
Create a function reverse that takes in a dictionary and reverses it, such that
all of the values become keys and all of the keys become values. Be careful: we
do not wish to lose any information. Consider what to do if the original
dictionary has lists of values for a particular key, or has duplicate values
for some keys.
'''
# def format_dic_value(dict):
# '''format the single elements value list if it is necessary'''
# nw_dict = {}
# for k, v in dict.items():
# if len(v) == 1 and type(v) == list:
# nw_dict[k] = ''.join(v)
# else:
# nw_dict[k] = v
# return nw_dict
def convert_to_simple_list(lst, nw_list=None):
'''
Convert a multi-dimensional (nested) list into a one-dimensional list.
lst: any list
nw_list: a one-dimensional accumulator list; a new list is created if omitted
return: a one-dimensional list
'''
if nw_list is None: # avoid sharing a mutable default list between calls
nw_list = []
for a in lst:
if type(a) == list:
convert_to_simple_list(a, nw_list) # pass the accumulator so nested elements are kept
else:
nw_list.append(a)
return nw_list
# lst = ['a', 'b', 'c', [1,2,3], 'abc']
# print(convert_to_simple_list(lst))
def add_dic_val(dic, k, v):
'''
add elements or values to a dictionary.
dic: an empty dictionary
k: a key
v: a value
'''
dic[k] = dic.get(k, [])
if not v in dic[k]:
dic[k].append(v)
def reverse_dict(d):
'''reverse keys and values in a dictionary'''
r = {} #reversed dictionary
for k, v in d.items():
nw_lst = []
if type(v) == list:
value_list = convert_to_simple_list(v, nw_lst)
# if value_list:
for val in value_list:
add_dic_val(r, val, k)
else:
add_dic_val(r, v, k)
return r
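# Illustrative example added for clarity (not part of the original exercise):
# values become keys, the original keys are collected into lists, and nested
# value lists are flattened, e.g.
# reverse_dict({1: 'a', 2: 'a', 3: ['b', 'c']}) -> {'a': [1, 2], 'b': [3], 'c': [3]}
# (list ordering follows the dict's iteration order).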
def main():
d = {1: 'a', 4: ['abc', 'egf'], 5: '',(1, 6): 'abc', 2:[1, 2, 3, [1, 2]], 8: ['', 2]}
print(reverse_dict(d))
if __name__ == "__main__":
main()
|
mit
| -2,144,025,257,464,626,000
| 22.149425
| 89
| 0.541708
| false
| 3.166667
| false
| false
| false
|
tkaitchuck/nupic
|
py/nupic/research/fdrutilities.py
|
1
|
62245
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
from numpy import *
import random
import copy
import sys
import itertools
random.seed(42)
numpy.random.seed(42)
from nupic.bindings.math import (SM32, SparseBinaryMatrix)
###############################################################################
def setRandomSeed(seed):
""" Set the random seeds. Helpful to make unit tests repeatable"""
random.seed(seed)
numpy.random.seed(seed)
###############################################################################
def addNoise(input, noise=0.1, doForeground=True, doBackground=True):
"""
Add noise to the given input.
Parameters:
-----------------------------------------------
input: the input to add noise to
noise: how much noise to add
doForeground: If true, turn off some of the 1 bits in the input
doBackground: If true, turn on some of the 0 bits in the input
"""
if doForeground and doBackground:
return numpy.abs(input - (numpy.random.random(input.shape) < noise))
else:
if doForeground:
return numpy.logical_and(input, numpy.random.random(input.shape) > noise)
if doBackground:
return numpy.logical_or(input, numpy.random.random(input.shape) < noise)
return input
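###############################################################################
def _demoAddNoise():
  """
  Illustrative sketch added for clarity (not part of the original module): with
  both foreground and background noise enabled, addNoise flips each bit with
  probability 'noise', so roughly noise * length bits should change.
  """
  v = numpy.zeros(1000, dtype='int32')
  v[:100] = 1
  noisy = addNoise(v, noise=0.1)
  return int((noisy != v).sum()) # typically close to 100 flipped bits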
################################################################################
def generateCoincMatrix(nCoinc=10, length=500, activity=50):
"""
Generate a coincidence matrix. This is used to generate random inputs to the
temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row.
"""
coincMatrix0 = SM32(int(nCoinc), int(length))
theOnes = numpy.array([1.0] * activity, dtype=numpy.float32)
for rowIdx in xrange(nCoinc):
coinc = numpy.array(random.sample(xrange(length),
activity), dtype=numpy.uint32)
coinc.sort()
coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)
# This is the right code to use, it's faster, but it derails the unit
# testing of the pooling for now.
coincMatrix = SM32(int(nCoinc), int(length))
coincMatrix.initializeWithFixedNNZR(activity)
return coincMatrix0
###############################################################################
def generateVectors(numVectors=100, length=500, activity=50):
"""
Generate a list of random sparse distributed vectors. This is used to generate
training vectors to the spatial or temporal learner and to compare the predicted
output against.
It generates a list of 'numVectors' elements, each element has length 'length'
and has a total of 'activity' bits on.
Parameters:
-----------------------------------------------
numVectors: the number of vectors to generate
length: the length of each row
activity: the number of ones to put into each row.
"""
vectors = []
coinc = numpy.zeros(length, dtype='int32')
indexList = range(length)
for i in xrange(numVectors):
coinc[:] = 0
coinc[random.sample(indexList, activity)] = 1
vectors.append(coinc.copy())
return vectors
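###############################################################################
def _demoGenerateVectors():
  """
  Illustrative sketch added for clarity (not part of the original module): each
  vector returned by generateVectors() has exactly 'activity' bits set, because
  the on-bits are chosen with random.sample (no repeats).
  """
  vectors = generateVectors(numVectors=5, length=20, activity=4)
  return [int(v.sum()) for v in vectors] # -> [4, 4, 4, 4, 4]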
###############################################################################
def generateSimpleSequences(nCoinc=10, seqLength=[5,6,7], nSeq=100):
"""
Generate a set of simple sequences. The elements of the sequences will be
integers from 0 to 'nCoinc'-1. The length of each sequence will be
randomly chosen from the 'seqLength' list.
Parameters:
-----------------------------------------------
nCoinc: the number of elements available to use in the sequences
seqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nSeq: The number of sequences to generate
retval: a list of sequences. Each sequence is itself a list
containing the coincidence indices for that sequence.
"""
coincList = range(nCoinc)
seqList = []
for i in xrange(nSeq):
if max(seqLength) <= nCoinc:
seqList.append(random.sample(coincList, random.choice(seqLength)))
else:
len = random.choice(seqLength)
seq = []
for x in xrange(len):
seq.append(random.choice(coincList))
seqList.append(seq)
return seqList
###############################################################################
def generateHubSequences(nCoinc=10, hubs = [2,6], seqLength=[5,6,7], nSeq=100):
"""
Generate a set of hub sequences. These are sequences which contain a hub
element in the middle. The elements of the sequences will be integers
from 0 to 'nCoinc'-1. The hub elements will only appear in the middle of
each sequence. The length of each sequence will be randomly chosen from the
'seqLength' list.
Parameters:
-----------------------------------------------
nCoinc: the number of elements available to use in the sequences
hubs: which of the elements will be used as hubs.
seqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nSeq: The number of sequences to generate
retval: a list of sequences. Each sequence is itself a list
containing the coincidence indices for that sequence.
"""
coincList = range(nCoinc)
for hub in hubs:
coincList.remove(hub)
seqList = []
for i in xrange(nSeq):
length = random.choice(seqLength)-1
seq = random.sample(coincList,length)
seq.insert(length//2, random.choice(hubs))
seqList.append(seq)
return seqList
def genTestSeqsForLookback(nPatterns=10, patternLen=500, patternActivity=50,
seqLength=[5,6,7], nSequences=50):
"""
Generate two sets of sequences. The first set of sequences is used to train
the sequence learner till it fills up capacity. The second set is then used
to further train the system to test its generalization capability using the
one step look-back idea. The second set of sequences is generated by modifying
the first set.
Parameters:
-----------------------------------------------
nPatterns: the number of patterns to use in the sequences.
patternLen: The number of elements in each pattern
patternActivity: The number of elements that should be active in
each pattern
seqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nSequences: The number of simple sequences in the first set
retval: (seqList1, seqList2, patterns)
seqList1, seqList2: a list of sequences. Each sequence is itself a list
containing the input pattern indices for that sequence.
patterns: the input patterns used in the seqList.
"""
# Create the input patterns
patterns = generateCoincMatrix(nCoinc=nPatterns, length=patternLen,
activity=patternActivity)
#patterns = generateSimpleCoincMatrix(nCoinc=nPatterns, length=patternLen,
# activity=patternActivity)
similarity = []
for i in xrange(nPatterns):
similarity.append(patterns.rightVecProd(patterns.getRow(i)))
similarity = numpy.array(similarity, dtype='int32')
print similarity
# Create the raw sequences
seqList1 = generateSimpleSequences(nCoinc=nPatterns, seqLength=seqLength,
nSeq=nSequences)
#The second set of sequences are obtained by replacing just the first
#element in each sequence.
seqList2 = copy.deepcopy(seqList1)
for i in range(0,len(seqList2)):
seqList2[i][0] = random.randint(0,nPatterns-1)
#return ([range(6),[5,4,1,3,4]],[[7,1,2,3,4,5]],patterns)
return (seqList1, seqList2, patterns)
################################################################################
def generateSimpleCoincMatrix(nCoinc=10, length=500, activity=50):
"""
Generate a non overlapping coincidence matrix. This is used to generate random
inputs to the temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row.
"""
assert nCoinc*activity<=length, "can't generate non-overlapping coincidences"
coincMatrix = SM32(0, length)
coinc = numpy.zeros(length, dtype='int32')
for i in xrange(nCoinc):
coinc[:] = 0
coinc[i*activity:(i+1)*activity] = 1
coincMatrix.addRow(coinc)
return coincMatrix
###############################################################################
def generateSequences(nPatterns=10, patternLen=500, patternActivity=50,
hubs=[2,6], seqLength=[5,6,7],
nSimpleSequences=50, nHubSequences=50):
"""
Generate a set of simple and hub sequences. A simple sequence contains
a randomly chosen set of elements from 0 to 'nCoinc-1'. A hub sequence
always contains a hub element in the middle of it.
Parameters:
-----------------------------------------------
nPatterns: the number of patterns to use in the sequences.
patternLen: The number of elements in each pattern
patternActivity: The number of elements that should be active in
each pattern
hubs: which of the elements will be used as hubs.
seqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nSimpleSequences: The number of simple sequences to generate
nHubSequences: The number of hub sequences to generate
retval: (seqList, patterns)
seqList: a list of sequences. Each sequence is itself a list
containing the input pattern indices for that sequence.
patterns: the input patterns used in the seqList.
"""
# Create the input patterns
patterns = generateCoincMatrix(nCoinc=nPatterns, length=patternLen,
activity=patternActivity)
# Create the raw sequences
seqList = generateSimpleSequences(nCoinc=nPatterns, seqLength=seqLength,
nSeq=nSimpleSequences) + \
generateHubSequences(nCoinc=nPatterns, hubs=hubs, seqLength=seqLength,
nSeq=nHubSequences)
# Return results
return (seqList, patterns)
###############################################################################
def generateL2Sequences(nL1Patterns=10, l1Hubs=[2,6], l1SeqLength=[5,6,7],
nL1SimpleSequences=50, nL1HubSequences=50,
l1Pooling=4, perfectStability=False, spHysteresisFactor=1.0,
patternLen=500, patternActivity=50):
"""
Generate the simulated output from a spatial pooler that's sitting
on top of another spatial pooler / temporal pooler pair. The average on-time
of the outputs from the simulated TP is given by the l1Pooling argument.
In this routine, L1 refers to the first spatial and temporal pooler and L2
refers to the spatial pooler above that.
Parameters:
-----------------------------------------------
nL1Patterns: the number of patterns to use in the L1 sequences.
l1Hubs: which of the elements will be used as hubs.
l1SeqLength: a list of possible sequence lengths. The length of each
sequence will be randomly chosen from here.
nL1SimpleSequences: The number of simple sequences to generate for L1
nL1HubSequences: The number of hub sequences to generate for L1
l1Pooling: The number of time steps to pool over in the L1 temporal
pooler
perfectStability: If true, then the input patterns represented by the
sequences generated will have perfect stability over
l1Pooling time steps. This is the best case ideal input
to a TP. In actual situations, with an actual SP
providing input, the stability will always be less than
this.
spHysteresisFactor: The hysteresisFactor to use in the L2 spatial pooler.
Only used when perfectStability is False
patternLen: The number of elements in each pattern output by L2
patternActivity: The number of elements that should be active in
each pattern
@retval: (seqList, patterns)
seqList: a list of sequences output from L2. Each sequence is
itself a list containing the input pattern indices for that
sequence.
patterns: the input patterns used in the L2 seqList.
"""
# First, generate the L1 sequences
l1SeqList = generateSimpleSequences(nCoinc=nL1Patterns, seqLength=l1SeqLength,
nSeq=nL1SimpleSequences) + \
generateHubSequences(nCoinc=nL1Patterns, hubs=l1Hubs,
seqLength=l1SeqLength, nSeq=nL1HubSequences)
# Generate the L2 SP output from those
spOutput = generateSlowSPOutput(seqListBelow = l1SeqList,
poolingTimeBelow=l1Pooling, outputWidth=patternLen,
activity=patternActivity, perfectStability=perfectStability,
spHysteresisFactor=spHysteresisFactor)
# Map the spOutput patterns into indices into a pattern matrix which we
# generate now.
outSeq = None
outSeqList = []
outPatterns = SM32(0, patternLen)
for pattern in spOutput:
# If we have a reset vector start a new sequence
if pattern.sum() == 0:
if outSeq is not None:
outSeqList.append(outSeq)
outSeq = []
continue
# See if this vector matches a pattern we've already seen before
patternIdx = None
if outPatterns.nRows() > 0:
# Find most matching 1's.
matches = outPatterns.rightVecSumAtNZ(pattern)
outCoinc = matches.argmax().astype('uint32')
# See if its number of 1's is the same in the pattern and in the
# coincidence row. If so, it is an exact match
numOnes = pattern.sum()
if matches[outCoinc] == numOnes \
and outPatterns.getRow(int(outCoinc)).sum() == numOnes:
patternIdx = outCoinc
# If no match, add this pattern to our matrix
if patternIdx is None:
outPatterns.addRow(pattern)
patternIdx = outPatterns.nRows() - 1
# Store the pattern index into the sequence
outSeq.append(patternIdx)
# Put in last finished sequence
if outSeq is not None:
outSeqList.append(outSeq)
# Return with the seqList and patterns matrix
return (outSeqList, outPatterns)
###############################################################################
def vectorsFromSeqList(seqList, patternMatrix):
"""
Convert a list of sequences of pattern indices, and a pattern lookup table
into an array of patterns.
Parameters:
-----------------------------------------------
seq: the sequence, given as indices into the patternMatrix
patternMatrix: a SparseMatrix containing the possible patterns used in
the sequence.
"""
totalLen = 0
for seq in seqList:
totalLen += len(seq)
vectors = numpy.zeros((totalLen, patternMatrix.shape[1]), dtype='bool')
vecOffset = 0
for seq in seqList:
seq = numpy.array(seq, dtype='uint32')
for idx,coinc in enumerate(seq):
vectors[vecOffset] = patternMatrix.getRow(int(coinc))
vecOffset += 1
return vectors
###############################################################################
# The following three functions are used in tests to compare two different
# TP instances.
def sameTPParams(tp1, tp2):
"""Given two TP instances, see if any parameters are different."""
result = True
for param in ["numberOfCols", "cellsPerColumn", "initialPerm", "connectedPerm",
"minThreshold", "newSynapseCount", "permanenceInc", "permanenceDec",
"permanenceMax", "globalDecay", "activationThreshold",
"doPooling", "segUpdateValidDuration", "seed",
"burnIn", "pamLength", "maxAge"]:
if getattr(tp1, param) != getattr(tp2,param):
print param,"is different"
print getattr(tp1, param), "vs", getattr(tp2,param)
result = False
return result
def sameSynapse(syn, synapses):
"""Given a synapse and a list of synapses, check whether this synapse
exist in the list. A synapse is represented as [col, cell, permanence].
A synapse matches if col and cell are identical and the permanence value is
within 0.001."""
for s in synapses:
if (s[0]==syn[0]) and (s[1]==syn[1]) and (abs(s[2]-syn[2]) <= 0.001):
return True
return False
def sameSegment(seg1, seg2):
"""Return True if seg1 and seg2 are identical, ignoring order of synapses"""
result = True
# check sequence segment, total activations etc. In case any are floats,
# check that they are within 0.001.
for field in [1, 2, 3, 4, 5, 6]:
if abs(seg1[0][field] - seg2[0][field]) > 0.001:
result = False
# Compare number of synapses
if len(seg1[1:]) != len(seg2[1:]):
result = False
# Now compare synapses, ignoring order of synapses
if any(syn[2] <= 0 for syn in seg2[1:]):
print "A synapse with zero permanence encountered"
result = False
if result == True:
for syn in seg1[1:]:
if syn[2] <= 0:
print "A synapse with zero permanence encountered"
result = False
res = sameSynapse(syn, seg2[1:])
if res == False:
result = False
return result
def tpDiff(tp1, tp2, verbosity = 0, relaxSegmentTests =True):
"""
Given two TP instances, list the differences between them and return False
if there is a difference. This function checks the major parameters. If this
passes (and checkLearn is true) it checks the number of segments on
each cell. If this passes, checks each synapse on each segment.
When comparing C++ and Py, the segments are usually in different orders in the
cells. tpDiff ignores segment order when comparing TP's.
"""
# First check basic parameters. If we fail here, don't continue
if sameTPParams(tp1, tp2) == False:
print "Two TP's have different parameters"
return False
result = True
# Compare states at t first, they usually diverge before the structure of the
# cells starts diverging
if (tp1.activeState['t'] != tp2.activeState['t']).any():
print 'Active states diverge', numpy.where(tp1.activeState['t'] != tp2.activeState['t'])
result = False
if (tp1.predictedState['t'] - tp2.predictedState['t']).any():
print 'Predicted states diverge', numpy.where(tp1.predictedState['t'] != tp2.predictedState['t'])
result = False
# TODO: check confidence at T (confT)
# Now check some high level learned parameters.
if tp1.getNumSegments() != tp2.getNumSegments():
print "Number of segments are different", tp1.getNumSegments(), tp2.getNumSegments()
result = False
if tp1.getNumSynapses() != tp2.getNumSynapses():
print "Number of synapses are different", tp1.getNumSynapses(), tp2.getNumSynapses()
tp1.printCells()
tp2.printCells()
result = False
# Check that each cell has the same number of segments and synapses
for c in xrange(tp1.numberOfCols):
for i in xrange(tp2.cellsPerColumn):
if tp1.getNumSegmentsInCell(c, i) != tp2.getNumSegmentsInCell(c, i):
print "Num segments different in cell:",c,i,
print tp1.getNumSegmentsInCell(c, i), tp2.getNumSegmentsInCell(c, i)
result = False
# If the above tests pass, then check each segment and report differences
# Note that segments in tp1 can be in a different order than tp2. Here we
# make sure that, for each segment in tp1, there is an identical segment
# in tp2.
if result == True and not relaxSegmentTests:
for c in xrange(tp1.numberOfCols):
for i in xrange(tp2.cellsPerColumn):
nSegs = tp1.getNumSegmentsInCell(c, i)
for segIdx in xrange(nSegs):
tp1seg = tp1.getSegmentOnCell(c, i, segIdx)
# Loop through all segments in tp2seg and see if any of them match tp1seg
res = False
for tp2segIdx in xrange(nSegs):
tp2seg = tp2.getSegmentOnCell(c, i, tp2segIdx)
if sameSegment(tp1seg, tp2seg) == True:
res = True
break
if res == False:
print "\nSegments are different for cell:",c,i
if verbosity >= 1:
print "C++"
tp1.printCell(c,i)
print "Py"
tp2.printCell(c,i)
result = False
if result == True and (verbosity > 1):
print "TP's match"
return result
def tpDiff2(tp1, tp2, verbosity = 0, relaxSegmentTests =True,
checkLearn = True, checkStates = True):
"""
Given two TP instances, list the differences between them and return False
if there is a difference. This function checks the major parameters. If this
passes (and checkLearn is true) it checks the number of segments on each cell.
If this passes, checks each synapse on each segment.
When comparing C++ and Py, the segments are usually in different orders in the
cells. tpDiff ignores segment order when comparing TP's.
If checkLearn is True, will check learn states as well as all the segments
If checkStates is True, will check the various state arrays
"""
# First check basic parameters. If we fail here, don't continue
if sameTPParams(tp1, tp2) == False:
print "Two TP's have different parameters"
return False
tp1Label = "<tp_1 (%s)>" % tp1.__class__.__name__
tp2Label = "<tp_2 (%s)>" % tp2.__class__.__name__
result = True
if checkStates:
# Compare states at t first, they usually diverge before the structure of the
# cells starts diverging
if (tp1.infActiveState['t'] != tp2.infActiveState['t']).any():
print 'Active states diverged', numpy.where(tp1.infActiveState['t'] != tp2.infActiveState['t'])
result = False
if (tp1.infPredictedState['t'] - tp2.infPredictedState['t']).any():
print 'Predicted states diverged', numpy.where(tp1.infPredictedState['t'] != tp2.infPredictedState['t'])
result = False
if checkLearn and (tp1.lrnActiveState['t'] - tp2.lrnActiveState['t']).any():
print 'lrnActiveState[t] diverged', numpy.where(tp1.lrnActiveState['t'] != tp2.lrnActiveState['t'])
result = False
if checkLearn and (tp1.lrnPredictedState['t'] - tp2.lrnPredictedState['t']).any():
print 'lrnPredictedState[t] diverged', numpy.where(tp1.lrnPredictedState['t'] != tp2.lrnPredictedState['t'])
result = False
if checkLearn and abs(tp1.getAvgLearnedSeqLength() - tp2.getAvgLearnedSeqLength()) > 0.01:
print "Average learned sequence lengths differ: ",
print tp1.getAvgLearnedSeqLength()," vs ", tp2.getAvgLearnedSeqLength()
result = False
# TODO: check confidence at T (confT)
# Now check some high level learned parameters.
if tp1.getNumSegments() != tp2.getNumSegments():
print "Number of segments are different", tp1.getNumSegments(), tp2.getNumSegments()
result = False
if tp1.getNumSynapses() != tp2.getNumSynapses():
print "Number of synapses are different", tp1.getNumSynapses(), tp2.getNumSynapses()
if verbosity >= 3:
print "%s: " % tp1Label,
tp1.printCells()
print "\n%s : " % tp2Label,
tp2.printCells()
#result = False
# Check that each cell has the same number of segments and synapses
for c in xrange(tp1.numberOfCols):
for i in xrange(tp2.cellsPerColumn):
if tp1.getNumSegmentsInCell(c, i) != tp2.getNumSegmentsInCell(c, i):
print "Num segments different in cell:",c,i,
print tp1.getNumSegmentsInCell(c, i), tp2.getNumSegmentsInCell(c, i)
result = False
# If the above tests pass, then check each segment and report differences
# Note that segments in tp1 can be in a different order than tp2. Here we
# make sure that, for each segment in tp1, there is an identical segment
# in tp2.
if result == True and not relaxSegmentTests and checkLearn:
for c in xrange(tp1.numberOfCols):
for i in xrange(tp2.cellsPerColumn):
nSegs = tp1.getNumSegmentsInCell(c, i)
for segIdx in xrange(nSegs):
tp1seg = tp1.getSegmentOnCell(c, i, segIdx)
# Loop through all segments in tp2seg and see if any of them match tp1seg
res = False
for tp2segIdx in xrange(nSegs):
tp2seg = tp2.getSegmentOnCell(c, i, tp2segIdx)
if sameSegment(tp1seg, tp2seg) == True:
res = True
break
if res == False:
print "\nSegments are different for cell:",c,i
result = False
if verbosity >= 0:
print "%s : " % tp1Label,
tp1.printCell(c,i)
print "\n%s : " % tp2Label,
tp2.printCell(c,i)
if result == True and (verbosity > 1):
print "TP's match"
return result
###############################################################################
def spDiff(SP1,SP2):
"""
Function that compares two spatial pooler instances. Compares the
static variables between the two poolers to make sure that they are equivalent.
Parameters
-----------------------------------------
SP1 first spatial pooler to be compared
SP2 second spatial pooler to be compared
To establish equality, this function does the following:
1.Compares the connected synapse matrices for each coincidence
2.Compare the potential synapse matrices for each coincidence
3.Compare the permanence matrices for each coincidence
4.Compare the firing boosts between the two poolers.
5.Compare the duty cycles before and after inhibition for both poolers
"""
if(len(SP1._masterConnectedM)!=len(SP2._masterConnectedM)):
print "Connected synapse matrices are different sizes"
return False
if(len(SP1._masterPotentialM)!=len(SP2._masterPotentialM)):
print "Potential synapse matrices are different sizes"
return False
if(len(SP1._masterPermanenceM)!=len(SP2._masterPermanenceM)):
print "Permanence matrices are different sizes"
return False
#iterate over cells
for i in range(0,len(SP1._masterConnectedM)):
#grab the Coincidence Matrices and compare them
connected1 = SP1._masterConnectedM[i]
connected2 = SP2._masterConnectedM[i]
if(connected1!=connected2):
print "Connected Matrices for cell %d different" % (i)
return False
#grab permanence Matrices and compare them
permanences1 = SP1._masterPermanenceM[i];
permanences2 = SP2._masterPermanenceM[i];
if(permanences1!=permanences2):
print "Permanence Matrices for cell %d different" % (i)
return False
#grab the potential connection Matrices and compare them
potential1 = SP1._masterPotentialM[i];
potential2 = SP2._masterPotentialM[i];
if(potential1!=potential2):
print "Potential Matrices for cell %d different" % (i)
return False
#Check firing boosts
if(not numpy.array_equal(SP1._firingBoostFactors,SP2._firingBoostFactors)):
print "Firing boost factors are different between spatial poolers"
return False
#Check duty cycles after inhibition
if(not numpy.array_equal(SP1._dutyCycleAfterInh,SP2._dutyCycleAfterInh)):
print "Duty cycles after inhibition are different between spatial poolers"
return False
#Check duty cycles before inhibition
if(not numpy.array_equal(SP1._dutyCycleBeforeInh,SP2._dutyCycleBeforeInh)):
print "Duty cycles before inhibition are different between spatial poolers"
return False
print("Spatial Poolers are equivalent")
return True
###############################################################################
def removeSeqStarts(vectors, resets, numSteps=1):
"""
Remove the first 'numSteps' samples at the start of each sequence from an
array of data vectors
Parameters:
-----------------------------------------------
vectors: the data vectors. Row 0 contains the outputs from time
step 0, row 1 from time step 1, etc.
resets: the reset signal. This is a vector of booleans
the same length as the number of rows in 'vectors'. It
has a 1 where a sequence started and a 0 otherwise. The
first 'numSteps' rows of 'vectors' of each sequence will
not be included in the return result.
numSteps Number of samples to remove from the start of each sequence
retval: copy of vectors, with the first 'numSteps' samples at the
start of each sequence removed.
"""
# Do nothing if numSteps is 0
if numSteps == 0:
return vectors
resetIndices = resets.nonzero()[0]
removeRows = resetIndices
for i in range(numSteps-1):
removeRows = numpy.hstack((removeRows, resetIndices+i+1))
return numpy.delete(vectors, removeRows, axis=0)
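# Illustrative usage sketch (added for clarity, not part of the original
# module). It assumes numpy is already imported at the top of this file, as
# it is used throughout. The function is never called; it only documents the
# expected behaviour of removeSeqStarts.
def _exampleRemoveSeqStarts():
  vecs = numpy.arange(12).reshape(6, 2)
  resets = numpy.array([1, 0, 0, 1, 0, 0])
  # Rows 0 and 3 start sequences, so with numSteps=1 they are dropped,
  # leaving rows 1, 2, 4 and 5.
  return removeSeqStarts(vecs, resets, numSteps=1)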
###############################################################################
def _accumulateFrequencyCounts(values, freqCounts=None):
"""
Accumulate a list of values 'values' into the frequency counts 'freqCounts',
and return the updated frequency counts
For example, if values contained the following: [1,1,3,5,1,3,5], and the initial
freqCounts was None, then the return value would be:
[0,3,0,2,0,2]
which corresponds to how many of each value we saw in the input, i.e. there
were 0 0's, 3 1's, 0 2's, 2 3's, 0 4's, and 2 5's.
If freqCounts is not None, the values will be added to the existing counts and
the length of the frequency Counts will be automatically extended as necessary
Parameters:
-----------------------------------------------
values: The values to accumulate into the frequency counts
freqCounts: Accumulated frequency counts so far, or none
"""
# How big does our freqCounts vector need to be?
values = numpy.array(values)
numEntries = values.max() + 1
if freqCounts is not None:
numEntries = max(numEntries, freqCounts.size)
# Where do we accumulate the results?
if freqCounts is not None:
if freqCounts.size != numEntries:
newCounts = numpy.zeros(numEntries, dtype='int32')
newCounts[0:freqCounts.size] = freqCounts
else:
newCounts = freqCounts
else:
newCounts = numpy.zeros(numEntries, dtype='int32')
# Accumulate the new values
for v in values:
newCounts[v] += 1
return newCounts
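# Illustrative usage sketch (added for clarity, not part of the original
# module): accumulating counts twice, letting the vector grow as needed.
def _exampleAccumulateFrequencyCounts():
  counts = _accumulateFrequencyCounts([1, 1, 3, 5, 1, 3, 5])
  # counts is now [0, 3, 0, 2, 0, 2]
  counts = _accumulateFrequencyCounts([6], counts)
  # counts is extended to [0, 3, 0, 2, 0, 2, 1]
  return counts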
###############################################################################
def _listOfOnTimesInVec(vector):
"""
Returns 3 things for a vector:
* the total on time
* the number of runs
* a list of the durations of each run.
Example:
-----------------------------------------------
input stream: 11100000001100000000011111100000
return value: (11, 3, [3, 2, 6])
"""
# init counters
durations = []
numOnTimes = 0
totalOnTime = 0
# Find where the nonzeros are
nonzeros = numpy.array(vector).nonzero()[0]
# Nothing to do if vector is empty
if len(nonzeros) == 0:
return (0, 0, [])
# Special case of only 1 on bit
if len(nonzeros) == 1:
return (1, 1, [1])
# Count the consecutive non-zeros
prev = nonzeros[0]
onTime = 1
endIdx = nonzeros[-1]
for idx in nonzeros[1:]:
if idx != prev+1:
totalOnTime += onTime
numOnTimes += 1
durations.append(onTime)
onTime = 1
else:
onTime += 1
prev = idx
# Add in the last one
totalOnTime += onTime
numOnTimes += 1
durations.append(onTime)
return (totalOnTime, numOnTimes, durations)
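# Illustrative usage sketch (added for clarity, not part of the original
# module), matching the example shown in the docstring above.
def _exampleListOfOnTimesInVec():
  vec = [int(c) for c in "11100000001100000000011111100000"]
  # Returns (11, 3, [3, 2, 6]): total on-time 11, three runs of length 3, 2, 6.
  return _listOfOnTimesInVec(vec)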
###############################################################################
def _fillInOnTimes(vector, durations):
"""
Helper function used by averageOnTimePerTimestep. 'durations' is a vector
which must be the same len as vector. For each "on" in vector, it fills in
the corresponding element of duration with the duration of that "on" signal
up until that time
Parameters:
-----------------------------------------------
vector: vector of output values over time
durations: vector same length as 'vector', initialized to 0's.
This is filled in with the durations of each 'on" signal.
Example:
vector: 11100000001100000000011111100000
durations: 12300000001200000000012345600000
"""
# Find where the nonzeros are
nonzeros = numpy.array(vector).nonzero()[0]
# Nothing to do if vector is empty
if len(nonzeros) == 0:
return
# Special case of only 1 on bit
if len(nonzeros) == 1:
durations[nonzeros[0]] = 1
return
# Count the consecutive non-zeros
prev = nonzeros[0]
onTime = 1
onStartIdx = prev
endIdx = nonzeros[-1]
for idx in nonzeros[1:]:
if idx != prev+1:
# Fill in the durations
durations[onStartIdx:onStartIdx+onTime] = range(1,onTime+1)
onTime = 1
onStartIdx = idx
else:
onTime += 1
prev = idx
# Fill in the last one
durations[onStartIdx:onStartIdx+onTime] = range(1,onTime+1)
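# Illustrative usage sketch (added for clarity, not part of the original
# module), following the vector/durations example in the docstring above.
def _exampleFillInOnTimes():
  vec = [int(c) for c in "1110001100"]
  durations = numpy.zeros(len(vec), dtype='int32')
  _fillInOnTimes(vec, durations)
  # durations is now [1, 2, 3, 0, 0, 0, 1, 2, 0, 0]
  return durations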
###############################################################################
def averageOnTimePerTimestep(vectors, numSamples=None):
"""
Computes the average on-time of the outputs that are on at each time step, and
then averages this over all time steps.
This metric is resilient to the number of outputs that are on at each time
step. That is, if time step 0 has many more outputs on than time step 100, it
won't skew the results. This is particularly useful when measuring the
average on-time of things like the temporal pooler output where you might
have many columns bursting at the start of a sequence - you don't want those
start of sequence bursts to over-influence the calculated average on-time.
Parameters:
-----------------------------------------------
vectors: the vectors for which the onTime is calculated. Row 0
contains the outputs from time step 0, row 1 from time step
1, etc.
numSamples: the number of elements for which on-time is calculated.
If not specified, then all elements are looked at.
Returns (scalar average on-time over all time steps,
list containing frequency counts of each encountered on-time)
"""
# Special case given a 1 dimensional vector: it represents a single column
if vectors.ndim == 1:
vectors.shape = (-1,1)
numTimeSteps = len(vectors)
numElements = len(vectors[0])
# How many samples will we look at?
if numSamples is not None:
import pdb; pdb.set_trace() # Test this....
countOn = numpy.random.randint(0, numElements, numSamples)
vectors = vectors[:, countOn]
# Fill in each non-zero of vectors with the on-time that that output was
# on for.
durations = numpy.zeros(vectors.shape, dtype='int32')
for col in xrange(vectors.shape[1]):
_fillInOnTimes(vectors[:,col], durations[:,col])
# Compute the average on time for each time step
sums = vectors.sum(axis=1)
sums.clip(min=1, max=numpy.inf, out=sums)
avgDurations = durations.sum(axis=1, dtype='float64') / sums
avgOnTime = avgDurations.sum() / (avgDurations > 0).sum()
# Generate the frequency counts for each duration
freqCounts = _accumulateFrequencyCounts(avgDurations)
return (avgOnTime, freqCounts)
###############################################################################
def averageOnTime(vectors, numSamples=None):
"""
Returns the average on-time, averaged over all on-time runs.
Parameters:
-----------------------------------------------
vectors: the vectors for which the onTime is calculated. Row 0
contains the outputs from time step 0, row 1 from time step
1, etc.
numSamples: the number of elements for which on-time is calculated.
If not specified, then all elements are looked at.
Returns: (scalar average on-time of all outputs,
list containing frequency counts of each encountered on-time)
"""
# Special case given a 1 dimensional vector: it represents a single column
if vectors.ndim == 1:
vectors.shape = (-1,1)
numTimeSteps = len(vectors)
numElements = len(vectors[0])
# How many samples will we look at?
if numSamples is None:
numSamples = numElements
countOn = range(numElements)
else:
countOn = numpy.random.randint(0, numElements, numSamples)
# Compute the on-times and accumulate the frequency counts of each on-time
# encountered
sumOfLengths = 0.0
onTimeFreqCounts = None
n = 0
for i in countOn:
(onTime, segments, durations) = _listOfOnTimesInVec(vectors[:,i])
if onTime != 0.0:
sumOfLengths += onTime
n += segments
onTimeFreqCounts = _accumulateFrequencyCounts(durations, onTimeFreqCounts)
# Return the average on time of each element that was on.
if n > 0:
return (sumOfLengths/n, onTimeFreqCounts)
else:
return (0.0, onTimeFreqCounts)
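# Illustrative usage sketch (added for clarity, not part of the original
# module): two columns, each with a single on-run of length 2.
def _exampleAverageOnTime():
  vecs = numpy.array([[1, 0],
                      [1, 0],
                      [0, 1],
                      [0, 1]])
  # Both columns contain one run of length 2, so the average on-time is 2.0
  # and the frequency counts are [0, 0, 2] (two runs of length 2).
  return averageOnTime(vecs)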
###############################################################################
def plotOutputsOverTime(vectors, buVectors=None, title='On-times'):
"""
Generate a figure that shows each output over time. Time goes left to right,
and each output is plotted on a different line, allowing you to see the overlap
in the outputs, when they turn on/off, etc.
Parameters:
------------------------------------------------------------
vectors: the vectors to plot
buVectors: These are normally specified when plotting the pooling
outputs of the temporal pooler over time. The 'buVectors'
are the sequence outputs and the 'vectors' are the
pooling outputs. The buVector (sequence) outputs will be drawn
in a darker color than the vector (pooling) outputs to
distinguish where the cell is outputting due to pooling vs.
sequence memory.
title: title for the plot
"""
# Produce the plot
import pylab
pylab.ion()
pylab.figure()
imData = vectors.transpose()
if buVectors is not None:
assert(buVectors.shape == vectors.shape)
imData = imData.copy()
imData[buVectors.transpose().astype('bool')] = 2
pylab.imshow(imData, aspect='auto', cmap=pylab.cm.gray_r,
interpolation='nearest')
pylab.title(title)
###############################################################################
def plotHistogram(freqCounts, title='On-Times Histogram', xLabel='On-Time'):
"""
This is usually used to display a histogram of the on-times encountered
in a particular output.
The freqCounts is a vector containing the frequency counts of each on-time
(starting at an on-time of 0 and going to an on-time = len(freqCounts)-1)
The freqCounts are typically generated from the averageOnTimePerTimestep
or averageOnTime methods of this module.
Parameters:
-----------------------------------------------
freqCounts: The frequency counts to plot
title: Title of the plot
"""
import pylab
pylab.ion()
pylab.figure()
pylab.bar(numpy.arange(len(freqCounts)) - 0.5, freqCounts)
pylab.title(title)
pylab.xlabel(xLabel)
###############################################################################
def populationStability(vectors, numSamples=None):
"""
Returns the stability for the population averaged over multiple time steps
Parameters:
-----------------------------------------------
vectors: the vectors for which the stability is calculated
numSamples: the number of time steps where stability is counted
At each time step, count the fraction of the active elements which are stable
from the previous step
Average all the fractions over the sampled time steps
"""
# ----------------------------------------------------------------------
# Calculate the stability
numVectors = len(vectors)
if numSamples is None:
numSamples = numVectors-1
countOn = range(numVectors-1)
else:
countOn = numpy.random.randint(0, numVectors-1, numSamples)
sigmap = 0.0
for i in countOn:
match = checkMatch(vectors[i], vectors[i+1], sparse=False)
# Ignore reset vectors (all 0's)
if match[1] != 0:
sigmap += float(match[0])/match[1]
return sigmap / numSamples
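# Illustrative usage sketch (added for clarity, not part of the original
# module): three dense time steps, compared pairwise via checkMatch.
def _examplePopulationStability():
  vecs = numpy.array([[1, 1, 0],
                      [1, 0, 1],
                      [1, 0, 1]])
  # Step 0 -> 1: one of the two active elements persists (0.5).
  # Step 1 -> 2: both active elements persist (1.0).
  # Average over the two transitions: 0.75
  return populationStability(vecs)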
###############################################################################
def percentOutputsStableOverNTimeSteps(vectors, numSamples=None):
"""
Returns the percent of the outputs that remain completely stable over
N time steps.
Parameters:
-----------------------------------------------
vectors: the vectors for which the stability is calculated
numSamples: the number of time steps where stability is counted
For each window of numSamples, count how many outputs are active during
the entire window.
"""
# ----------------------------------------------------------------------
# Calculate the stability
totalSamples = len(vectors)
windowSize = numSamples
# Process each window
numWindows = 0
pctStable = 0
for wStart in range(0, totalSamples-windowSize+1):
# Count how many elements are active for the entire time
data = vectors[wStart:wStart+windowSize]
outputSums = data.sum(axis=0)
stableOutputs = (outputSums == windowSize).sum()
# Accumulated
samplePctStable = float(stableOutputs) / data[0].sum()
print samplePctStable
pctStable += samplePctStable
numWindows += 1
# Return percent average over all possible windows
return float(pctStable) / numWindows
###########################################################################
def computeSaturationLevels(outputs, outputsShape, sparseForm=False):
"""
Compute the saturation for a continuous level. This breaks the level into
multiple regions and computes the saturation level for each region.
Parameters:
--------------------------------------------
outputs: output of the level. If sparseForm is True, this is a list of
the non-zeros. If sparseForm is False, it is the dense
representation
outputsShape: The shape of the outputs of the level (height, width)
retval: (sat, innerSat):
sat: list of the saturation levels of each non-empty
region of the level (each 0 -> 1.0)
innerSat: list of the saturation level of each non-empty region
that is not near an edge (each 0 -> 1.0)
"""
# Get the outputs into a SparseBinaryMatrix
if not sparseForm:
outputs = outputs.reshape(outputsShape)
spOut = SM32(outputs)
else:
if len(outputs) > 0:
assert (outputs.max() < outputsShape[0] * outputsShape[1])
spOut = SM32(1, outputsShape[0] * outputsShape[1])
spOut.setRowFromSparse(0, outputs, [1]*len(outputs))
spOut.reshape(outputsShape[0], outputsShape[1])
# Get the activity in each local region using the nNonZerosPerBox method
# This method takes a list of the end row indices and a list of the end
# column indices.
# We will use regions that are 15x15, which gives us about a 1/225 (.4%) resolution
# on saturation.
regionSize = 15
rows = xrange(regionSize+1, outputsShape[0]+1, regionSize)
cols = xrange(regionSize+1, outputsShape[1]+1, regionSize)
regionSums = spOut.nNonZerosPerBox(rows, cols)
# Get all the nonzeros out - those are our saturation sums
(locations, values) = regionSums.tolist()
values /= float(regionSize * regionSize)
sat = list(values)
# Now, to compute which are the inner regions, we will only take the ones that
# are surrounded by activity above, below, left and right
innerSat = []
locationSet = set(locations)
for (location, value) in itertools.izip(locations, values):
(row, col) = location
if (row-1,col) in locationSet and (row, col-1) in locationSet \
and (row+1, col) in locationSet and (row, col+1) in locationSet:
innerSat.append(value)
return (sat, innerSat)
################################################################################
def checkMatch(input, prediction, sparse=True, verbosity=0):
"""
Compares the actual input with the predicted input and returns results
Parameters:
-----------------------------------------------
input: The actual input
prediction: the predicted input
verbosity: If > 0, print debugging messages
sparse: If true, they are in sparse form (list of
active indices)
retval (foundInInput, totalActiveInInput, missingFromInput,
totalActiveInPrediction)
foundInInput: The number of predicted active elements that were
found in the actual input
totalActiveInInput: The total number of active elements in the input.
missingFromInput: The number of predicted active elements that were not
found in the actual input
totalActiveInPrediction: The total number of active elements in the prediction
"""
if sparse:
activeElementsInInput = set(input)
activeElementsInPrediction = set(prediction)
else:
activeElementsInInput = set(input.nonzero()[0])
activeElementsInPrediction = set(prediction.nonzero()[0])
totalActiveInPrediction = len(activeElementsInPrediction)
totalActiveInInput = len(activeElementsInInput)
foundInInput = len(activeElementsInPrediction.intersection(activeElementsInInput))
missingFromInput = len(activeElementsInPrediction.difference(activeElementsInInput))
missingFromPrediction = len(activeElementsInInput.difference(activeElementsInPrediction))
if verbosity >= 1:
print "preds. found in input:", foundInInput, "out of", totalActiveInPrediction,
print "; preds. missing from input:", missingFromInput, "out of", \
totalActiveInPrediction,
print "; unexpected active in input:", missingFromPrediction, "out of", \
totalActiveInInput
return (foundInInput, totalActiveInInput, missingFromInput,
totalActiveInPrediction)
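# Illustrative usage sketch (added for clarity, not part of the original
# module): comparing a dense prediction against a dense input.
def _exampleCheckMatch():
  inputVec = numpy.array([0, 1, 1, 0, 1])
  predicted = numpy.array([0, 1, 0, 0, 1])
  # Both predicted active elements appear in the input, which has three
  # active elements in total, so this returns (2, 3, 0, 2).
  return checkMatch(inputVec, predicted, sparse=False)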
###############################################################################
def predictionExtent(inputs, resets, outputs, minOverlapPct=100.0):
"""
Computes the predictive ability of a temporal pooler (TP). This routine returns
a value which is the average number of time steps of prediction provided
by the TP. It accepts as input the inputs, outputs, and resets provided to
the TP as well as a 'minOverlapPct' used to evaluate whether or not a
prediction is a good enough match to the actual input.
The 'outputs' are the pooling outputs of the TP. This routine treats each output
as a "manifold" that includes the active columns that should be present in the
next N inputs. It then looks at each successive input and sees if its active
columns are within the manifold. For each output sample, it computes how
many time steps it can go forward on the input before the input overlap with
the manifold is less than 'minOverlapPct'. It returns the average number of
time steps calculated for each output.
Parameters:
-----------------------------------------------
inputs: The inputs to the TP. Row 0 contains the inputs from time
step 0, row 1 from time step 1, etc.
resets: The reset input to the TP. Element 0 contains the reset from
time step 0, element 1 from time step 1, etc.
outputs: The pooling outputs from the TP. Row 0 contains the outputs
from time step 0, row 1 from time step 1, etc.
minOverlapPct: How much each input's columns must overlap with the pooling
output's columns to be considered a valid prediction.
retval: (Average number of time steps of prediction over all output
samples,
Average number of time steps of prediction when we aren't
cut short by the end of the sequence,
List containing frequency counts of each encountered
prediction time)
"""
# List of how many times we encountered each prediction amount. Element 0
# is how many times we successfully predicted 0 steps in advance, element 1
# is how many times we predicted 1 step in advance, etc.
predCounts = None
# Total steps of prediction over all samples
predTotal = 0
# Total number of samples
nSamples = len(outputs)
# Total steps of prediction for samples at the start of the sequence, or
# for samples whose prediction runs aren't cut short by the end of the
# sequence.
predTotalNotLimited = 0
nSamplesNotLimited = 0
# Compute how many cells/column we have
nCols = len(inputs[0])
nCellsPerCol = len(outputs[0]) // nCols
# Evaluate prediction for each output sample
for idx in xrange(nSamples):
# What are the active columns for this output?
activeCols = outputs[idx].reshape(nCols, nCellsPerCol).max(axis=1)
# How many steps of prediction do we have?
steps = 0
while (idx+steps+1 < nSamples) and (resets[idx+steps+1] == 0):
overlap = numpy.logical_and(inputs[idx+steps+1], activeCols)
overlapPct = 100.0 * float(overlap.sum()) / inputs[idx+steps+1].sum()
if overlapPct >= minOverlapPct:
steps += 1
else:
break
# print "idx:", idx, "steps:", steps
# Accumulate into our total
predCounts = _accumulateFrequencyCounts([steps], predCounts)
predTotal += steps
# If this sample was not cut short by the end of the sequence, include
# it into the "NotLimited" runs
if resets[idx] or \
((idx+steps+1 < nSamples) and (not resets[idx+steps+1])):
predTotalNotLimited += steps
nSamplesNotLimited += 1
# Return results
return (float(predTotal) / nSamples,
float(predTotalNotLimited) / nSamplesNotLimited,
predCounts)
###############################################################################
def getCentreAndSpreadOffsets(spaceShape,
spreadShape,
stepSize=1):
"""
Generates centre offsets and spread offsets for block-mode based training
regimes - star, cross, block.
Parameters:
-----------------------------------------------
spaceShape: The (height, width) of the 2-D space to explore. This
sets the number of center-points.
spreadShape: The shape (height, width) of the area around each center-point
to explore.
stepSize: The step size. How big each step is, in pixels. This controls
*both* the spacing of the center-points within the block and the
points we explore around each center-point
retval: (centreOffsets, spreadOffsets)
"""
from nupic.math.cross import cross
# =====================================================================
# Init data structures
# What is the range on the X and Y offsets of the center points?
shape = spaceShape
# If the shape is (1,1), special case of just 1 center point
if shape[0] == 1 and shape[1] == 1:
centerOffsets = [(0,0)]
else:
xMin = -1 * (shape[1] // 2)
xMax = xMin + shape[1] - 1
xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize)
yMin = -1 * (shape[0] // 2)
yMax = yMin + shape[0] - 1
yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize)
centerOffsets = list(cross(yPositions, xPositions))
numCenterOffsets = len(centerOffsets)
print "centerOffsets:", centerOffsets
# What is the range on the X and Y offsets of the spread points?
shape = spreadShape
# If the shape is (1,1), special case of no spreading around each center
# point
if shape[0] == 1 and shape[1] == 1:
spreadOffsets = [(0,0)]
else:
xMin = -1 * (shape[1] // 2)
xMax = xMin + shape[1] - 1
xPositions = range(stepSize * xMin, stepSize * xMax + 1, stepSize)
yMin = -1 * (shape[0] // 2)
yMax = yMin + shape[0] - 1
yPositions = range(stepSize * yMin, stepSize * yMax + 1, stepSize)
spreadOffsets = list(cross(yPositions, xPositions))
# Put the (0,0) entry first
spreadOffsets.remove((0,0))
spreadOffsets.insert(0, (0,0))
numSpreadOffsets = len(spreadOffsets)
print "spreadOffsets:", spreadOffsets
return centerOffsets, spreadOffsets
###############################################################################
def makeCloneMap(columnsShape, outputCloningWidth, outputCloningHeight=-1):
"""Make a two-dimensional clone map mapping columns to clone master.
This makes a map that is (numColumnsHigh, numColumnsWide) big that can
be used to figure out which clone master to use for each column. Here are
a few sample calls
>>> makeCloneMap(columnsShape=(10, 6), outputCloningWidth=4)
(array([[ 0, 1, 2, 3, 0, 1],
[ 4, 5, 6, 7, 4, 5],
[ 8, 9, 10, 11, 8, 9],
[12, 13, 14, 15, 12, 13],
[ 0, 1, 2, 3, 0, 1],
[ 4, 5, 6, 7, 4, 5],
[ 8, 9, 10, 11, 8, 9],
[12, 13, 14, 15, 12, 13],
[ 0, 1, 2, 3, 0, 1],
[ 4, 5, 6, 7, 4, 5]], dtype=uint32), 16)
>>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3)
(array([[0, 1, 2, 0, 1, 2, 0, 1],
[3, 4, 5, 3, 4, 5, 3, 4],
[6, 7, 8, 6, 7, 8, 6, 7],
[0, 1, 2, 0, 1, 2, 0, 1],
[3, 4, 5, 3, 4, 5, 3, 4],
[6, 7, 8, 6, 7, 8, 6, 7],
[0, 1, 2, 0, 1, 2, 0, 1]], dtype=uint32), 9)
>>> makeCloneMap(columnsShape=(7, 11), outputCloningWidth=5)
(array([[ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0],
[ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5],
[10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10],
[15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15],
[20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20],
[ 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0],
[ 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5]], dtype=uint32), 25)
>>> makeCloneMap(columnsShape=(7, 8), outputCloningWidth=3, outputCloningHeight=4)
(array([[ 0, 1, 2, 0, 1, 2, 0, 1],
[ 3, 4, 5, 3, 4, 5, 3, 4],
[ 6, 7, 8, 6, 7, 8, 6, 7],
[ 9, 10, 11, 9, 10, 11, 9, 10],
[ 0, 1, 2, 0, 1, 2, 0, 1],
[ 3, 4, 5, 3, 4, 5, 3, 4],
[ 6, 7, 8, 6, 7, 8, 6, 7]], dtype=uint32), 12)
The basic idea with this map is that, if you imagine things stretching off
to infinity, every instance of a given clone master is seeing the exact
same thing in all directions. That includes:
- All neighbors must be the same
- The "meaning" of the input to each of the instances of the same clone
master must be the same. If input is pixels and we have translation
invariance--this is easy. At higher levels where input is the output
of lower levels, this can be much harder.
- The "meaning" of the inputs to neighbors of a clone master must be the
same for each instance of the same clone master.
The best way to think of this might be in terms of 'inputCloningWidth' and
'outputCloningWidth'.
- The 'outputCloningWidth' is the number of columns you'd have to move
horizontally (or vertically) before you get back to the same
clone that you started with. MUST BE INTEGRAL!
- The 'inputCloningWidth' is the 'outputCloningWidth' of the node below us.
If we're getting input from a sensor where every element just represents
a shift of every other element, this is 1.
At a conceptual level, it means that if two different inputs are shown
to the node and the only difference between them is that one is shifted
horizontally (or vertically) by this many pixels, it means we are looking
at the exact same real world input, but shifted by some number of pixels
(doesn't have to be 1). MUST BE INTEGRAL!
At level 1, I think you could have this:
* inputCloningWidth = 1
* sqrt(coincToInputRatio^2) = 2.5
* outputCloningWidth = 5
...in this case, you'd end up with 25 masters.
Let's think about this case:
input: - - - 0 1 2 3 4 5 - - - - -
columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4
...in other words, input 0 is fed to both column 0 and column 1. Input 1
is fed to columns 2, 3, and 4, etc. Hopefully, you can see that you'll
get the exact same output (except shifted) with:
input: - - - - - 0 1 2 3 4 5 - - -
columns: 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4
...in other words, we've shifted the input 2 spaces and the output shifted
5 spaces.
*** The outputCloningWidth MUST ALWAYS be an integral multiple of the ***
*** inputCloningWidth in order for all of our rules to apply. ***
*** NOTE: inputCloningWidth isn't passed here, so it's the caller's ***
*** responsibility to ensure that this is true. ***
*** The outputCloningWidth MUST ALWAYS be an integral multiple of ***
*** sqrt(coincToInputRatio^2), too. ***
@param columnsShape The shape (height, width) of the columns.
@param outputCloningWidth See docstring above.
@param outputCloningHeight If non-negative, can be used to make
rectangular (instead of square) cloning fields.
@return cloneMap An array (numColumnsHigh, numColumnsWide) that
contains the clone index to use for each
column.
@return numDistinctClones The number of distinct clones in the map. This
is just outputCloningWidth*outputCloningHeight.
"""
if outputCloningHeight < 0:
outputCloningHeight = outputCloningWidth
columnsHeight, columnsWidth = columnsShape
numDistinctMasters = outputCloningWidth * outputCloningHeight
a = numpy.empty((columnsHeight, columnsWidth), 'uint32')
for row in xrange(columnsHeight):
for col in xrange(columnsWidth):
a[row, col] = (col % outputCloningWidth) + \
(row % outputCloningHeight) * outputCloningWidth
return a, numDistinctMasters
##############################################################################
def numpyStr(array, format='%f', includeIndices=False, includeZeros=True):
""" Pretty print a numpy matrix using the given format string for each
value. Return the string representation
Parameters:
------------------------------------------------------------
array: The numpy array to print. This can be either a 1D vector or 2D matrix
format: The format string to use for each value
includeIndices: If true, include [row,col] label for each value
includeZeros: Can only be set to False if includeIndices is on.
If True, include 0 values in the print-out
If False, exclude 0 values from the print-out.
"""
shape = array.shape
assert (len(shape) <= 2)
items = ['[']
if len(shape) == 1:
if includeIndices:
format = '%d:' + format
if includeZeros:
rowItems = [format % (c,x) for (c,x) in enumerate(array)]
else:
rowItems = [format % (c,x) for (c,x) in enumerate(array) if x != 0]
else:
rowItems = [format % (x) for x in array]
items.extend(rowItems)
else:
(rows, cols) = shape
if includeIndices:
format = '%d,%d:' + format
for r in xrange(rows):
if includeIndices:
rowItems = [format % (r,c,x) for c,x in enumerate(array[r])]
else:
rowItems = [format % (x) for x in array[r]]
if r > 0:
items.append('')
items.append('[')
items.extend(rowItems)
if r < rows-1:
items.append(']\n')
else:
items.append(']')
items.append(']')
return ' '.join(items)
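# Illustrative usage sketch (added for clarity, not part of the original
# module): formatting a small 1-D vector with index labels.
def _exampleNumpyStr():
  arr = numpy.array([0.0, 1.5, 0.0])
  # Returns "[ 0:0.000000 1:1.500000 2:0.000000 ]"
  return numpyStr(arr, format='%f', includeIndices=True)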
###############################################################################
if __name__=='__main__':
testStability(numOrigVectors=10, length=500, activity=50,morphTime=3)
from IPython.Shell import IPShellEmbed; IPShellEmbed()()
|
gpl-3.0
| -4,347,953,636,782,061,600
| 36.429946
| 114
| 0.6196
| false
| 4.012441
| false
| false
| false
|
blancltd/django-latest-tweets
|
latest_tweets/utils.py
|
1
|
6404
|
from __future__ import unicode_literals
from datetime import datetime
import hashlib
from tempfile import TemporaryFile
from django.core.files import File
from django.utils.six.moves import html_parser
from django.utils.timezone import utc
from PIL import Image
import requests
from .models import Hashtag, Like, Photo, Tweet
HASHTAG_HTML = '<a href="https://twitter.com/hashtag/{text}" target="_blank">#{text}</a>'
URL_HTML = '<a href="{expanded_url}" target="_blank">{display_url}</a>'
MENTION_HTML = '<a href="https://twitter.com/{screen_name}" target="_blank">@{screen_name}</a>'
SYMBOL_HTML = '<a href="https://twitter.com/search?q=%24{text}" target="_blank">${text}</a>'
def tweet_html_entities(tweet, **kwargs):
text = list(tweet)
for hashtag in kwargs.get('hashtags', []):
start, end = hashtag['indices']
text[start] = HASHTAG_HTML.format(**hashtag)
text[start + 1:end] = [''] * (end - start - 1)
for url in kwargs.get('urls', []):
start, end = url['indices']
text[start] = URL_HTML.format(**url)
text[start + 1:end] = [''] * (end - start - 1)
for mention in kwargs.get('user_mentions', []):
start, end = mention['indices']
text[start] = MENTION_HTML.format(**mention)
text[start + 1:end] = [''] * (end - start - 1)
for symbol in kwargs.get('symbols', []):
start, end = symbol['indices']
text[start] = SYMBOL_HTML.format(**symbol)
text[start + 1:end] = [''] * (end - start - 1)
for media in kwargs.get('media', []):
start, end = media['indices']
text[start] = URL_HTML.format(**media)
text[start + 1:end] = [''] * (end - start - 1)
return ''.join(text)
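# Illustrative sketch (not part of the original module): how the entity
# expansion works on a minimal tweet. The 'indices' values are character
# offsets into the raw tweet text, as supplied by the Twitter API.
def _example_tweet_html_entities():
    text = 'Hello #django'
    hashtags = [{'text': 'django', 'indices': [6, 13]}]
    # Returns:
    # 'Hello <a href="https://twitter.com/hashtag/django" target="_blank">#django</a>'
    return tweet_html_entities(text, hashtags=hashtags)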
def tweet_hashtags(tweet, hashtags):
for hashtag in hashtags:
text = hashtag['text'].lower()
tag, created = Hashtag.objects.get_or_create(text=text)
tweet.hashtags.add(tag)
def tweet_photos(tweet, media, download):
for photo in media:
# Only photos
if photo['type'] != 'photo':
continue
photo_id = photo['id']
large = photo['sizes']['large']
obj, created = Photo.objects.get_or_create(tweet=tweet, photo_id=photo_id, defaults={
'text': photo['display_url'],
'text_index': photo['indices'][0],
'url': photo['url'],
'media_url': photo['media_url_https'],
'large_width': int(large['w']),
'large_height': int(large['h']),
})
if download and not obj.image_file:
with TemporaryFile() as temp_file:
image_file = File(temp_file)
# Download the file
r = requests.get(obj.media_url, stream=True)
r.raise_for_status()
for chunk in r.iter_content(4096):
image_file.write(chunk)
# Get Pillow to look at it
image_file.seek(0)
pil_image = Image.open(image_file)
image_name = '%s.%s' % (
hashlib.md5(obj.media_url.encode()).hexdigest(), pil_image.format.lower())
# Save the file
image_file.seek(0)
obj.image_file.save(image_name, image_file, save=True)
def update_tweets(tweet_list, tweet_entities=tweet_html_entities, download=False):
# Need to unescape HTML entities
htmlparser = html_parser.HTMLParser()
unescape = htmlparser.unescape
obj_list = []
for tweet in tweet_list:
tweet_id = tweet['id']
tweet_username = tweet['user']['screen_name']
tweet_name = tweet['user']['name']
tweet_created = datetime.strptime(
tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y'
).replace(tzinfo=utc)
tweet_is_reply = tweet['in_reply_to_screen_name'] is not None
if 'retweeted_status' in tweet:
retweeted_username = tweet['retweeted_status']['user']['screen_name']
retweeted_name = tweet['retweeted_status']['user']['name']
retweeted_tweet_id = tweet['retweeted_status']['id']
tweet_text = tweet['retweeted_status']['text']
tweet_html = tweet_entities(tweet_text, **tweet['retweeted_status']['entities'])
favorite_count = tweet['retweeted_status']['favorite_count']
retweet_count = tweet['retweeted_status']['retweet_count']
else:
retweeted_username = ''
retweeted_name = ''
retweeted_tweet_id = None
tweet_text = tweet['text']
tweet_html = tweet_entities(tweet_text, **tweet['entities'])
favorite_count = tweet['favorite_count']
retweet_count = tweet['retweet_count']
tweet_text = unescape(tweet_text)
obj, created = Tweet.objects.get_or_create(tweet_id=tweet_id, defaults={
'user': tweet_username,
'name': tweet_name,
'text': tweet_text,
'html': tweet_html,
'favorite_count': favorite_count,
'retweet_count': retweet_count,
'retweeted_username': retweeted_username,
'retweeted_name': retweeted_name,
'retweeted_tweet_id': retweeted_tweet_id,
'is_reply': tweet_is_reply,
'created': tweet_created,
})
if created:
# Add hashtags
tweet_hashtags(tweet=obj, hashtags=tweet['entities'].get('hashtags', []))
# Add any photos
tweet_photos(tweet=obj, media=tweet['entities'].get('media', []), download=download)
else:
# Update counts, but try to avoid excessive updates
update_fields = []
if obj.favorite_count != favorite_count:
obj.favorite_count = favorite_count
update_fields.append('favorite_count')
if obj.retweet_count != retweet_count:
obj.retweet_count = retweet_count
update_fields.append('retweet_count')
if update_fields:
obj.save(update_fields=update_fields)
obj_list.append(obj)
return obj_list
def update_likes(user, tweet_list, download=False):
obj_list = update_tweets(tweet_list=tweet_list, download=download)
for tweet in obj_list:
Like.objects.get_or_create(user=user, tweet=tweet)
return obj_list
|
bsd-3-clause
| -4,917,055,748,261,835,000
| 33.994536
| 96
| 0.570581
| false
| 3.731935
| false
| false
| false
|
bk1285/rpi_wordclock
|
wordclock_interfaces/event_handler.py
|
1
|
2667
|
import threading
from monotonic import monotonic as _time
class next_action:
NEXT_PLUGIN = 1
GOTO_MENU = 2
RUN_DEFAULT_PLUGIN = 3
class event_handler:
EVENT_INVALID = -1
EVENT_BUTTON_LEFT = 0
EVENT_BUTTON_RIGHT = 1
EVENT_BUTTON_RETURN = 2
EVENT_EXIT_PLUGIN = 3
EVENT_NEXT_PLUGIN_REQUESTED = 4
BUTTONS = {'left': EVENT_BUTTON_LEFT, 'right': EVENT_BUTTON_RIGHT, 'return': EVENT_BUTTON_RETURN}
def __init__(self):
self.condition = threading.Condition()
self.event = self.EVENT_INVALID
self.lock_time = 0.1
self.nextAction = next_action.RUN_DEFAULT_PLUGIN
def getNextAction(self, evt):
if evt == self.EVENT_NEXT_PLUGIN_REQUESTED:
self.nextAction = next_action.NEXT_PLUGIN
elif evt == self.EVENT_BUTTON_RETURN:
self.nextAction = next_action.GOTO_MENU
else:
self.nextAction = next_action.RUN_DEFAULT_PLUGIN
def waitForEvent(self, seconds=None):
self.condition.acquire()
self.__wait_for(lambda: self.event != self.EVENT_INVALID, seconds)
evt = self.event
self.getNextAction(evt)
self.event = self.EVENT_INVALID
self.condition.release()
return evt
def setEvent(self, evt):
self.condition.acquire()
if self.event != self.EVENT_EXIT_PLUGIN and self.event != self.EVENT_NEXT_PLUGIN_REQUESTED:
self.event = evt
self.condition.notifyAll()
self.condition.release()
def waitForExit(self, seconds=None):
self.condition.acquire()
exitWasTriggered = self.__wait_for(
lambda: self.event == self.EVENT_EXIT_PLUGIN or self.event == self.EVENT_NEXT_PLUGIN_REQUESTED, seconds)
self.getNextAction(self.event)
self.event = self.EVENT_INVALID
self.condition.release()
return True if exitWasTriggered else False
def __wait_for(self, predicate, timeout=None):
"""
Wait until a condition evaluates to True.
predicate should be a callable whose result will be interpreted as a
boolean value. A timeout may be provided giving the maximum time to
wait.
"""
endtime = None
waittime = timeout
result = predicate()
while not result:
if waittime is not None:
if endtime is None:
endtime = _time() + waittime
else:
waittime = endtime - _time()
if waittime <= 0:
break
self.condition.wait(waittime)
result = predicate()
return result
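# Illustrative usage sketch (not part of the original module): a timer thread
# posts a button event while the caller blocks in waitForEvent. The function
# is never called; it only documents the intended interaction.
def _example_usage():
    handler = event_handler()
    threading.Timer(0.2, handler.setEvent,
                    [event_handler.EVENT_BUTTON_LEFT]).start()
    evt = handler.waitForEvent(seconds=1)
    # evt == event_handler.EVENT_BUTTON_LEFT, and handler.nextAction is
    # next_action.RUN_DEFAULT_PLUGIN because a left press neither requests
    # the next plugin nor returns to the menu.
    return evt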
|
gpl-3.0
| 370,705,147,843,874,800
| 31.13253
| 116
| 0.600675
| false
| 4.090491
| false
| false
| false
|
saikia81/the-number-game
|
4/number_game.py
|
2
|
11834
|
#!/usr/bin/python
# version 4
# april 2012
# this was written by saikia81 and is copyrighted under the GNU general public license 3
# it was written in notepad++, a program I recommend!
# whitespace ftw!
#import random, system and operating system possibilities.
import os, sys
import random, time #time module
#pickling for data2file
import cPickle as pickle
#introducing the player
def instructions():
print 'welcome to the guess my number game V4'
print "I'll think of a number and you have to guess it\n"
#making a list of all possible numbers for every dificulty
def list_numbers():
list_easy = []
list_medium = []
list_hard = []
for n in range(1,101):
list_easy.append(n)
list_medium.append(n)
list_hard.append(n)
for n in range(101,201):
list_medium.append(n)
list_hard.append(n)
for n in range(-201,0):
n += 1
list_hard.append(n)
return list_easy, list_medium, list_hard
#does the player want to change the dificulty
def change_dificulty(dificulty):
if dificulty == None:
dificulty = choose_dificulty()
return dificulty
if raw_input("do you want to change dificulty? yes/no: ") == 'yes':
dificulty = choose_dificulty()
return dificulty
else:
return dificulty
#the dificulty the player wants to choose
def choose_dificulty():
print '\nwhat dificulty do you want to play in?'
dificulty = raw_input('choose between "easy", "medium" or "hard":\n')
dificulties = 'easy', 'medium', 'hard'
#if anybody tries to be smart: help them get it right
wrong = -1
if dificulty in dificulties: wrong = 0
elif dificulty not in dificulties:
wrong += 1
for n in (1,2,3):
if n == 3:
print "\nseems like you can't handle choosing a dificulty..."
dificulty = "easy"
time.sleep(2)
print ""
elif (dificulty not in dificulties):
print 'something went wrong!!! please try again\n'
dificulty = raw_input('choose between "easy", "medium" or "hard":\n')
wrong += 1
elif dificulty in dificulties:
print "\nalright so let's get started :D\n"
break
else:
print "you're doing something wrong! I'll chooce a dificulty for you\a\a\a\a\n"
dificulty = 'easy'
print "ERROR: 008"
time.sleep(2)
else:
print '\a\a\asomething went wrong the program will shutdown.'
print "ERROR: 009"
time.sleep(2.5)
sys.exit()
return dificulty
#so here a random number will be choosen depending of the dificulty
def random_number(dificulty, list_easy, list_medium, list_hard):
if dificulty == 'easy':
NUMBER = random.randrange(100) + 1
print "you have choosen the dificulty easy."
number_range = '1 and 100: '
numbers = list_easy
elif dificulty == 'medium':
NUMBER = random.randrange(200) + 1
print "you have choosen the dificulty medium."
number_range = '1 and 200: '
numbers = list_medium
elif dificulty =='hard':
NUMBER = random.randrange(-200,201)
print "you have choosen the dificulty hard."
number_range = '-200 and 200: '
numbers = list_hard
else:
print "dificulty malfunction"
print "ERROR: 003"
time.sleep(2.5)
exit()
return NUMBER, number_range, numbers
# if the guess != "the (predefined) number": loop.
def game(dificulty, NUMBER, number_range, numbers):
time.sleep(2.5)
os.system('cls')
guesses=0
guess='nothing'
while guess != NUMBER:
if guess == 'nothing':
print 'guess a number between', number_range
try:
guess = input()
except:
print "\nsomething went wrong\nyou're getting another try\n\n"
continue
guesses += 1
elif guess == 'cheater':
guess = NUMBER
elif guess not in numbers:
print "\nthe guess you made isn't in the range of valid numbers.\nAre you sure you want to make this guess?"
answ = raw_input("'yes'/'no' \n")
if answ == 'yes':
print "it's your funeral"
print '\nguess a number between', number_range
guesses += 1
elif answ == 'no':
print "good choice"
print '\nguess a number between', number_range
try:
guess = input()
except:
print "something went wrong\nyou're getting another try\n"
continue
else:
print "that isn't a valid option"
print "let's continue\n"
#if the number is higher than the guess
elif guess < NUMBER:
print 'higher...'
print '\nguess a number between', number_range
try:
guess = input()
except:
print "something went wrong\nyou're getting another try\n"
continue
guesses += 1
continue
#if the number is 'lower...'
elif guess > NUMBER:
print 'lower...'
print '\nguess a number between', number_range
try:
guess = input()
except:
print "something went wrong\n you'll get another try"
continue
guesses -= 1
guesses += 1
#this is actually an error that will never occur... but better safe than sorry.
else:
print '\a\a\asorry, something went wrong. The game will now end itself.'
sys.exit()
print
print 'you did it the NUMBER was: ', NUMBER,
print 'it cost you ', guesses, 'guesses to get it right', 'on dificulty', dificulty
print
return guesses
##Here I will use the 'os' module to keep a highscore system
#in the default appdata of the users profile.
#everything here is to see if everything is alright in it's place.
def highscore(dificulty,guesses):
FOLDER_LOCALAPPDATA = os.environ['LOCALAPPDATA']
FOLDER_NUMBER_GAME = FOLDER_LOCALAPPDATA + '\\Number_game'
#deciding if a new highscore file and/or dir is needed
if os.access(FOLDER_NUMBER_GAME, 0) == False: #dir
try:
os.mkdir(FOLDER_NUMBER_GAME)
except:
os.system('cls')
print 'creating folder: ERROR\nError code: 002'
os.system('pause')
sys.exit()
try:
HIGHSCORES_DAT = open(FOLDER_NUMBER_GAME+"\\highscores.dat", "w+")
easy_highscores={}
medium_highscores={}
hard_highscores={}
all_highscores = [easy_highscores,medium_highscores,hard_highscores]
pickle.dump(all_highscores,HIGHSCORES_DAT)
HIGHSCORES_DAT.close()
HIGHSCORES_DAT = open(FOLDER_NUMBER_GAME+"\\highscores.dat", "r+")
unpickled_file = pickle.load(HIGHSCORES_DAT)
except:
os.system('cls')
print 'loading file: ERROR\nError code: 001'
os.system('pause')
sys.exit()
else:
HIGHSCORES_DAT.close()
#done with file and folder creation
#
#showing highscores
HIGHSCORES_DAT = open(FOLDER_NUMBER_GAME+"\\highscores.dat", "r")
try:
unpickled_file = pickle.load(HIGHSCORES_DAT)
except:
print "couldn't locate or unpickle file"
print "ERROR: 005"
print "\n if this was your first run of the game: this is common"
print "if not, please send a message at saikia81@hotmail.com, thank you"
time.sleep(1)
print "everything went worse then expected. shutting down"
time.sleep(2.5)
sys.exit()
else:
HIGHSCORES_DAT.close()
if dificulty == "easy": l=0
if dificulty == "medium": l=1
if dificulty == "hard": l=2
highscores = unpickled_file[l]
#creating your highscore...
your_name = raw_input('what is your name?: ')
try:
if highscores[your_name]>guesses:
os.system('cls')
print "congratulations, new highscore!!"
if raw_input('do you want to replace your score yes/no: ') =="yes": highscores[your_name]=guesses
except:
print "new user"
highscores[your_name]=guesses
list_keys= highscores.keys()
list_values= highscores.values()
list_values.sort()
time.sleep(4)
os.system('cls')
#deeply annoying part
#highscore display
print" ---HIGHSCORE---"
print "highscores in", dificulty,"dificulty"
print"\nname attempts"
print"----------------------------------------"
i=0
#for values in sorted values list
for n in list_values:
#reset found to find next highscore
found = False
#set p to 0: to try different keys
p=0
#while the matching key and value not found keep looking
while found != True:
#m = the next key in list
m=list_keys[p]
if highscores[m] == n: found=True
p+=1
b=len(m)
b=21-b
print m,' '*b,highscores[m]
HIGHSCORES_DAT = open(FOLDER_NUMBER_GAME+"\\highscores.dat", "r")
unpickled_file = pickle.load(HIGHSCORES_DAT)
HIGHSCORES_DAT.close()
if l==0: unpickled_file[0]=highscores
if l==1: unpickled_file[1]=highscores
if l==2: unpickled_file[2]=highscores
HIGHSCORES_DAT = open(FOLDER_NUMBER_GAME+"\\highscores.dat", "w")
pickle.dump(unpickled_file,HIGHSCORES_DAT)
HIGHSCORES_DAT.close()
def end():
time.sleep(1)
print('''
The number Game V4
Copyright (C) 2012 Saikia81
''')
time.sleep(5)
os.system('cls')
print("""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
""")
time.sleep(7)
try:
if pygame.mixer.get_busy()>0:
try:
pygame.mixer.music.fadeout(3000)
except:
print "ERROR: 012"
except:
pass
time.sleep(3)
os.system('pause')
sys.exit()
def main():
#initializing
ask_music = raw_input('music "on"?: ')
if (ask_music == 'on') or (ask_music == 'yes'):
try:
import pygame.mixer
pygame.mixer.init()
pygame.mixer.music.load("song.mp3")
pygame.mixer.music.play(-1)
except:
print "pygame not working!\nError: 013"
os.system('cls')
list_easy, list_medium, list_hard = list_numbers()
dificulty = None
instructions()
while 1:
dificulty=change_dificulty(dificulty)
NUMBER, number_range, numbers = random_number(dificulty, list_easy, list_medium, list_hard)
guesses = game(dificulty, NUMBER, number_range, numbers)
highscore(dificulty,guesses)
ask_again = raw_input('\ndo you want to play again? yes/no: ')
os.system('cls')
if ask_again == 'no': end()
#start
main()
|
gpl-3.0
| -2,437,525,922,798,313,000
| 32.908309
| 120
| 0.577489
| false
| 3.857236
| false
| false
| false
|
akusok/website-ibc
|
wibc_old/utils/copy_hdf5.py
|
1
|
6601
|
# -*- coding: utf-8 -*-
"""
Utilities for copying huge HDF5 files.
Created on Thu Jun 20 14:02:59 2013
"""
#from ../modules/hdf5_creator import create_empty_hdf5
from tables import openFile
import numpy as np
import time
def copy_hdf5_newindex(data, new):
"""Copying a part of data, updating indexes.
Copying process is image-based.
Websites currently stays the same, as they are a tiny table.
Only full-file copy, no appends!
"""
def modify_data(imgrow, regs, descrs, rli):
"""Modifying data before writing it back.
Returns an empty tuple if an image is to be deleted.
"""
        K = 0.8 # coefficient: keep only regions close to the image border
regs1 = []
descrs1 = []
rli1 = rli
x,y = imgrow[9]
for i in xrange(len(regs)):
rg = regs[i]
ds = descrs[i]
xr,yr = rg[4]
# centering and normalizing
xr = (float(xr)/x - 0.5)*2
yr = (float(yr)/y - 0.5)*2
# check required condition
if (xr**2 + yr**2 >= K**2):
rg[0] = rli1 # self index
ds[0] = rli1 # self index
rli1 += 1
regs1.append(rg)
descrs1.append(ds)
if len(regs1) == 0:
return ()
else:
return (regs1, descrs1)
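        # Worked example (illustrative): for a 640x480 image, a region centred at
        # (600, 20) maps to xr = 0.875, yr = -0.917; xr**2 + yr**2 = 1.61 >= 0.64,
        # so the region lies near the border and is kept.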
print "Opening files"
db0 = openFile(data, "r")
db1 = openFile(new, "a")
i = 0
Ws0 = db0.root.Websites
Img0 = db0.root.Images
Reg0 = db0.root.Regions
Des0 = db0.root.Descriptors
Ws1 = db1.root.Websites
Img1 = db1.root.Images
Reg1 = db1.root.Regions
Des1 = db1.root.Descriptors
# websites
print "Copying websites"
batch = 10000
N = Ws0.nrows
for b in range(N/batch + 1):
nmin = b*batch
nmax = min((b+1)*batch, N)
rows = []
# just copy rows as they are the same
Ws1.append(Ws0.read(nmin, nmax))
print "ws: %d/%d" % (nmax, N)
Ws1.attrs.last_index = Ws0.attrs.last_index
Ws1.flush()
# image-based copy process
t = time.time()
reg_first = 0
last_index = 0
reg_last_index = 0
nr_in_class = np.zeros((Img0.attrs.nr_in_class.shape[0],))
flush = 0
flushbatch = 1000
N = Img0.nrows
for j in xrange(N):
imgrow = Img0.read(j,j+1)[0]
i0 = imgrow[3]
i1 = i0 + imgrow[4]
regs = Reg0.read(i0, i1)
descrs = Des0.read(i0, i1)
imgrow[0] = last_index
data = modify_data(imgrow, regs, descrs, reg_last_index)
# skipping an image if needed
if data == ():
continue
regs, descrs = data
reg_count = len(regs)
# update image row
imgrow[0] = last_index
imgrow[3] = reg_first
imgrow[4] = reg_count
# writing data - an array of tuples
Img1.append([tuple(imgrow)])
Reg1.append([tuple(r) for r in regs])
Des1.append([tuple(d) for d in descrs])
# update global attributes
nr_in_class[imgrow[1]] += 1
last_index += 1
reg_first += reg_count # updating reg_first for next image
reg_last_index += reg_count
flush += 1
# flushing
if flush >= flushbatch:
dt = time.time() - t
etr = int((float(dt)/(j+1)) * (N-j-1))
print "Images %d/%d, time remaining %d:%02d:%02d" % \
(j+1, N, etr/3600, (etr % 3600)/60, etr % 60)
flush = 0
Img1.attrs.last_index = last_index
Img1.attrs.nr_in_class = nr_in_class
Img1.flush()
Reg1.attrs.last_index = reg_last_index
Reg1.flush()
Des1.attrs.last_index = reg_last_index
Des1.flush()
# final flush
Img1.attrs.last_index = last_index
Img1.attrs.nr_in_class = nr_in_class
Img1.flush()
Reg1.attrs.last_index = reg_last_index
Reg1.flush()
Des1.attrs.last_index = reg_last_index
Des1.flush()
db0.close()
db1.close()
print 'Done copying!'
def copy_hdf5(data, new, batch=100000):
"""Copying all data to modify some columns.
"""
print "Opening files"
db0 = openFile(data, "r")
db1 = openFile(new, "a")
i = 0
Ws0 = db0.root.Websites
Img0 = db0.root.Images
Reg0 = db0.root.Regions
Des0 = db0.root.Descriptors
Ws1 = db1.root.Websites
Img1 = db1.root.Images
Reg1 = db1.root.Regions
Des1 = db1.root.Descriptors
# websites
print "Copying websites"
N = Ws0.nrows
for b in range(N/batch + 1):
nmin = b*batch
nmax = min((b+1)*batch, N)
rows = []
# just copy rows as they are the same
Ws1.append(Ws0.read(nmin, nmax))
print "ws: %d/%d" % (nmax, N)
Ws1.attrs.last_index = Ws0.attrs.last_index
Ws1.flush()
# images
print "Copying images"
N = Img0.nrows
img_repr = np.ones((24,), dtype=np.float64) * -1
for b in range(N/batch + 1):
nmin = b*batch
nmax = min((b+1)*batch, N)
rows = []
for row in Img0.read(nmin, nmax):
rows.append(tuple(row) + (img_repr,))
Img1.append(rows)
print "img: %d/%d" % (nmax, N)
Img1.attrs.last_index = Img0.attrs.last_index
Img1.attrs.nr_in_class = Img0.attrs.nr_in_class
Img1.flush()
# regions
print "Copying regions"
N = Reg0.nrows
ngb = np.ones((10,2), dtype=np.float64) * -1
for b in range(N/batch + 1):
nmin = b*batch
nmax = min((b+1)*batch, N)
rows = []
for tupl in Reg0.read(nmin, nmax):
row = list(tupl)
# format rows here
rows.append(tuple(row[:6] + [ngb] + row[6:]))
Reg1.append(rows)
print "reg: %d/%d" % (nmax, N)
Reg1.attrs.last_index = Reg0.attrs.last_index
Reg1.flush()
# descriptors
print "Copying descriptors"
N = Des0.nrows
for b in range(N/batch + 1):
nmin = b*batch
nmax = min((b+1)*batch, N)
Des1.append(Des0.read(nmin, nmax))
print "des: %d/%d" % (nmax, N)
Des1.attrs.last_index = Des0.attrs.last_index
Des1.flush()
db0.close()
db1.close()
print 'Done copying!'
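# Example invocation (illustrative output path): a full copy that adds the new
# columns would be run as
#   copy_hdf5("/data/spiiras/spiiras.h5", "/users/akusoka1/local/spiiras_new.h5")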
if __name__ == "__main__":
copy_hdf5_newindex("/data/spiiras/spiiras.h5",
"/users/akusoka1/local/spiiras_border.h5")
|
gpl-2.0
| -4,039,075,724,023,822,300
| 22.242958
| 67
| 0.518709
| false
| 3.109279
| false
| false
| false
|
qiime2-plugins/feature-table
|
q2_feature_table/_summarize/_visualizer.py
|
1
|
11251
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import pkg_resources
import shutil
import biom
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from q2_types.feature_data import DNAIterator
import q2templates
import skbio
import qiime2
import json
from ._vega_spec import vega_spec
_blast_url_template = ("http://www.ncbi.nlm.nih.gov/BLAST/Blast.cgi?"
"ALIGNMENT_VIEW=Pairwise&PROGRAM=blastn&DATABASE"
"=nt&CMD=Put&QUERY=%s")
TEMPLATES = pkg_resources.resource_filename('q2_feature_table', '_summarize')
def tabulate_seqs(output_dir: str, data: DNAIterator) -> None:
sequences = []
seq_lengths = []
with open(os.path.join(output_dir, 'sequences.fasta'), 'w') as fh:
for sequence in data:
skbio.io.write(sequence, format='fasta', into=fh)
str_seq = str(sequence)
seq_len = len(str_seq)
sequences.append({'id': sequence.metadata['id'],
'len': seq_len,
'url': _blast_url_template % str_seq,
'seq': str_seq})
seq_lengths.append(seq_len)
seq_len_stats = _compute_descriptive_stats(seq_lengths)
_write_tsvs_of_descriptive_stats(seq_len_stats, output_dir)
index = os.path.join(TEMPLATES, 'tabulate_seqs_assets', 'index.html')
q2templates.render(index, output_dir, context={'data': sequences,
'stats': seq_len_stats})
js = os.path.join(
TEMPLATES, 'tabulate_seqs_assets', 'js', 'tsorter.min.js')
os.mkdir(os.path.join(output_dir, 'js'))
shutil.copy(js, os.path.join(output_dir, 'js', 'tsorter.min.js'))
def summarize(output_dir: str, table: biom.Table,
sample_metadata: qiime2.Metadata = None) -> None:
number_of_features, number_of_samples = table.shape
sample_summary, sample_frequencies = _frequency_summary(
table, axis='sample')
if number_of_samples > 1:
# Calculate the bin count, with a minimum of 5 bins
IQR = sample_summary['3rd quartile'] - sample_summary['1st quartile']
if IQR == 0.0:
bins = 5
else:
# Freedman–Diaconis rule
bin_width = (2 * IQR) / (number_of_samples ** (1/3))
bins = max((sample_summary['Maximum frequency'] -
sample_summary['Minimum frequency']) / bin_width, 5)
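            # Illustrative check of the rule above: with an IQR of 1,000 and 64
            # samples, bin_width = 2 * 1000 / 64**(1/3), i.e. about 500, and the
            # bin count is (max frequency - min frequency) / 500, floored at 5.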
sample_frequencies_ax = sns.distplot(sample_frequencies, kde=False,
rug=True, bins=int(round(bins)))
sample_frequencies_ax.get_xaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
sample_frequencies_ax.set_xlabel('Frequency per sample')
sample_frequencies_ax.set_ylabel('Number of samples')
sample_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'sample-frequencies.pdf'))
sample_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'sample-frequencies.png'))
plt.gcf().clear()
feature_summary, feature_frequencies = _frequency_summary(
table, axis='observation')
if number_of_features > 1:
feature_frequencies_ax = sns.distplot(feature_frequencies, kde=False,
rug=False)
feature_frequencies_ax.set_xlabel('Frequency per feature')
feature_frequencies_ax.set_ylabel('Number of features')
feature_frequencies_ax.set_xscale('log')
feature_frequencies_ax.set_yscale('log')
feature_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'feature-frequencies.pdf'))
feature_frequencies_ax.get_figure().savefig(
os.path.join(output_dir, 'feature-frequencies.png'))
sample_summary_table = q2templates.df_to_html(
sample_summary.apply('{:,}'.format).to_frame('Frequency'))
feature_summary_table = q2templates.df_to_html(
feature_summary.apply('{:,}'.format).to_frame('Frequency'))
index = os.path.join(TEMPLATES, 'summarize_assets', 'index.html')
context = {
'number_of_samples': number_of_samples,
'number_of_features': number_of_features,
'total_frequencies': int(np.sum(sample_frequencies)),
'sample_summary_table': sample_summary_table,
'feature_summary_table': feature_summary_table,
}
feature_qualitative_data = _compute_qualitative_summary(table)
sample_frequencies.sort_values(inplace=True, ascending=False)
feature_frequencies.sort_values(inplace=True, ascending=False)
sample_frequencies.to_csv(
os.path.join(output_dir, 'sample-frequency-detail.csv'))
feature_frequencies.to_csv(
os.path.join(output_dir, 'feature-frequency-detail.csv'))
feature_frequencies = feature_frequencies.astype(int) \
.apply('{:,}'.format).to_frame('Frequency')
feature_frequencies['# of Samples Observed In'] = \
pd.Series(feature_qualitative_data).astype(int).apply('{:,}'.format)
feature_frequencies_table = q2templates.df_to_html(feature_frequencies)
sample_frequency_template = os.path.join(
TEMPLATES, 'summarize_assets', 'sample-frequency-detail.html')
feature_frequency_template = os.path.join(
TEMPLATES, 'summarize_assets', 'feature-frequency-detail.html')
context.update({'max_count': sample_frequencies.max(),
'feature_frequencies_table': feature_frequencies_table,
'feature_qualitative_data': feature_qualitative_data,
'tabs': [{'url': 'index.html',
'title': 'Overview'},
{'url': 'sample-frequency-detail.html',
'title': 'Interactive Sample Detail'},
{'url': 'feature-frequency-detail.html',
'title': 'Feature Detail'}]})
# Create a JSON object containing the Sample Frequencies to build the
# table in sample-frequency-detail.html
sample_frequencies_json = sample_frequencies.to_json()
templates = [index, sample_frequency_template, feature_frequency_template]
context.update({'frequencies_list':
json.dumps(sorted(sample_frequencies.values.tolist()))})
if sample_metadata is not None:
context.update({'vega_spec':
json.dumps(vega_spec(sample_metadata,
sample_frequencies
))
})
context.update({'sample_frequencies_json': sample_frequencies_json})
q2templates.util.copy_assets(os.path.join(TEMPLATES,
'summarize_assets',
'vega'),
output_dir)
q2templates.render(templates, output_dir, context=context)
def _compute_descriptive_stats(lst: list):
"""Basic descriptive statistics and a (parametric) seven-number summary.
Calculates descriptive statistics for a list of numerical values, including
count, min, max, mean, and a parametric seven-number-summary. This summary
includes values for the lower quartile, median, upper quartile, and
percentiles 2, 9, 91, and 98. If the data is normally distributed, these
seven percentiles will be equally spaced when plotted.
Parameters
----------
lst : list of int or float values
Returns
-------
dict
a dictionary containing the following descriptive statistics:
count
int: the number of items in `lst`
min
int or float: the smallest number in `lst`
max
int or float: the largest number in `lst`
mean
float: the mean of `lst`
range
int or float: the range of values in `lst`
std
float: the standard deviation of values in `lst`
seven_num_summ_percentiles
list of floats: the parameter percentiles used to calculate this
seven-number summary: [2, 9, 25, 50, 75, 91, 98]
seven_num_summ_values
list of floats: the calculated percentile values of the summary
"""
# NOTE: With .describe(), NaN values in passed lst are excluded by default
if len(lst) == 0:
raise ValueError('No values provided.')
seq_lengths = pd.Series(lst)
seven_num_summ_percentiles = [0.02, 0.09, 0.25, 0.5, 0.75, 0.91, 0.98]
descriptive_stats = seq_lengths.describe(
percentiles=seven_num_summ_percentiles)
return {'count': int(descriptive_stats.loc['count']),
'min': descriptive_stats.loc['min'],
'max': descriptive_stats.loc['max'],
'range': descriptive_stats.loc['max'] -
descriptive_stats.loc['min'],
'mean': descriptive_stats.loc['mean'],
'std': descriptive_stats.loc['std'],
'seven_num_summ_percentiles': seven_num_summ_percentiles,
'seven_num_summ_values': descriptive_stats.loc['2%':'98%'].tolist()
}
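# Example (illustrative): for sequence lengths [10, 12, 12, 13, 14],
# _compute_descriptive_stats returns count=5, min=10, max=14, range=4,
# mean=12.2, plus the seven percentile values produced by pandas' describe().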
def _write_tsvs_of_descriptive_stats(dictionary: dict, output_dir: str):
descriptive_stats = ['count', 'min', 'max', 'mean', 'range', 'std']
stat_list = []
for key in descriptive_stats:
stat_list.append(dictionary[key])
descriptive_stats = pd.DataFrame(
{'Statistic': descriptive_stats, 'Value': stat_list})
descriptive_stats.to_csv(
os.path.join(output_dir, 'descriptive_stats.tsv'),
sep='\t', index=False, float_format='%g')
seven_number_summary = pd.DataFrame(
{'Quantile': dictionary['seven_num_summ_percentiles'],
'Value': dictionary['seven_num_summ_values']})
seven_number_summary.to_csv(
os.path.join(output_dir, 'seven_number_summary.tsv'),
sep='\t', index=False, float_format='%g')
def _compute_qualitative_summary(table):
table = table.transpose()
sample_count = {}
for count_vector, feature_id, _ in table.iter():
sample_count[feature_id] = (count_vector != 0).sum()
return sample_count
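# Example (illustrative): a feature observed with non-zero counts in 3 of the
# samples yields sample_count[feature_id] == 3 in the mapping returned above.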
def _frequencies(table, axis):
return pd.Series(data=table.sum(axis=axis), index=table.ids(axis=axis))
def _frequency_summary(table, axis='sample'):
frequencies = _frequencies(table, axis=axis)
summary = pd.Series([frequencies.min(), frequencies.quantile(0.25),
frequencies.median(), frequencies.quantile(0.75),
frequencies.max(), frequencies.mean()],
index=['Minimum frequency', '1st quartile',
'Median frequency', '3rd quartile',
'Maximum frequency', 'Mean frequency'])
return summary, frequencies
|
bsd-3-clause
| 6,885,744,701,589,621,000
| 41.449057
| 79
| 0.598631
| false
| 3.881643
| false
| false
| false
|
AlexanderHaase/eppraise
|
eppraise.py
|
1
|
16057
|
#!/usr/bin/python3
from ebaysdk.finding import Connection as Finding
from ebaysdk.exception import ConnectionError
import itertools
import functools
import openpyxl
import sys
import re
import yaml
import argparse
import logging
import json
import datetime
import collections
from flask import Flask, redirect, request, render_template, make_response
from sqlalchemy.ext.declarative import declarative_base, declared_attr, as_declarative
from sqlalchemy import Table, Column, Integer, String, DateTime, ForeignKey, Boolean, create_engine
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
logger = logging.getLogger( __name__ )
def consume(iterator, n = None):
    "Advance the iterator n-steps ahead. If n is None, consume entirely."
    # Use functions that consume iterators at C speed.
    if n is None:
        # feed the entire iterator into a zero-length deque
        collections.deque(iterator, maxlen=0)
    else:
        # advance to the empty slice starting at position n
        next(itertools.islice(iterator, n, n), None)
def apply( function, iterator ):
consume( map( function, iterator ) )
def unique( iterator ):
    seenValues = set()
    # iterate directly rather than calling next(): a StopIteration escaping a
    # generator is an error under PEP 479 (Python 3.7+)
    for value in iterator:
        if value not in seenValues:
            seenValues.add( value )
            yield value
def scrub( keywords ):
    keys = re.sub( r'(\s)', ' ', keywords ).split()
    filtered = map( lambda key: re.sub( r'(\W)', '', key ), keys )
    return ' '.join( filter( None, filtered ) )
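# Example (illustrative): scrub("Nikon\tD750  (body)!") -> 'Nikon D750 body'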
#
# Setup SQL Schema
#
@as_declarative()
class SQLBase( object ):
'''Common properties for all sql objects'''
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column( Integer, primary_key = True, autoincrement = True )
def dict( self, append = tuple(), exclude = ('text',) ):
'''Serialize keys via reflection'''
keys = itertools.chain( self.__table__.columns.keys(), append )
keys = filter( lambda key: key not in exclude, keys )
attrs = map( functools.partial( getattr, self ), dir( self ) )
funcs = filter( lambda attr: hasattr( attr, '__serialize__' ) and attr.__serialize__, attrs )
result = collections.OrderedDict( map( lambda key: (key, getattr( self, key )), keys ) )
result.update( collections.OrderedDict( map( lambda func: (func.__name__, func()), funcs ) ) )
return result
def serialize( func ):
func.__serialize__ = True
return func
class JSONProps( object ):
'''Mix-in for text<->json'''
text = Column( String, nullable = False )
@property
def json( self ):
if not hasattr( self, '__jsonCache__'):
self.__jsonCache__ = json.loads( self.text )
return self.__jsonCache__
@json.setter
def json( self, data ):
self.text = json.dumps( data )
self.__jsonCache__ = data # TODO: Deep copy/reload data....
associate_watch_item = Table( 'associate_watch_item', SQLBase.metadata,
Column( "watch_id", Integer, ForeignKey( "watch.id" ), primary_key = True ),
Column( "item_id", Integer, ForeignKey( "item.id" ), primary_key = True )
)
class Watch( SQLBase ):
'''Saved watch for items'''
keywords = Column( String, nullable = False, unique = True )
enabled = Column( Boolean, nullable = False, default = True )
items = relationship("Item", back_populates="watches", secondary = associate_watch_item )
queries = relationship("Query", back_populates="watch")
@SQLBase.serialize
def estimate( self ):
'''Mean sold price'''
(total, qty) = functools.reduce( (lambda accum, item: ( accum[ 0 ] + item.price(), accum[ 1 ] + 1.0 ) ), self.items, ( 0.0, 0.0 ) )
return total / qty if qty > 0 else None
@classmethod
def queryAll( cls, context, connection ):
activeWatches = context.session().query( cls ).filter( cls.enabled == True ).all()
return map( functools.partial( Query.fromWatch, context, connection ), activeWatches )
@classmethod
def fromFile( cls, context, filePath, inputRange ):
wb = openpyxl.load_workbook( filePath )
sheet = wb.active
return map( lambda cell: context.upsert( Watch, keywords = cell.value ), itertools.chain.from_iterable( sheet[ inputRange ] ) )
class Query( SQLBase, JSONProps ):
'''Record of executing a query. Future-proofing our data!'''
watch_id = Column( Integer, ForeignKey( Watch.id ), nullable = False )
watch = relationship( Watch, back_populates = "queries" )
retrieved = Column( DateTime, default = datetime.datetime.utcnow, nullable = False )
keywords = Column( String, nullable = False )
@classmethod
def fromWatch( cls, context, connection, watch ):
'''Create a query from a watch'''
keywords = scrub( watch.keywords )
result = connection.query( keywords )
return context.upsert( cls, keywords = keywords, watch = watch, json = result.dict() )
class Item( SQLBase, JSONProps ):
'''Record from ebay. We're watching completed items, so one per item is enough.'''
ebayID = Column( String, unique = True, nullable = False )
watches = relationship( Watch, back_populates = "items", secondary = associate_watch_item )
@SQLBase.serialize
def date( self ):
return self.json[ 'listingInfo' ][ 'endTime' ]
@SQLBase.serialize
def url( self ):
return self.json[ 'viewItemURL' ]
@SQLBase.serialize
def sold( self ):
return self.json[ 'sellingStatus' ][ 'sellingState' ] == 'EndedWithSales'
@SQLBase.serialize
def price( self ):
'''Fetch an iterator of sold prices'''
return float( self.json[ 'sellingStatus' ][ 'currentPrice' ][ 'value' ] )
@classmethod
def fromQuery( cls, context, query ):
'''Creates NEW objects from query'''
items = query.json[ 'searchResult' ].get( 'item', tuple() )
return map( lambda item: context.upsert( cls, watches = [ query.watch ], json = item, ebayID = item['itemId'] ), items )
class Database( object ):
    def __init__( self, dbURL = 'sqlite:///:memory:', base = SQLBase ):
        # remember the url and declarative base so refresh() can rebuild the engine
        self.dbURL = dbURL
        self.base = base
        self.engine = create_engine( dbURL )
        self.sessionMaker = sessionmaker( self.engine )
        base.metadata.create_all( self.engine )
    def refresh( self ):
        self.engine = create_engine( self.dbURL )
        self.sessionMaker = sessionmaker( self.engine )
        self.base.metadata.create_all( self.engine )
class SessionContext( object ):
def __init__( self, db ):
self.db = db
def session( self ):
return self.activeSession
def __call__( self, func, key = 'context' ):
'''decorator'''
@functools.wraps( func )
def wrapper( *args, **kwargs ):
with self:
kwargs[ key ] = self
return func( *args, **kwargs )
return wrapper
def __enter__( self ):
self.activeSession = self.db.sessionMaker()
return self
def __exit__( self, type, value, traceback ):
if value:
self.activeSession.rollback()
else:
self.activeSession.commit()
self.activeSession.close()
del self.activeSession
def refresh( self ):
self.activeSession.rollback()
self.activeSession.close()
self.activeSession = self.db.sessionMaker()
@staticmethod
def identifyingColumns( cls ):
return filter( lambda column: column.unique or column.primary_key, cls.__table__.columns )
@classmethod
def queryArgs( this, cls, kwargs ):
present = filter( lambda column: column.name in kwargs, this.identifyingColumns( cls ) )
return map( lambda column: getattr( cls, column.name ) == kwargs[ column.name ], present )
@staticmethod
def updateKey( obj, key, value ):
if key in obj.__class__.__table__.columns:
setattr( obj, key, value )
elif hasattr( obj, key ) and isinstance( getattr( obj, key ), list ):
getattr( obj, key ).extend( value )
else:
setattr( obj, key, value )
def upsert( self, cls, **kwargs ):
try:
queryArgs = tuple(self.queryArgs( cls, kwargs) )
if len( queryArgs ) == 0:
raise KeyError( queryArgs )
obj = self.session().query( cls ).filter( *queryArgs ).one()
logger.info( "Already exists: {} {}".format( obj.__class__.__name__, obj.dict() ) )
apply( lambda item: self.updateKey( obj, item[ 0 ], item[ 1 ] ), kwargs.items() )
except (NoResultFound, KeyError):
obj = cls( **kwargs )
self.session().add( obj )
logger.info( "Added new item: {} {}".format( obj.__class__.__name__, obj.dict() ) )
return obj
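        # Example (illustrative): upsert(Watch, keywords='nikon d750') returns the
        # matching row when a unique column already holds that value; otherwise it
        # creates, adds, and returns a new Watch.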
def commitIfNew( self, obj ):
try:
self.activeSession.add( obj )
self.activeSession.commit()
logger.info( "Added new item: {} {}".format( obj.__class__.__name__, obj.dict() ) )
return True
except IntegrityError as e:
self.activeSession.rollback()
logger.info( "Already exists: {} {}\n{}".format( obj.__class__.__name__, obj.dict(), e ) )
return False
def context( self ):
return self.SessionContext( self )
#
# Ebay connections
#
class Estimate( object ):
'''
Estimate for a single search result, focusing on sold items. Mostly
focuses on dynamically extracting features from data, rather than
statically compution/storing them.
'''
def __init__( self, keyword, result ):
self.raw = result
self.keyword = keyword
self.items = result.dict()[ 'searchResult' ].get( 'item', tuple() )
def sold( self ):
'''Fetch an iterator of sold items'''
return filter( (lambda item: item[ 'sellingStatus' ][ 'sellingState' ] == 'EndedWithSales' ), self.items )
def prices( self ):
'''Fetch an iterator of sold prices'''
return map( (lambda item: item[ 'sellingStatus' ][ 'currentPrice' ][ 'value' ] ), self.sold() )
def mean( self ):
'''Mean sold price'''
(total, qty) = functools.reduce( (lambda accum, price: ( accum[ 0 ] + float(price), accum[ 1 ] + 1.0 ) ), self.prices(), ( 0.0, 0.0 ) )
return total / qty if qty > 0 else None
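    # Example (illustrative): three items sold at 10.0, 20.0 and 30.0 give
    # mean() == 20.0; with no sold items, mean() returns None.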
class Connection( object ):
'''Syntatic sugar for interacting with the ebay sdk'''
def __init__( self, **kwargs ):
self.api = Finding( **kwargs )
def query( self, item ):
return self.api.execute( 'findCompletedItems', {'keywords': item, } )
def estimate( self, item ):
'''Create an estimate for the given item'''
return Estimate( item, self.query( item ) )
def estimateFile( self, file, inputRange, outputRange ):
'''Proof of concept method for dumping this to/from a file'''
wb = openpyxl.load_workbook( file )
sheet = wb.active
ioRange = zip( sheet[ inputRange ], sheet[ outputRange ] )
def handleElement( ioElement ):
keys = re.sub( '(\s)', ' ', ioElement[ 0 ][ 0 ].value ).split()
filtered = map( lambda key: re.sub( '(\W)', '', key ), keys )
key = ' '.join( filter( None, filtered ) )
sys.stderr.write( key )
est = self.estimate( key )
mean = est.mean()
sys.stderr.write( ': {}\n'.format( mean ) )
ioElement[ 1 ][ 0 ].value = mean
functools.reduce( lambda x,y: None, map( handleElement, ioRange ) )
wb.save( file )
if __name__ == '__main__':
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create console handler and set level to info
handler = logging.StreamHandler()
handler.setLevel( logging.DEBUG )
handler.setFormatter( logging.Formatter( "%(asctime)s - %(levelname)s\t<%(name)s:%(lineno)d>: %(message)s" ) )
logger.addHandler( handler )
# Setup parser
parser = argparse.ArgumentParser()
parser.add_argument('-d','--database-url', default = 'sqlite:///eppraise.db', help = "Database connection url" )
parser.add_argument( '-v', '--verbose', default = "WARNING", help = "Set logging level in DEBUG, INFO, WARNING, ERROR, CRITICAL" )
subparsers = parser.add_subparsers( help = "command help", dest = "command" )
xlsxParser = subparsers.add_parser( "xlsx", help = "Interact with spreadsheet" )
xlsxParser.add_argument("spreadsheet", help = "Input spreadsheet" )
xlsxParser.add_argument('-i', '--input-range', required = True, help = "Range of items in spreadsheet to estimate, one per cell" )
xlsxParser.add_argument('-o', '--output-range', required = False, help = "Range for output estimates in sheet, same size as input range" )
watchParser = subparsers.add_parser( "watch", help = "Create or modify a watch" )
watchParser.add_argument( "watch", help = "Keywords to watch" )
watchParser.add_argument( "--disable", action='store_true', help = "Disable specified watch" )
watchParser.add_argument( "--estimate", action='store_true', help = "Provide an estimate based on database data" )
queryParser = subparsers.add_parser( "update", help = "Update all active watches" )
queryParser.add_argument('-c','--config', default = "./config.yaml", help = "Configuration for ebay API" )
itemParser = subparsers.add_parser( "item" )
webParser = subparsers.add_parser( "web" )
webParser.add_argument( '-a', '--host', default = "0.0.0.0", help = "Host IP address for binding server" )
webParser.add_argument( '-p', '--port', default = "5000", help = "Host port for binding server", type = int )
#todo webParser..
# parse args
args = parser.parse_args();
# setup logger
logger.setLevel( getattr( logging, args.verbose ) )
logger.debug( args )
# connect to database
logger.debug( "Connecting to database: '{}'...".format( args.database_url ) )
db = Database( args.database_url )
if args.command == 'xlsx':
with db.context() as context:
def updateWatch( inputCell, outputCell ):
watch = context.upsert( Watch, keywords = inputCell.value )
if outputCell:
outputCell.value = watch.estimate()
workbook = openpyxl.load_workbook( args.spreadsheet )
sheet = workbook.active
inputCells = itertools.chain.from_iterable( sheet[ args.input_range ] )
if args.output_range:
outputCells = itertools.chain.from_iterable( sheet[ args.output_range ] )
else:
outputCells = itertools.repeat( None )
consume( itertools.starmap( updateWatch, zip( inputCells, outputCells ) ) )
workbook.save( args.spreadsheet )
#map( lambda cell: context.upsert( Watch, keywords = cell.value ), itertools.chain.from_iterable( sheet[ inputRange ] ) )
#watches = Watch.fromFile( context, args.spreadsheet, args.input_range )
# TODO output range
elif args.command == 'watch':
with db.context() as context:
try:
watch = context.session().query( Watch ).filter( Watch.keywords == args.watch ).one()
except NoResultFound:
watch = Watch( keywords = args.watch )
context.session().add( watch )
watch.enabled = not args.disable
print( watch.dict() )
elif args.command == 'update':
# read config
with open( args.config, 'r' ) as handle:
config = yaml.safe_load( handle )
# connect
con = Connection( config_file = None, appid = config[ 'ebay' ][ 'id' ] )
with db.context() as context:
for query in Watch.queryAll( context, con ):
# Commit and filter new items
apply( context.session().expunge, itertools.filterfalse( Item.sold, Item.fromQuery( context, query ) ) )
elif args.command == 'item':
with db.context() as context:
apply( sys.stdout.write, map( "{}\n".format, map( SQLBase.dict, context.session().query( Item ).all() ) ) )
elif args.command == 'web':
app = Flask( __name__ )
def no_cache( func ):
@functools.wraps( func )
def wrapper( *args, **kwargs ):
response = func( *args, **kwargs )
if isinstance( response, str ):
response = make_response( response, 200 )
response.headers[ 'Cache-Control' ] = 'no-cache, no-store, must-revalidate'
response.headers[ 'Pragma' ] = 'no-cache'
return response
return wrapper
def serialize( iterator, status = 200 ):
response = make_response( json.dumps( list( map( SQLBase.dict, iterator ) ) ), status )
            response.headers[ 'Content-Type' ] = 'application/json'
return response
@app.route( '/watch' )
@no_cache
@db.context()
def watch( context ):
return serialize( context.session().query( Watch ).all() )
@app.route( '/watch/<int:watchId>/items' )
@no_cache
@db.context()
def watchItems( watchId, context ):
return serialize( context.session().query( Watch ).filter( Watch.id == watchId ).one().items )
@app.route( '/' )
def index():
return render_template( 'index.html' )
@app.route( '/jsonp/<name>' )
@no_cache
def jsonp( name ):
return render_template( name )
app.run( args.host, port = args.port, debug = True )
|
gpl-3.0
| 1,612,893,145,415,447,600
| 31.438384
| 139
| 0.667746
| false
| 3.362723
| true
| false
| false
|
ProjetSigma/backend
|
sigma_chat/models/message.py
|
1
|
1271
|
# -*- coding: utf-8 -*-
from django.db import models
from sigma_chat.models.chat_member import ChatMember
from sigma_chat.models.chat import Chat
def chat_directory_path(instance, filename):
    # file will be uploaded to MEDIA_ROOT/uploads/chats/<chat_id>/<filename>
return 'uploads/chats/{0}/{1}'.format(instance.chat_id.id, filename)
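# Example (illustrative): an attachment "photo.png" on a message in the chat
# with id 42 is stored under 'uploads/chats/42/photo.png'.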
class Message(models.Model):
text = models.TextField(blank=True)
chatmember_id = models.ForeignKey(ChatMember, related_name='chatmember_message')
chat_id = models.ForeignKey(Chat, related_name='message')
date = models.DateTimeField(auto_now=True)
attachment = models.FileField(upload_to=chat_directory_path, blank=True)
################################################################
# PERMISSIONS #
################################################################
@staticmethod
def has_read_permission(request):
return True
def has_object_read_permission(self, request):
        return request.user.is_member(self.chat_id)
@staticmethod
def has_write_permission(request):
return True
def has_object_write_permission(self, request):
        return request.user == self.chatmember_id.user and self.chatmember_id.is_member
|
agpl-3.0
| 1,941,828,085,756,653,000
| 34.305556
| 84
| 0.608183
| false
| 4.279461
| false
| false
| false
|