| repo_name | ref | path | copies | content |
|---|---|---|---|---|
zennobjects/kivy
|
refs/heads/master
|
kivy/input/providers/mtdev.py
|
6
|
'''
Native support for Multitouch devices on Linux, using libmtdev.
===============================================================
The Mtdev project is a part of the Ubuntu Maverick multitouch architecture.
You can read more at http://wiki.ubuntu.com/Multitouch
To configure MTDev, it's preferable to use probesysfs providers.
Check :py:class:`~kivy.input.providers.probesysfs` for more information.
Otherwise, add this to your configuration::
[input]
# devicename = hidinput,/dev/input/eventXX
acert230h = mtdev,/dev/input/event2
.. note::
You must have read access to the input event.
You can use a custom range for the X, Y and pressure values.
On some drivers, the range reported is invalid.
To fix that, you can add these options to the argument line:
* invert_x : 1 to invert X axis
* invert_y : 1 to invert Y axis
* min_position_x : X minimum
* max_position_x : X maximum
* min_position_y : Y minimum
* max_position_y : Y maximum
* min_pressure : pressure minimum
* max_pressure : pressure maximum
* min_touch_major : width shape minimum
* max_touch_major : width shape maximum
* min_touch_minor : height shape minimum
* max_touch_minor : height shape maximum
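For example, to clamp the X axis and invert the Y axis on a hypothetical
device (the device name and values below are illustrative only)::
    acert230h = mtdev,/dev/input/event2,max_position_x=32767,invert_y=1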
'''
__all__ = ('MTDMotionEventProvider', 'MTDMotionEvent')
import os
from kivy.input.motionevent import MotionEvent
from kivy.input.shape import ShapeRect
class MTDMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
if 'size_w' in args and 'size_h' in args:
self.shape = ShapeRect()
self.shape.width = args['size_w']
self.shape.height = args['size_h']
self.profile.append('shape')
if 'pressure' in args:
self.pressure = args['pressure']
self.profile.append('pressure')
super(MTDMotionEvent, self).depack(args)
def __str__(self):
i, sx, sy, d = (self.id, self.sx, self.sy, self.device)
return '<MTDMotionEvent id=%d pos=(%f, %f) device=%s>' % (i, sx, sy, d)
if 'KIVY_DOC' in os.environ:
# documentation hack
MTDMotionEventProvider = None
else:
import threading
import collections
from kivy.lib.mtdev import Device, \
MTDEV_TYPE_EV_ABS, MTDEV_CODE_SLOT, MTDEV_CODE_POSITION_X, \
MTDEV_CODE_POSITION_Y, MTDEV_CODE_PRESSURE, \
MTDEV_CODE_TOUCH_MAJOR, MTDEV_CODE_TOUCH_MINOR, \
MTDEV_CODE_TRACKING_ID, MTDEV_ABS_POSITION_X, \
MTDEV_ABS_POSITION_Y, MTDEV_ABS_TOUCH_MINOR, \
MTDEV_ABS_TOUCH_MAJOR
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.logger import Logger
class MTDMotionEventProvider(MotionEventProvider):
options = ('min_position_x', 'max_position_x',
'min_position_y', 'max_position_y',
'min_pressure', 'max_pressure',
'min_touch_major', 'max_touch_major',
'min_touch_minor', 'max_touch_minor',
'invert_x', 'invert_y')
def __init__(self, device, args):
super(MTDMotionEventProvider, self).__init__(device, args)
self._device = None
self.input_fn = None
self.default_ranges = dict()
# split arguments
args = args.split(',')
if not args:
Logger.error('MTD: No filename passed to MTD configuration')
Logger.error('MTD: Use /dev/input/event0 for example')
return None
# read filename
self.input_fn = args[0]
Logger.info('MTD: Read event from <%s>' % self.input_fn)
# read parameters
for arg in args[1:]:
if arg == '':
continue
arg = arg.split('=')
# ensure it's a key = value
if len(arg) != 2:
err = 'MTD: Bad parameter %s: Not in key=value format' % arg
Logger.error(err)
continue
# ensure the key exist
key, value = arg
if key not in MTDMotionEventProvider.options:
Logger.error('MTD: unknown %s option' % key)
continue
# ensure the value
try:
self.default_ranges[key] = int(value)
except ValueError:
err = 'MTD: invalid value %s for option %s' % (value, key)
Logger.error(err)
continue
# all good!
Logger.info('MTD: Set custom %s to %d' % (key, int(value)))
def start(self):
if self.input_fn is None:
return
self.uid = 0
self.queue = collections.deque()
self.thread = threading.Thread(
target=self._thread_run,
kwargs=dict(
queue=self.queue,
input_fn=self.input_fn,
device=self.device,
default_ranges=self.default_ranges))
self.thread.daemon = True
self.thread.start()
def _thread_run(self, **kwargs):
input_fn = kwargs.get('input_fn')
queue = kwargs.get('queue')
device = kwargs.get('device')
drs = kwargs.get('default_ranges').get
touches = {}
touches_sent = []
point = {}
l_points = {}
def process(points):
for args in points:
tid = args['id']
try:
touch = touches[tid]
except KeyError:
touch = MTDMotionEvent(device, tid, args)
touches[touch.id] = touch
touch.move(args)
action = 'update'
if tid not in touches_sent:
action = 'begin'
touches_sent.append(tid)
if 'delete' in args:
action = 'end'
del args['delete']
del touches[touch.id]
touches_sent.remove(tid)
touch.update_time_end()
queue.append((action, touch))
def normalize(value, vmin, vmax):
return (value - vmin) / float(vmax - vmin)
# open mtdev device
_fn = input_fn
_slot = 0
_device = Device(_fn)
_changes = set()
# read the limits the device reports for each axis
# (overridable via the options parsed above)
ab = _device.get_abs(MTDEV_ABS_POSITION_X)
range_min_position_x = drs('min_position_x', ab.minimum)
range_max_position_x = drs('max_position_x', ab.maximum)
Logger.info('MTD: <%s> range position X is %d - %d' %
(_fn, range_min_position_x, range_max_position_x))
ab = _device.get_abs(MTDEV_ABS_POSITION_Y)
range_min_position_y = drs('min_position_y', ab.minimum)
range_max_position_y = drs('max_position_y', ab.maximum)
Logger.info('MTD: <%s> range position Y is %d - %d' %
(_fn, range_min_position_y, range_max_position_y))
ab = _device.get_abs(MTDEV_ABS_TOUCH_MAJOR)
range_min_major = drs('min_touch_major', ab.minimum)
range_max_major = drs('max_touch_major', ab.maximum)
Logger.info('MTD: <%s> range touch major is %d - %d' %
(_fn, range_min_major, range_max_major))
ab = _device.get_abs(MTDEV_ABS_TOUCH_MINOR)
range_min_minor = drs('min_touch_minor', ab.minimum)
range_max_minor = drs('max_touch_minor', ab.maximum)
Logger.info('MTD: <%s> range touch minor is %d - %d' %
(_fn, range_min_minor, range_max_minor))
range_min_pressure = drs('min_pressure', 0)
range_max_pressure = drs('max_pressure', 255)
Logger.info('MTD: <%s> range pressure is %d - %d' %
(_fn, range_min_pressure, range_max_pressure))
invert_x = int(bool(drs('invert_x', 0)))
invert_y = int(bool(drs('invert_y', 0)))
Logger.info('MTD: <%s> axes inversion: X is %d, Y is %d' %
(_fn, invert_x, invert_y))
while _device:
# idle as much as we can.
while _device.idle(1000):
continue
# got data, read all without redoing idle
while True:
data = _device.get()
if data is None:
break
# set the working slot
if data.type == MTDEV_TYPE_EV_ABS and \
data.code == MTDEV_CODE_SLOT:
_slot = data.value
continue
# fill the slot
if _slot not in l_points:
l_points[_slot] = dict()
point = l_points[_slot]
ev_value = data.value
ev_code = data.code
if ev_code == MTDEV_CODE_POSITION_X:
val = normalize(ev_value,
range_min_position_x, range_max_position_x)
if invert_x:
val = 1. - val
point['x'] = val
elif ev_code == MTDEV_CODE_POSITION_Y:
val = 1. - normalize(ev_value,
range_min_position_y, range_max_position_y)
if invert_y:
val = 1. - val
point['y'] = val
elif ev_code == MTDEV_CODE_PRESSURE:
point['pressure'] = normalize(ev_value,
range_min_pressure, range_max_pressure)
elif ev_code == MTDEV_CODE_TOUCH_MAJOR:
point['size_w'] = normalize(ev_value,
range_min_major, range_max_major)
elif ev_code == MTDEV_CODE_TOUCH_MINOR:
point['size_h'] = normalize(ev_value,
range_min_minor, range_max_minor)
elif ev_code == MTDEV_CODE_TRACKING_ID:
if ev_value == -1:
point['delete'] = True
else:
point['id'] = ev_value
else:
# unrecognized command, ignore.
continue
_changes.add(_slot)
# push all changes
if _changes:
process([l_points[x] for x in _changes])
_changes.clear()
def update(self, dispatch_fn):
# dispatch all events queued by the input thread
try:
while True:
event_type, touch = self.queue.popleft()
dispatch_fn(event_type, touch)
except IndexError:  # queue drained
pass
MotionEventFactory.register('mtdev', MTDMotionEventProvider)
|
napalm-automation/napalm-yang
|
refs/heads/develop
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/__init__.py
|
1
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/segment-routing-sid-label-range/tlvs/tlv/unknown-tlv/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Contents of an unknown TLV within the LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__length", "__value")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"router-information",
"tlvs",
"tlv",
"segment-routing-sid-label-range",
"tlvs",
"tlv",
"unknown-tlv",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/type (uint16)
YANG Description: The type value of the unknown TLV
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/type (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: The type value of the unknown TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with uint16""",
"defined-type": "uint16",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
def _get_length(self):
"""
Getter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/length (uint16)
YANG Description: The length value of the unknown TLV
"""
return self.__length
def _set_length(self, v, load=False):
"""
Setter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/length (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_length is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_length() directly.
YANG Description: The length value of the unknown TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """length must be of a type compatible with uint16""",
"defined-type": "uint16",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="length", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)""",
}
)
self.__length = t
if hasattr(self, "_set"):
self._set()
def _unset_length(self):
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/value (binary)
YANG Description: The value portion of the unknown TLV
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/value (binary)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: The value portion of the unknown TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with binary""",
"defined-type": "binary",
"generated-type": """YANGDynClass(base=bitarray, is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='binary', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
type = __builtin__.property(_get_type)
length = __builtin__.property(_get_length)
value = __builtin__.property(_get_value)
_pyangbind_elements = OrderedDict(
[("type", type), ("length", length), ("value", value)]
)
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/segment-routing-sid-label-range/tlvs/tlv/unknown-tlv/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Contents of an unknown TLV within the LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__type", "__length", "__value")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"router-information",
"tlvs",
"tlv",
"segment-routing-sid-label-range",
"tlvs",
"tlv",
"unknown-tlv",
"state",
]
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/type (uint16)
YANG Description: The type value of the unknown TLV
"""
return self.__type
def _set_type(self, v, load=False):
"""
Setter method for type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/type (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_type() directly.
YANG Description: The type value of the unknown TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """type must be of a type compatible with uint16""",
"defined-type": "uint16",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)""",
}
)
self.__type = t
if hasattr(self, "_set"):
self._set()
def _unset_type(self):
self.__type = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
def _get_length(self):
"""
Getter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/length (uint16)
YANG Description: The length value of the unknown TLV
"""
return self.__length
def _set_length(self, v, load=False):
"""
Setter method for length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/length (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_length is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_length() directly.
YANG Description: The length value of the unknown TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """length must be of a type compatible with uint16""",
"defined-type": "uint16",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="length", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)""",
}
)
self.__length = t
if hasattr(self, "_set"):
self._set()
def _unset_length(self):
self.__length = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16
),
is_leaf=True,
yang_name="length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint16",
is_config=False,
)
def _get_value(self):
"""
Getter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/value (binary)
YANG Description: The value portion of the unknown TLV
"""
return self.__value
def _set_value(self, v, load=False):
"""
Setter method for value, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/segment_routing_sid_label_range/tlvs/tlv/unknown_tlv/state/value (binary)
If this variable is read-only (config: false) in the
source YANG file, then _set_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_value() directly.
YANG Description: The value portion of the unknown TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with binary""",
"defined-type": "binary",
"generated-type": """YANGDynClass(base=bitarray, is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='binary', is_config=False)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
def _unset_value(self):
self.__value = YANGDynClass(
base=bitarray,
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="binary",
is_config=False,
)
type = __builtin__.property(_get_type)
length = __builtin__.property(_get_length)
value = __builtin__.property(_get_value)
_pyangbind_elements = OrderedDict(
[("type", type), ("length", length), ("value", value)]
)
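# Minimal usage sketch (not part of the generated bindings; only the names
# shown above are real generated API, the values are made up for illustration):
#
#   s = state()
#   s._set_type(1)      # read-only leaves are populated via _set_* by backends
#   s._set_length(4)
#   print("%s %s" % (s.type, s.length))   # 'type' and 'length' are properties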
|
BT-jmichaud/connector
|
refs/heads/8.0
|
connector/unit/binder.py
|
12
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
jeacaveo/schmebulock
|
refs/heads/develop
|
items/views.py
|
1
|
""" Views of items app. """
from rest_framework import viewsets
from . import models
from . import serializers
from . import metadata
class BrandViewSet(viewsets.ModelViewSet):
""" Endpoint for Brands. """
queryset = models.Brand.objects.all()
serializer_class = serializers.BrandSerializer
class StoreViewSet(viewsets.ModelViewSet):
""" Endpoint for Stores. """
queryset = models.Store.objects.all()
serializer_class = serializers.StoreSerializer
class OrderViewSet(viewsets.ModelViewSet):
"""
Endpoint for Orders.
GET parameters:
'nested' (boolean): get detailed information on foreign key fields.
"""
queryset = models.Order.objects.all()
serializer_class = serializers.OrderSerializer
# Override
def get_serializer_class(self):
"""
Override!
Using custom nested serializer when requested.
"""
if (self.request.method == "GET" and
self.request.query_params.get("nested")):
return serializers.OrderNestedSerializer
return super().get_serializer_class()
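# Example requests (routes are hypothetical, assuming a standard DRF router):
#   GET /orders/          -> OrderSerializer (flat foreign keys)
#   GET /orders/?nested=1 -> OrderNestedSerializer (expanded foreign keys)
# The same 'nested' switch is used by the Item, Location and Purchase
# viewsets below.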
class ItemViewSet(viewsets.ModelViewSet):
"""
Endpoint for Items.
GET parameters:
'nested' (boolean): get detailed information on foreign key fields.
"""
queryset = models.Item.objects.all()
serializer_class = serializers.ItemSerializer
metadata_class = metadata.CustomItemMetadata
# Override
def get_serializer_class(self):
"""
Override!
Using custom nested serializer when requested.
"""
if (self.request.method == "GET" and
self.request.query_params.get("nested")):
return serializers.ItemNestedSerializer
return super().get_serializer_class()
class LocationViewSet(viewsets.ModelViewSet):
"""
Endpoint for Location.
GET parameters:
'nested' (boolean): get detailed information on foreign key fields.
"""
queryset = models.Location.objects.all()
serializer_class = serializers.LocationSerializer
# Override
def get_serializer_class(self):
"""
Override!
Using custom nested serializer when requested.
"""
if (self.request.method == "GET" and
self.request.query_params.get("nested")):
return serializers.LocationNestedSerializer
return super().get_serializer_class()
class PurchaseViewSet(viewsets.ModelViewSet):
"""
Endpoint for Purchase.
GET parameters:
'nested' (boolean): get detailed information on foreign key fields.
"""
queryset = models.Purchase.objects.all()
serializer_class = serializers.PurchaseSerializer
metadata_class = metadata.CustomPurchaseMetadata
# Override
def get_serializer_class(self):
"""
Override!
Using custom nested serializer when requested.
"""
if (self.request.method == "GET" and
self.request.query_params.get("nested")):
return serializers.PurchaseNestedSerializer
return super().get_serializer_class()
|
cpennington/edx-platform
|
refs/heads/master
|
pavelib/utils/envs.py
|
1
|
"""
Helper functions for loading environment settings.
"""
import io
import json
import os
import re
import sys
from time import sleep
import memcache
import six
from lazy import lazy
from path import Path as path
from paver.easy import BuildFailure, sh
from six.moves import configparser
from pavelib.utils.cmd import django_cmd
def repo_root():
"""
Get the root of the git repository (edx-platform).
This sometimes fails on Docker Devstack, so it's been broken
down with some additional error handling. It usually starts
working within 30 seconds or so; for more details, see
https://openedx.atlassian.net/browse/PLAT-1629 and
https://github.com/docker/for-mac/issues/1509
"""
file_path = path(__file__)
attempt = 1
while True:
try:
absolute_path = file_path.abspath()
break
except OSError:
print(u'Attempt {}/180 to get an absolute path failed'.format(attempt))
if attempt < 180:
attempt += 1
sleep(1)
else:
print('Unable to determine the absolute path of the edx-platform repo, aborting')
raise
return absolute_path.parent.parent.parent
class Env(object):
"""
Load information about the execution environment.
"""
# Root of the git repository (edx-platform)
REPO_ROOT = repo_root()
# Reports Directory
REPORT_DIR = REPO_ROOT / 'reports'
METRICS_DIR = REPORT_DIR / 'metrics'
QUALITY_DIR = REPORT_DIR / 'quality_junitxml'
# Generic log dir
GEN_LOG_DIR = REPO_ROOT / "test_root" / "log"
# Python unittest dirs
PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc"
# Bok_choy dirs
BOK_CHOY_DIR = REPO_ROOT / "common" / "test" / "acceptance"
BOK_CHOY_LOG_DIR = GEN_LOG_DIR
BOK_CHOY_REPORT_DIR = REPORT_DIR / "bok_choy"
BOK_CHOY_A11Y_REPORT_DIR = REPORT_DIR / "a11y"
BOK_CHOY_COVERAGERC = BOK_CHOY_DIR / ".coveragerc"
BOK_CHOY_A11Y_COVERAGERC = BOK_CHOY_DIR / ".a11ycoveragerc"
BOK_CHOY_A11Y_CUSTOM_RULES_FILE = (
REPO_ROOT / "node_modules" / "edx-custom-a11y-rules" /
"lib" / "custom_a11y_rules.js"
)
# Which Python version should be used in xdist workers?
PYTHON_VERSION = os.environ.get("PYTHON_VERSION", "2.7")
# If set, put reports for run in "unique" directories.
# The main purpose of this is to ensure that the reports can be 'slurped'
# in the main jenkins flow job without overwriting the reports from other
# build steps. For local development/testing, this shouldn't be needed.
if os.environ.get("SHARD", None):
shard_str = "shard_{}".format(os.environ.get("SHARD"))
BOK_CHOY_REPORT_DIR = BOK_CHOY_REPORT_DIR / shard_str
BOK_CHOY_LOG_DIR = BOK_CHOY_LOG_DIR / shard_str
# The stubs package is currently located in the Django app called "terrain"
# from when they were used by both the bok-choy and lettuce (deprecated) acceptance tests
BOK_CHOY_STUB_DIR = REPO_ROOT / "common" / "djangoapps" / "terrain"
# Directory that videos are served from
VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video"
PRINT_SETTINGS_LOG_FILE = BOK_CHOY_LOG_DIR / "print_settings.log"
# Detect if in a Docker container, and if so which one
SERVER_HOST = os.environ.get('BOK_CHOY_HOSTNAME', '0.0.0.0')
USING_DOCKER = SERVER_HOST != '0.0.0.0'
SETTINGS = 'bok_choy_docker' if USING_DOCKER else 'bok_choy'
DEVSTACK_SETTINGS = 'devstack_docker' if USING_DOCKER else 'devstack'
TEST_SETTINGS = 'test'
BOK_CHOY_SERVERS = {
'lms': {
'host': SERVER_HOST,
'port': os.environ.get('BOK_CHOY_LMS_PORT', '8003'),
'log': BOK_CHOY_LOG_DIR / "bok_choy_lms.log"
},
'cms': {
'host': SERVER_HOST,
'port': os.environ.get('BOK_CHOY_CMS_PORT', '8031'),
'log': BOK_CHOY_LOG_DIR / "bok_choy_studio.log"
}
}
BOK_CHOY_STUBS = {
'xqueue': {
'port': 8040,
'log': BOK_CHOY_LOG_DIR / "bok_choy_xqueue.log",
'config': 'register_submission_url=http://0.0.0.0:8041/test/register_submission',
},
'ora': {
'port': 8041,
'log': BOK_CHOY_LOG_DIR / "bok_choy_ora.log",
'config': '',
},
'comments': {
'port': 4567,
'log': BOK_CHOY_LOG_DIR / "bok_choy_comments.log",
},
'video': {
'port': 8777,
'log': BOK_CHOY_LOG_DIR / "bok_choy_video_sources.log",
'config': "root_dir={}".format(VIDEO_SOURCE_DIR),
},
'youtube': {
'port': 9080,
'log': BOK_CHOY_LOG_DIR / "bok_choy_youtube.log",
},
'edxnotes': {
'port': 8042,
'log': BOK_CHOY_LOG_DIR / "bok_choy_edxnotes.log",
},
'ecommerce': {
'port': 8043,
'log': BOK_CHOY_LOG_DIR / "bok_choy_ecommerce.log",
},
'catalog': {
'port': 8091,
'log': BOK_CHOY_LOG_DIR / "bok_choy_catalog.log",
},
'lti': {
'port': 8765,
'log': BOK_CHOY_LOG_DIR / "bok_choy_lti.log",
},
}
# Mongo databases that will be dropped before/after the tests run
MONGO_HOST = 'edx.devstack.mongo' if USING_DOCKER else 'localhost'
BOK_CHOY_MONGO_DATABASE = "test"
BOK_CHOY_CACHE_HOST = 'edx.devstack.memcached' if USING_DOCKER else '0.0.0.0'
BOK_CHOY_CACHE = memcache.Client(['{}:11211'.format(BOK_CHOY_CACHE_HOST)], debug=0)
# Test Ids Directory
TEST_DIR = REPO_ROOT / ".testids"
# Configured browser to use for the js test suites
SELENIUM_BROWSER = os.environ.get('SELENIUM_BROWSER', 'firefox')
if USING_DOCKER:
KARMA_BROWSER = 'ChromeDocker' if SELENIUM_BROWSER == 'chrome' else 'FirefoxDocker'
else:
KARMA_BROWSER = 'FirefoxNoUpdates'
# Files used to run each of the js test suites
# TODO: Store this as a dict. Order seems to matter for some
# reason. See issue TE-415.
KARMA_CONFIG_FILES = [
REPO_ROOT / 'cms/static/karma_cms.conf.js',
REPO_ROOT / 'cms/static/karma_cms_squire.conf.js',
REPO_ROOT / 'cms/static/karma_cms_webpack.conf.js',
REPO_ROOT / 'lms/static/karma_lms.conf.js',
REPO_ROOT / 'common/lib/xmodule/xmodule/js/karma_xmodule.conf.js',
REPO_ROOT / 'common/lib/xmodule/xmodule/js/karma_xmodule_webpack.conf.js',
REPO_ROOT / 'common/static/karma_common.conf.js',
REPO_ROOT / 'common/static/karma_common_requirejs.conf.js',
]
JS_TEST_ID_KEYS = [
'cms',
'cms-squire',
'cms-webpack',
'lms',
'xmodule',
'xmodule-webpack',
'common',
'common-requirejs',
'jest-snapshot'
]
JS_REPORT_DIR = REPORT_DIR / 'javascript'
# Directories used for common/lib/tests
IGNORED_TEST_DIRS = ('__pycache__', '.cache')
LIB_TEST_DIRS = []
for item in (REPO_ROOT / "common/lib").listdir():
dir_name = (REPO_ROOT / 'common/lib' / item)
if dir_name.isdir() and not dir_name.endswith(IGNORED_TEST_DIRS):
LIB_TEST_DIRS.append(path("common/lib") / item.basename())
LIB_TEST_DIRS.append(path("pavelib/paver_tests"))
LIB_TEST_DIRS.append(path("scripts/xsslint/tests"))
# Directory for i18n test reports
I18N_REPORT_DIR = REPORT_DIR / 'i18n'
# Service variant (lms, cms, etc.) configured with an environment variable
# We use this to determine which envs.json file to load.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# If service variant not configured in env, then pass the correct
# environment for lms / cms
if not SERVICE_VARIANT:  # this will intentionally catch ""
if any(i in sys.argv[1:] for i in ('cms', 'studio')):
SERVICE_VARIANT = 'cms'
else:
SERVICE_VARIANT = 'lms'
@classmethod
def get_django_setting(cls, django_setting, system, settings=None):
"""
Interrogate Django environment for specific settings values
:param django_setting: the django setting to get
:param system: the django app to use when asking for the setting (lms | cms)
:param settings: the settings file to use when asking for the value
:return: unicode value of the django setting
"""
if not settings:
settings = os.environ.get("EDX_PLATFORM_SETTINGS", "aws")
log_dir = os.path.dirname(cls.PRINT_SETTINGS_LOG_FILE)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
try:
value = sh(
django_cmd(
system,
settings,
u"print_setting {django_setting} 2>{log_file}".format(
django_setting=django_setting,
log_file=cls.PRINT_SETTINGS_LOG_FILE
)
),
capture=True
)
return six.text_type(value).strip()
except BuildFailure:
print(u"Unable to print the value of the {} setting:".format(django_setting))
with io.open(cls.PRINT_SETTINGS_LOG_FILE, 'r') as f:
print(f.read())
sys.exit(1)
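# Hypothetical usage sketch (setting name chosen for illustration):
#   static_root = Env.get_django_setting("STATIC_ROOT", "lms", settings="devstack")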
@classmethod
def get_nested_django_setting(cls, django_setting, nested_django_setting, system, settings=None):
"""
Interrogate Django environment for specific nested settings values
:param django_setting: the root django setting to get
:param nested_django_setting: the nested django setting to get
:param system: the django app to use when asking for the setting (lms | cms)
:param settings: the settings file to use when asking for the value
:return: unicode value of the django setting
"""
django_setting_value = cls.get_django_setting(django_setting, system, settings)
pattern = re.compile(
u"[\"']{setting}[\"']: [\"'](?P<setting_value>.*?)[\"']".format(setting=nested_django_setting)
)
match = pattern.search(django_setting_value)
if match:
return match.group('setting_value')
return None
@classmethod
def covered_modules(cls):
"""
List the source modules listed in .coveragerc for which coverage
will be measured.
"""
coveragerc = configparser.RawConfigParser()
coveragerc.read(cls.PYTHON_COVERAGERC)
modules = coveragerc.get('run', 'source')
result = []
for module in modules.split('\n'):
module = module.strip()
if module:
result.append(module)
return result
@lazy
def env_tokens(self):
"""
Return a dict of environment settings.
If we couldn't find the JSON file, issue a warning and return an empty dict.
"""
# Find the env JSON file
if self.SERVICE_VARIANT:
env_path = self.REPO_ROOT.parent / "{service}.env.json".format(service=self.SERVICE_VARIANT)
else:
env_path = path("env.json").abspath()
# If the file does not exist, here or one level up,
# issue a warning and return an empty dict
if not env_path.isfile():
env_path = env_path.parent.parent / env_path.basename()
if not env_path.isfile():
print(
u"Warning: could not find environment JSON file "
"at '{path}'".format(path=env_path), # pylint: disable=unicode-format-string
file=sys.stderr,
)
return dict()
# Otherwise, load the file as JSON and return the resulting dict
try:
with open(env_path) as env_file:
return json.load(env_file)
except ValueError:
print(
u"Error: Could not parse JSON "
"in {path}".format(path=env_path), # pylint: disable=unicode-format-string
file=sys.stderr,
)
sys.exit(1)
@lazy
def feature_flags(self):
"""
Return a dictionary of feature flags configured by the environment.
"""
return self.env_tokens.get('FEATURES', dict())
@classmethod
def rsync_dirs(cls):
"""
List the directories that should be synced during pytest-xdist
execution. Needs to include all modules for which coverage is
measured, not just the tests being run.
"""
result = set()
for module in cls.covered_modules():
result.add(module.split('/')[0])
return result
|
AIML/scikit-learn
|
refs/heads/master
|
doc/tutorial/text_analytics/data/movie_reviews/fetch_data.py
|
278
|
"""Script to download the movie review dataset"""
import os
import tarfile
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
URL = ("http://www.cs.cornell.edu/people/pabo/"
"movie-review-data/review_polarity.tar.gz")
ARCHIVE_NAME = URL.rsplit('/', 1)[1]
DATA_FOLDER = "txt_sentoken"
if not os.path.exists(DATA_FOLDER):
if not os.path.exists(ARCHIVE_NAME):
print("Downloading dataset from %s (3 MB)" % URL)
opener = urlopen(URL)
with open(ARCHIVE_NAME, 'wb') as archive:
    archive.write(opener.read())
print("Decompressing %s" % ARCHIVE_NAME)
with tarfile.open(ARCHIVE_NAME, "r:gz") as archive:
    archive.extractall(path='.')
os.remove(ARCHIVE_NAME)
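# After extraction the reviews live under ./txt_sentoken/, the archive's
# top-level folder, with one subfolder per sentiment class (pos/ and neg/).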
|
devs1991/test_edx_docmode
|
refs/heads/master
|
venv/lib/python2.7/site-packages/openassessment/assessment/tasks.py
|
9
|
"""
Celery looks for tasks in this module,
so import the tasks we want the workers to implement.
"""
# pylint:disable=W0611
from .worker.training import train_classifiers, reschedule_training_tasks
from .worker.grading import grade_essay, reschedule_grading_tasks
|
youtalk/mindstorms_ros
|
refs/heads/master
|
nxt/nxt_ros/scripts/nxt_ros.py
|
1
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import roslib; roslib.load_manifest('nxt_ros')
import nxt.locator
import rospy
import math
from nxt.motor import PORT_A, PORT_B, PORT_C
from nxt.sensor import PORT_1, PORT_2, PORT_3, PORT_4
import nxt.sensor
import nxt.motor
import thread
from sensor_msgs.msg import JointState, Imu
from nxt_msgs.msg import Range, Contact, JointCommand, Color, Gyro, Accelerometer
from PyKDL import Rotation
POWER_TO_NM = 0.01
POWER_MAX = 125
global my_lock
my_lock = thread.allocate_lock()
def check_params(ns, params):
for p in params:
if not rospy.has_param(ns+'/'+p):
return False
return True
class Device(object):
def __init__(self, params):
self.desired_period = 1.0 / params['desired_frequency']
self.period = self.desired_period
self.initialized = False
self.name = params['name']
def needs_trigger(self):
# initialize
if not self.initialized:
self.initialized = True
self.last_run = rospy.Time.now()
rospy.logdebug('Initializing %s', self.name)
return False
# compute frequency
now = rospy.Time.now()
period = 0.9 * self.period + 0.1 * (now - self.last_run).to_sec()
# check period
if period > self.desired_period * 1.5:
    rospy.logerr('%s not reaching desired frequency: actual %f, desired %f',
                 self.name, 1.0 / period, 1.0 / self.desired_period)
elif period > self.desired_period * 1.2:
    rospy.logwarn('%s not reaching desired frequency: actual %f, desired %f',
                  self.name, 1.0 / period, 1.0 / self.desired_period)
return period > self.desired_period
def trigger(self):
raise NotImplementedError()
def do_trigger(self):
try:
rospy.logdebug('Trigger %s with current frequency %f',
self.name, 1.0/self.period)
now = rospy.Time.now()
self.period = 0.9 * self.period + 0.1 * (now - self.last_run).to_sec()
self.last_run = now
self.trigger()
rospy.logdebug('Trigger %s took %f milliseconds',
self.name, (rospy.Time.now() - now).to_sec() * 1000)
except nxt.error.I2CError:
rospy.logwarn('caught an exception nxt.error.I2CError')
except nxt.error.DirProtError:
rospy.logwarn('caught an exception nxt.error.DirProtError')
class Motor(Device):
def __init__(self, params, comm):
Device.__init__(self, params)
# create motor
self.name = params['name']
self.motor = nxt.motor.Motor(comm, eval(params['port']))
self.cmd = 0 # default command
# create publisher
self.pub = rospy.Publisher('joint_state', JointState)
self.last_js = None
# create subscriber
self.sub = rospy.Subscriber('joint_command', JointCommand, self.cmd_cb, None, 2)
def cmd_cb(self, msg):
if msg.name == self.name:
cmd = msg.effort / POWER_TO_NM
if cmd > POWER_MAX:
cmd = POWER_MAX
elif cmd < -POWER_MAX:
cmd = -POWER_MAX
self.cmd = cmd # save command
def trigger(self):
js = JointState()
js.header.stamp = rospy.Time.now()
state = self.motor.get_output_state()
js.name.append(self.name)
js.position.append(state[9] * math.pi / 180.0)
js.effort.append(state[1] * POWER_TO_NM)
vel = 0
if self.last_js:
    vel = (js.position[0] - self.last_js.position[0]) / \
        (js.header.stamp - self.last_js.header.stamp).to_sec()
js.velocity.append(vel)
self.pub.publish(js)
self.last_js = js
# send command
self.motor.run(int(self.cmd), 0)
class TouchSensor(Device):
def __init__(self, params, comm):
Device.__init__(self, params)
# create touch sensor
self.touch = nxt.sensor.TouchSensor(comm, eval(params['port']))
self.frame_id = params['frame_id']
# create publisher
self.pub = rospy.Publisher(params['name'], Contact)
def trigger(self):
ct = Contact()
ct.contact = self.touch.get_sample()
ct.header.frame_id = self.frame_id
ct.header.stamp = rospy.Time.now()
self.pub.publish(ct)
class UltraSonicSensor(Device):
def __init__(self, params, comm):
Device.__init__(self, params)
# create ultrasonic sensor
self.ultrasonic = nxt.sensor.UltrasonicSensor(comm, eval(params['port']))
self.frame_id = params['frame_id']
self.spread = params['spread_angle']
self.min_range = params['min_range']
self.max_range = params['max_range']
# create publisher
self.pub = rospy.Publisher(params['name'], Range)
def trigger(self):
ds = Range()
ds.header.frame_id = self.frame_id
ds.header.stamp = rospy.Time.now()
ds.range = self.ultrasonic.get_sample()/100.0
ds.spread_angle = self.spread
ds.range_min = self.min_range
ds.range_max = self.max_range
self.pub.publish(ds)
class GyroSensor(Device):
def __init__(self, params, comm):
Device.__init__(self, params)
# create gyro sensor
self.gyro = nxt.sensor.GyroSensor(comm, eval(params['port']))
self.frame_id = params['frame_id']
self.orientation = 0.0
self.offset = 0.0
self.prev_time = rospy.Time.now()
# calibrate
rospy.loginfo('Calibrating Gyro. Don\'t move the robot now')
start_time = rospy.Time.now()
cal_duration = rospy.Duration(2)
offset = 0
tmp_time = rospy.Time.now()
while rospy.Time.now() < start_time + cal_duration:
rospy.sleep(0.01)
sample = self.gyro.get_sample()
now = rospy.Time.now()
offset += (sample * (now - tmp_time).to_sec())
tmp_time = now
self.offset = offset / (tmp_time - start_time).to_sec()
rospy.loginfo('Gyro calibrated with offset %f'%self.offset)
# create publisher
self.pub = rospy.Publisher(params['name'], Gyro)
# create publisher
self.pub2 = rospy.Publisher(params['name']+'_imu', Imu)
def trigger(self):
sample = self.gyro.get_sample()
gs = Gyro()
gs.header.frame_id = self.frame_id
gs.header.stamp = rospy.Time.now()
gs.calibration_offset.x = 0.0
gs.calibration_offset.y = 0.0
gs.calibration_offset.z = self.offset
gs.angular_velocity.x = 0.0
gs.angular_velocity.y = 0.0
gs.angular_velocity.z = (sample - self.offset) * math.pi / 180.0
gs.angular_velocity_covariance = [0, 0, 0, 0, 0, 0, 0, 0, 1]
self.pub.publish(gs)
imu = Imu()
imu.header.frame_id = self.frame_id
imu.header.stamp = rospy.Time.now()
imu.angular_velocity.x = 0.0
imu.angular_velocity.y = 0.0
imu.angular_velocity.z = (sample-self.offset)*math.pi/180.0
imu.angular_velocity_covariance = [0, 0, 0, 0, 0, 0, 0, 0, 1]
imu.orientation_covariance = [0.001, 0, 0, 0, 0.001, 0, 0, 0, 0.1]
self.orientation += imu.angular_velocity.z * \
(imu.header.stamp - self.prev_time).to_sec()
self.prev_time = imu.header.stamp
imu.orientation.x, imu.orientation.y, imu.orientation.z, imu.orientation.w = \
Rotation.RotZ(self.orientation).GetQuaternion()
self.pub2.publish(imu)
class AccelerometerSensor(Device):
def __init__(self, params, comm):
Device.__init__(self, params)
# create gyro sensor
self.accel = nxt.sensor.AccelerometerSensor(comm, eval(params['port']))
self.frame_id = params['frame_id']
# create publisher
self.pub = rospy.Publisher(params['name'], Accelerometer)
def trigger(self):
gs = Accelerometer()
gs.header.frame_id = self.frame_id
gs.header.stamp = rospy.Time.now()
x, y, z = self.accel.get_sample()
gs.linear_acceleration.x = x * 9.8
gs.linear_acceleration.y = y * 9.8
gs.linear_acceleration.z = z * 9.8
gs.linear_acceleration_covariance = [1, 0, 0, 0, 1, 0, 0, 0, 1]
self.pub.publish(gs)
class ColorSensor(Device):
def __init__(self, params, comm):
Device.__init__(self, params)
# create color sensor
self.color = nxt.sensor.ColorSensor(comm, eval(params['port']))
self.frame_id = params['frame_id']
# create publisher
self.pub = rospy.Publisher(params['name'], Color)
def trigger(self):
co = Color()
co.header.frame_id = self.frame_id
co.header.stamp = rospy.Time.now()
co.intensity = 0.0
color = self.color.get_color()
if color == 1: # black
co.r = 0.0
co.g = 0.0
co.b = 0.0
elif color == 2: # blue
co.r = 0.0
co.g = 0.0
co.b = 1.0
elif color == 3: # green
co.r = 0.0
co.g = 1.0
co.b = 0.0
elif color == 4: # yellow
co.r = 1.0
co.g = 1.0
co.b = 0.0
elif color == 5: # red
co.r = 1.0
co.g = 0.0
co.b = 0.0
elif color == 6: # white
co.r = 1.0
co.g = 1.0
co.b = 1.0
else:
rospy.logerr('Undefined color of color sensor')
self.pub.publish(co)
class IntensitySensor(Device):
def __init__(self, params, comm):
Device.__init__(self, params)
# create intensity sensor
self.intensity = nxt.sensor.ColorSensor(comm, eval(params['port']))
self.frame_id = params['frame_id']
self.color_r = params['color_r']
self.color_g = params['color_g']
self.color_b = params['color_b']
if self.color_r == 1.0 and self.color_g == 0.0 and self.color_b == 0.0:
self.color = 'red'
elif self.color_r == 0.0 and self.color_g == 1.0 and self.color_b == 0.0:
self.color = 'green'
elif self.color_r == 0.0 and self.color_g == 0.0 and self.color_b == 1.0:
self.color = 'blue'
elif self.color_r == 0.0 and self.color_g == 0.0 and self.color_b == 0.0:
self.color = 'off'
else:
rospy.logerr('Invalid RGB values specified for intensity color sensor')
# create publisher
self.pub = rospy.Publisher(params['name'], Color)
def trigger(self):
co = Color()
co.header.frame_id = self.frame_id
co.header.stamp = rospy.Time.now()
co.r = self.color_r
co.g = self.color_g
co.b = self.color_b
co.intensity = self.intensity.get_reflected_light(self.color)
self.pub.publish(co)
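# Hypothetical ~nxt_robot parameter, e.g. from a YAML file loaded by roslaunch
# (keys match what main() and the Device subclasses above read):
#   nxt_robot:
#     - type: motor
#       name: motor_joint
#       port: PORT_A
#       desired_frequency: 20.0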
def main():
rospy.init_node('nxt_ros')
ns = 'nxt_robot'
host = rospy.get_param('~host', None)
sock = nxt.locator.find_one_brick(host)
b = sock.connect()
config = rospy.get_param('~'+ns)
components = []
for c in config:
rospy.loginfo('Creating %s with name %s on %s',
c['type'], c['name'], c['port'])
if c['type'] == 'motor':
components.append(Motor(c, b))
elif c['type'] == 'touch':
components.append(TouchSensor(c, b))
elif c['type'] == 'ultrasonic':
components.append(UltraSonicSensor(c, b))
elif c['type'] == 'color':
components.append(ColorSensor(c, b))
elif c['type'] == 'intensity':
components.append(IntensitySensor(c, b))
elif c['type'] == 'gyro':
components.append(GyroSensor(c, b))
elif c['type'] == 'accelerometer':
components.append(AccelerometerSensor(c, b))
else:
rospy.logerr('Invalid sensor/actuator type %s' % c['type'])
callback_handle_frequency = 10.0
last_callback_handle = rospy.Time.now()
while not rospy.is_shutdown():
my_lock.acquire()
triggered = False
for c in components:
if c.needs_trigger() and not triggered:
c.do_trigger()
triggered = True
my_lock.release()
now = rospy.Time.now()
if (now - last_callback_handle).to_sec() > 1.0 / callback_handle_frequency:
last_callback_handle = now
rospy.sleep(0.01)
if __name__ == '__main__':
main()
|
chand3040/cloud_that
|
refs/heads/named-release/cypress.rc
|
openedx/core/djangoapps/user_api/accounts/tests/test_image_helpers.py
|
124
|
"""
Tests for helpers.py
"""
import datetime
import hashlib
from mock import patch
from pytz import UTC
from unittest import skipUnless
from django.conf import settings
from django.test import TestCase
from ..image_helpers import get_profile_image_urls_for_user
from student.tests.factories import UserFactory
TEST_SIZES = {'full': 50, 'small': 10}
TEST_PROFILE_IMAGE_UPLOAD_DT = datetime.datetime(2002, 1, 9, 15, 43, 1, tzinfo=UTC)
@patch.dict('openedx.core.djangoapps.user_api.accounts.image_helpers.PROFILE_IMAGE_SIZES_MAP', TEST_SIZES, clear=True)
@skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class ProfileImageUrlTestCase(TestCase):
"""
Tests for profile image URL generation helpers.
"""
def setUp(self):
super(ProfileImageUrlTestCase, self).setUp()
self.user = UserFactory()
# Ensure that parental controls don't apply to this user
self.user.profile.year_of_birth = 1980
self.user.profile.profile_image_uploaded_at = TEST_PROFILE_IMAGE_UPLOAD_DT
self.user.profile.save()
def verify_url(self, actual_url, expected_name, expected_pixels, expected_version):
"""
Verify correct url structure.
"""
self.assertEqual(
actual_url,
'http://example-storage.com/profile-images/{name}_{size}.jpg?v={version}'.format(
name=expected_name, size=expected_pixels, version=expected_version
)
)
def verify_default_url(self, actual_url, expected_pixels):
"""
Verify correct url structure for a default profile image.
"""
self.assertEqual(
actual_url,
'/static/default_{size}.png'.format(size=expected_pixels)
)
def verify_urls(self, actual_urls, expected_name, is_default=False):
"""
Verify correct url dictionary structure.
"""
self.assertEqual(set(TEST_SIZES.keys()), set(actual_urls.keys()))
for size_display_name, url in actual_urls.items():
if is_default:
self.verify_default_url(url, TEST_SIZES[size_display_name])
else:
self.verify_url(
url, expected_name, TEST_SIZES[size_display_name], TEST_PROFILE_IMAGE_UPLOAD_DT.strftime("%s")
)
def test_get_profile_image_urls(self):
"""
Tests `get_profile_image_urls_for_user`
"""
self.user.profile.profile_image_uploaded_at = TEST_PROFILE_IMAGE_UPLOAD_DT
self.user.profile.save()
expected_name = hashlib.md5('secret' + self.user.username).hexdigest()
actual_urls = get_profile_image_urls_for_user(self.user)
self.verify_urls(actual_urls, expected_name, is_default=False)
self.user.profile.profile_image_uploaded_at = None
self.user.profile.save()
self.verify_urls(get_profile_image_urls_for_user(self.user), 'default', is_default=True)
|
ua-snap/downscale
|
refs/heads/master
|
snap_scripts/epscor_sc/dot_dof_logs_cmip5_decadals.py
|
1
|
# # # #
# CALCULATE DOF/DOT/LOGS FROM TAS TIMESERIES
# # # #
def tfg_days( x, err='off' ):
''' calculate DOF/DOT/LOGS for a vector of 12 chronological monthly values '''
import itertools
import numpy as np
# filter the div by zero and comparison with np.nan warnings from numpy
if err == 'off':
np.warnings.filterwarnings( "ignore", category=RuntimeWarning )
x[ x == 0 ] = -0.0001 # need to treat zero as freezing (working with signs)
# positive or negative monthly temps
s1 = np.sign( x )
# products of consecutive months' signs: positive indicates no change; negative indicates a potential freeze or thaw transition
s = s1[:11] * s1[1:]
idx, = np.where( s < 0 )
# may be length zero (no transitions)
ind = np.sort( np.concatenate( [idx, idx+1] ) )
	if np.any( np.isnan( x ) ): # ignore cells with missing data
dot, dof, grow = itertools.repeat( np.array([np.nan]), 3 )
case = 1
elif (len(ind) == 0) & (s1[0] > 0): # no transitions: all positive temps means no freeze day
dot = np.array([0])
dof, grow = itertools.repeat( np.array([365]), 2 )
case = 2
elif (len(ind) == 0) & (s1[0] < 0): # no transitions: all negative temps means no thaw day
dot = np.array([365])
dof, grow = itertools.repeat( np.array([0]), 2 )
case = 3
# [ML FIXED]
elif len(ind) == 2: # only one transition during the year, thawing or freezing
# places where we know the ground freezes and thaws,
# but during a specific 12 months we just don't happen to witness both
# only thaw occurs
if x[ ind[0] ] < 0:
# [ml] note:((ind[0]+1)-1) is ind[0]+1 is the month number and minus 1 is to get to previous month
# we could make that a call to a months array -- months = range(1, 12+1)
dot = 15 + 30 * ((ind[0]+1)-1) - np.round( x[ ind[0] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dof = np.array([350]) # 350: we know the ground freezes so we use 350 rather than the special 365
grow = dof - dot
case = 4
# only freeze occurs
if x[ ind[0] ] > 0:
dof = 350 - 30 * (12-ind[1]-1) - np.round( x[ ind[1] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dot = np.array([15]) # 15: we know the ground thaws so we use 15 rather than the special 0
grow = dof - dot
case = 5
# [ML FIXED]
elif (len(ind) == 4 ) & (s1[0] < 0): # two transitions occur: thaw, then freeze (this is the ideal case; everything else is an idiosyncratic edge case)
# [ml] note:((ind[0]+1)-1) is ind[0]+1 is the month number and minus 1 is to get to previous month
# we could make that a call to a months array -- months = range(1, 12+1)
dot = 15 + 30 * ((ind[0]+1)-1) - np.round( x[ ind[0] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dof = 350 - 30 * (12-ind[3]-1) - np.round( x[ ind[3] ] / (np.diff( x[ ind[2:4] ] ) / 30.0), decimals=0 )
grow = dof - dot
case = 0
# [ML FIXED]
elif (len(ind) == 4) & (s1[0] > 0): # two transitions occur but backward to what is expected; freeze, then thaw
if( ind[0] >= 7 ): # freeze occurs in second half of year as expected; late thaw is spurious
# dof = 350 - 30 * (12-ind[1]-1) - np.round( x[ ind[1] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dof = 350 - 30 * (12-ind[1]-1) - np.round( x[ ind[1] ] / (np.diff( x[ ind[:2] ] ) / 30.0), decimals=0 )
dot = np.array([15]) # ignore spurious post-freeze thaw; treat as early, unobserved thaw
grow = dof - dot
case = 6
if ind[0] <= 6: # spurious freeze occurs in first half of year; thaw probably fine
dot = 15 + 30 * ((ind[2]+1)-1) - np.round( x[ ind[2] ] / (np.diff( x[ ind[2:4] ]) / 30.0), decimals=0 )
dof = np.array([350]) # ignore spurious early freeze; treat as late, unobserved freeze
grow = dof - dot
case = 7
# [ML FIXED]
elif len(ind) > 4: # more than two transitions; at least one definitely spurious
# [MATT Q]:
# what is the prepending 0 below? and what is its intention?
# what do u do if there is a use-case where idx-0 is already chosen? Py is ZERO-anchored...
ind2, = np.where( s < 0 )
ind2 = ind2 + 1
ind2 = np.insert( ind2, 0, np.array([0]) )
# [ml] m1, m2 are month indexes
m1, = np.where( np.diff( ind2 ) == np.max( np.diff( ind2 ) ) )
m1 = m1[-1] + 1
		m2, = np.where( np.delete(np.diff( ind2 ), (m1-1)) == max( np.delete(np.diff( ind2 ), (m1-1))) )
m2 = m2[-1] + 1
if m1 == m2:
m2 = m2 - 1
ind2 = ind2[ np.sort( np.append( m1, m2 ) ) ]
ind = np.sort( np.append(ind2, ind2+1) ) - 1
dot = 15 + 30 * (ind[1]-1) - np.round( x[ind[1]-1] / (np.diff( x[ ind[:2] ] ) / 30.0), 0) # [ml] SOME WEIRD -1's here...
dof = 350 - 30 * (12-ind[3]-1) - np.round( x[ind[3]] / (np.diff( x[ ind[2:4] ] ) / 30.0), 0)
grow = dof - dot
case = 8
else:
dot, dof, grow = itertools.repeat( np.array([np.nan]), 3 )
return np.concatenate([dof, dot, grow])
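# A hedged usage sketch (synthetic monthly means, assumed degrees C): twelve
# chronological values with a spring thaw and an autumn freeze; the call
# returns a length-3 array of [dof, dot, logs]:
#   import numpy as np
#   tas = np.array([-20., -15., -5., 2., 8., 12., 14., 10., 4., -3., -12., -18.])
#   dof_dot_logs = tfg_days( tas.copy() )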
def read_arr( fn, band=1 ):
'''
read the array from a GTiff using rasterio without mem leaks
and return a tuple of (arr, meta)
where `meta` is the file metadata
'''
with rasterio.open( fn ) as rst:
arr = rst.read( band )
meta = rst.meta
return (arr, meta)
def rasterize_shp( shp_fn, arr, affine, fill=0 ):
'''
convert a shapefile into a raster using a template arr and
affine transform.
'''
from rasterio import features
import geopandas as gpd
shp = gpd.read_file( shp_fn )
geoms = [ (geom, idx+1) for idx, geom in enumerate( shp.geometry ) ]
mask = features.rasterize( geoms, out_shape=arr.shape,
fill=fill, transform=affine,
dtype='float32', all_touched=True )
return mask
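# Hedged usage sketch (hypothetical paths; assumes an older rasterio (<1.0)
# whose meta dict carries an 'affine' key, as run_decade() below implies):
#   arr, meta = read_arr( 'template.tif' )
#   zones = rasterize_shp( 'regions.shp', arr, meta['affine'] )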
def get_monyear( fn ):
'''
specialized function to split the filename
pattern of SNAP data and extract
year and month information from it.
'''
fn, ext = os.path.splitext( fn )
return fn.split( '_' )[-2:]
def get_decade( fn ):
month, year = get_monyear( fn )
return str(year[:-3])+ '0s'
def get_scenario( fn ):
	'''
	specialized function to return the rcp or historical scenario of a
	data file from the filename components. It's quasi-smart,
	but not perfect.
	'''
basename = os.path.basename( fn )
basename, ext = os.path.splitext( basename )
bsplit = basename.split('_')
scenario, = [ i for i in bsplit if i.startswith('rcp') or i == 'historical' ]
return scenario
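# e.g. (hypothetical filename):
#   get_scenario( '/data/tas_mean_monthly_mean_C_GFDL-CM3_rcp60_2010.tif' )
#   -> 'rcp60'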
# def run( fn, mask, band=1 ):
# ''' run the extractor '''
# arr, meta = read_arr( fn, band=band )
# month, year = get_monyear( fn )
# return pd.DataFrame({ '-'.join([month, year]):{int(i):arr[ mask == i ][0]
# for i in np.unique( mask ) if i > 0} }).T
def run_decade( filenames ):
import os, rasterio
import numpy as np
# load the data to an ndarray
	arr = np.array([ read_arr( fn )[0] for fn in filenames ])
	# use the first file in the group as the template for the mask and metadata
	fn = filenames[0]
	mask = rasterio.open( fn ).read_masks( 1 )
	# run the function
	out_arr = np.apply_along_axis( tfg_days, axis=0, arr=arr )
	# filename work
	dirname, basename = os.path.split( fn )
basename, ext = os.path.splitext( basename )
new_base = basename.replace( 'tas_mean_monthly_mean_C', 'dot_dof_grow_mean_decadal' ).replace( '_01_', '_' )
new_dir = dirname.replace( 'tas', 'dot_dof_grow' )
# build output directory if needed.
if not os.path.exists( new_dir ):
os.makedirs( new_dir )
# copy metadata
with rasterio.open( fn ) as rst:
meta = rst.meta
meta.update( compress='lzw', count=3 )
_ = meta.pop( 'transform' )
# write out file
output_filename = os.path.join( new_dir, new_base + ext )
with rasterio.open( output_filename, 'w', **meta ) as out:
# make it float32?
out_arr = out_arr.astype( np.float32 )
ind0, ind1 = np.where( mask == 0 )
out_arr[ ..., ind0, ind1 ] = meta[ 'nodata' ]
out.write( out_arr )
return output_filename
if __name__ == '__main__':
import os, rasterio, glob
import numpy as np
import pandas as pd
from pathos.mp_map import mp_map
from functools import partial
from downscale.utils import sort_files, only_years
import matplotlib
matplotlib.use( 'agg' )
from matplotlib import pyplot as plt
import argparse
# parse some args
parser = argparse.ArgumentParser( description='downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP' )
parser.add_argument( "-b", "--base_path", action='store', dest='base_path', type=str, help="path to the directory where the decadal monthly downscaled data are stored" )
parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="model name (exact)" )
# parse the args and unpack
args = parser.parse_args()
base_path = args.base_path
model = args.model
# # TESTING STUFF
# base_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/derived_grids/decadal_monthlies'
# model = 'GFDL-CM3'
# # END TESTING
# # # # TESTING MATTS STUFF
# base_path = '/Data/Base_Data/Climate/AK_CAN_2km/projected/AR5_CMIP5_models/rcp60/IPSL-CM5A-LR/derived/tas/decadal_mean'
# model = 'IPSL-CM5A-LR'
# RUN ALL SCENARIOS IN A SINGLE NODE FOR SINGLE MODEL
# # list and sort the files from the directory
files = [ os.path.join( r, f ) for r,s,files in os.walk( base_path ) for f in files if f.endswith('.tif') and 'tas_' in f and model in f ]
# # TESTING ONE BELOW
# files = [ os.path.join( r, f ) for r,s,files in os.walk( base_path ) for f in files if f.endswith('.tif') and 'tas_' in f and model in f and 'rcp60' in f and '_201' in f]
# # END TESTING
scenarios = [ get_scenario(fn) for fn in files ]
file_groups = [ [ sorted( y.tolist() ) for x,y in j.groupby( [ fn.split('.')[0].split('_')[-1] for fn in j ] )] for i,j in pd.Series( files ).groupby( scenarios ) ]
file_groups = [ j for i in file_groups for j in i ]
# groupby decade
# decades = [ fn.split('.')[0].split('_')[-1] for fn in files ]
# file_groups = [ j.tolist() for i,j in pd.Series( files ).groupby( decades ) ]
# NEW RUNNNER!
done = mp_map( run_decade, file_groups, nproc=32 )
|
guilhermebr/python-docx
|
refs/heads/master
|
docx/api.py
|
1
|
# encoding: utf-8
"""
Directly exposed API functions and classes, :func:`Document` for now.
Provides a syntactically more convenient API for interacting with the
OpcPackage graph.
"""
from __future__ import absolute_import, division, print_function
import os
from docx.enum.section import WD_SECTION
from docx.enum.text import WD_BREAK
from docx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TYPE as RT
from docx.package import Package
from docx.parts.numbering import NumberingPart
from docx.parts.styles import StylesPart
from docx.parts.header import HeaderPart
from docx.shared import lazyproperty
_thisdir = os.path.split(__file__)[0]
_default_docx_path = os.path.join(_thisdir, 'templates', 'default.docx')
class Document(object):
"""
Return a |Document| instance loaded from *docx*, where *docx* can be
either a path to a ``.docx`` file (a string) or a file-like object. If
*docx* is missing or ``None``, the built-in default document "template"
is loaded.
"""
def __init__(self, docx=None):
super(Document, self).__init__()
document_part, package = self._open(docx)
self._document_part = document_part
self._package = package
def add_heading(self, text='', level=1):
"""
Return a heading paragraph newly added to the end of the document,
populated with *text* and having the heading paragraph style
determined by *level*. If *level* is 0, the style is set to
``'Title'``. If *level* is 1 (or not present), ``'Heading1'`` is used.
Otherwise the style is set to ``'Heading{level}'``. If *level* is
outside the range 0-9, |ValueError| is raised.
"""
if not 0 <= level <= 9:
raise ValueError("level must be in range 0-9, got %d" % level)
style = 'Title' if level == 0 else 'Heading%d' % level
return self.add_paragraph(text, style)
def add_page_break(self):
"""
Return a paragraph newly added to the end of the document and
containing only a page break.
"""
p = self._document_part.add_paragraph()
r = p.add_run()
r.add_break(WD_BREAK.PAGE)
return p
def add_paragraph(self, text='', style=None):
"""
Return a paragraph newly added to the end of the document, populated
with *text* and having paragraph style *style*.
"""
p = self._document_part.add_paragraph()
if text:
r = p.add_run()
r.add_text(text)
if style is not None:
p.style = style
return p
def add_picture(self, image_path_or_stream, width=None, height=None):
"""
Add the image at *image_path_or_stream* in a new paragraph at the end
of the document. If neither width nor height is specified, the
picture appears at its native size. If only one is specified, it is
used to compute a scaling factor that is then applied to the
unspecified dimension, preserving the aspect ratio of the image. The
native size of the picture is calculated using the dots-per-inch
(dpi) value specified in the image file, defaulting to 72 dpi if no
value is specified, as is often the case.
"""
picture = self.inline_shapes.add_picture(image_path_or_stream)
# scale picture dimensions if width and/or height provided
if width is not None or height is not None:
native_width, native_height = picture.width, picture.height
if width is None:
scaling_factor = float(height) / float(native_height)
width = int(round(native_width * scaling_factor))
elif height is None:
scaling_factor = float(width) / float(native_width)
height = int(round(native_height * scaling_factor))
# set picture to scaled dimensions
picture.width = width
picture.height = height
return picture
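        # Hedged usage sketch (hypothetical path): supplying only *width*
        # scales *height* proportionally, per the docstring above.
        #   from docx.shared import Inches
        #   document = Document()
        #   document.add_picture('photo.png', width=Inches(3.0))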
def add_section(self, start_type=WD_SECTION.NEW_PAGE):
"""
Return a |Section| object representing a new section added at the end
of the document. The optional *start_type* argument must be a member
of the :ref:`WdSectionStart` enumeration defaulting to
``WD_SECTION.NEW_PAGE`` if not provided.
"""
return self._document_part.add_section(start_type)
def add_table(self, rows, cols, style='LightShading-Accent1'):
"""
Add a table having row and column counts of *rows* and *cols*
respectively and table style of *style*. If *style* is |None|, a
table with no style is produced.
"""
table = self._document_part.add_table(rows, cols)
if style:
table.style = style
return table
@property
def inline_shapes(self):
"""
Return a reference to the |InlineShapes| instance for this document.
"""
return self._document_part.inline_shapes
@lazyproperty
def numbering_part(self):
"""
Instance of |NumberingPart| for this document. Creates an empty
numbering part if one is not present.
"""
try:
return self._document_part.part_related_by(RT.NUMBERING)
except KeyError:
numbering_part = NumberingPart.new()
self._document_part.relate_to(numbering_part, RT.NUMBERING)
return numbering_part
@property
def paragraphs(self):
"""
A list of |Paragraph| instances corresponding to the paragraphs in
the document, in document order. Note that paragraphs within revision
marks such as ``<w:ins>`` or ``<w:del>`` do not appear in this list.
"""
return self._document_part.paragraphs
def save(self, path_or_stream):
"""
Save this document to *path_or_stream*, which can be either a path to
a filesystem location (a string) or a file-like object.
"""
self._package.save(path_or_stream)
@property
def sections(self):
"""
Return a reference to the |Sections| instance for this document.
"""
return self._document_part.sections
@lazyproperty
def styles_part(self):
"""
Instance of |StylesPart| for this document. Creates an empty styles
part if one is not present.
"""
try:
return self._document_part.part_related_by(RT.STYLES)
except KeyError:
styles_part = StylesPart.new()
self._document_part.relate_to(styles_part, RT.STYLES)
return styles_part
@lazyproperty
def header_part(self, section=None):
"""
Instance of |HeaderPart| for this document. Takes a section argument.
If none is passed, assumes default (sentinel) Section.
Creates an empty header part if one is not present.
"""
        if not section:
            section = self.sections[0]
try:
return self._document_part.part_related_by(RT.HEADER)
except KeyError:
header_part = HeaderPart.new()
self._document_part.relate_to(header_part, RT.HEADER)
return header_part
@property
def tables(self):
"""
A list of |Table| instances corresponding to the tables in the
document, in document order. Note that tables within revision marks
such as ``<w:ins>`` or ``<w:del>`` do not appear in this list.
"""
return self._document_part.tables
@staticmethod
def _open(docx):
"""
Return a (document_part, package) 2-tuple loaded from *docx*, where
*docx* can be either a path to a ``.docx`` file (a string) or a
file-like object. If *docx* is ``None``, the built-in default
document "template" is loaded.
"""
docx = _default_docx_path if docx is None else docx
package = Package.open(docx)
document_part = package.main_document
if document_part.content_type != CT.WML_DOCUMENT_MAIN:
tmpl = "file '%s' is not a Word file, content type is '%s'"
raise ValueError(tmpl % (docx, document_part.content_type))
return document_part, package
|
agoravoting/agora_elections
|
refs/heads/master
|
admin/cycle.py
|
1
|
#!/usr/bin/env python
# This file is part of agora_elections.
# Copyright (C) 2014-2016 Agora Voting SL <agora@agoravoting.com>
# agora_elections is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License.
# agora_elections is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with agora_elections. If not, see <http://www.gnu.org/licenses/>.
import admin
import sys
import StringIO
from functools import partial
import time
import json
import os
import traceback
import argparse
from argparse import RawTextHelpFormatter
public_ds = '../datastore/public'
private_ds = '../datastore/private'
class Args:
pass
def pks_path(id):
return os.path.join(public_ds, str(id), 'pks')
def tally_path(id):
return os.path.join(private_ds, str(id), 'tally.tar.gz')
def results_public_path(id):
return os.path.join(public_ds, str(id), 'results.json')
def tally_public_path(id):
return os.path.join(public_ds, str(id), 'tally.tar.gz')
def ids_path(id):
return os.path.join(private_ds, str(id), 'ids')
def capture_stdout(function):
def wrapper(*args):
stdout = sys.stdout
output = StringIO.StringIO()
sys.stdout = output
        function(*args)
sys.stdout = stdout
value = output.getvalue().strip()
return value
return wrapper
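# The decorated helpers below (get_state, count_votes) rely on this wrapper to
# turn whatever the admin.* command prints into a return value.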
@capture_stdout
def get_state(id):
cfg = {}
cfg['election_id'] = id
args = Args()
args.column = 'state'
admin.show_column(cfg, args)
@capture_stdout
def count_votes(id):
cfg = {}
cfg['election_id'] = id
args = Args()
admin.count_votes(cfg, args)
def wait_for_state(id, state, seconds):
def wait():
s = get_state(id)
print("waiting for '%s', got '%s'" % (state, s))
return s != "" and s in state
wait_for(wait, seconds)
def wait_for(function, max):
i = 0
limit = max / 5
while True:
if function() == True:
return 1
else:
if(i + 1 > limit):
raise Exception("timeout")
i += 1
time.sleep(5)
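# Hedged usage sketch (hypothetical predicate): poll every 5 seconds until
# the predicate holds or roughly `max` seconds elapse:
#   wait_for(lambda: os.path.isfile('results.json'), 60)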
def register(config):
cfg = {}
cfg['electionConfig'] = config
args = Args()
print('> register..')
admin.register(cfg, args)
def update(config):
cfg = {}
cfg['electionConfig'] = config
cfg['election_id'] = config['id']
args = Args()
print('> update..')
admin.update(cfg, args)
def create(id):
cfg = {}
cfg['election_id'] = id
args = Args()
print('> create..')
admin.create(cfg, args)
def dump_pks(id):
cfg = {}
cfg['election_id'] = id
args = Args()
print('> dump pks..')
admin.dump_pks(cfg, args)
if not os.path.isfile(pks_path(id)):
raise Exception('pks not found')
def encrypt(id, encrypt_count):
cfg = {}
cfg['election_id'] = id
cfg['plaintexts'] = 'votes.json'
cfg['encrypt-count'] = encrypt_count
cfg['ciphertexts'] = 'ciphertexts_' + str(cfg['election_id'])
args = Args()
print('> encrypt..')
admin.encrypt(cfg, args)
if not os.path.isfile(cfg['ciphertexts']):
raise Exception('ciphertexts not found')
def start(id):
cfg = {}
cfg['election_id'] = id
args = Args()
print('> start..')
admin.start(cfg, args)
def cast_votes(id):
cfg = {}
cfg['election_id'] = id
cfg['ciphertexts'] = 'ciphertexts_' + str(cfg['election_id'])
args = Args()
before = count_votes(id)
print('> cast_votes..')
admin.cast_votes(cfg, args)
after = count_votes(id)
print('votes after casting: %s' % after)
    if not int(after) > int(before):
raise Exception('no votes were cast')
def tally(id):
cfg = {}
cfg['election_id'] = id
args = Args()
print('> tally..')
admin.tally(cfg, args)
def stop(id):
cfg = {}
cfg['election_id'] = id
args = Args()
    print('> stop..')
admin.stop(cfg, args)
# remove
def tally_no_dump(id):
cfg = {}
cfg['election_id'] = id
args = Args()
print('> tally_no_dump..')
admin.tally_no_dump(cfg, args)
# remove
def dump_votes_with_ids(id):
cfg = {}
cfg['election_id'] = id
args = Args()
args.voter_ids = ids_path(id)
print('> dump_votes_with_ids..')
admin.dump_votes_with_ids(cfg, args)
def calculate_results(id, results_config=None):
if not os.path.isfile(tally_path(id)):
raise Exception('tally file not found (private ds)')
cfg = {}
cfg['election_id'] = id
args = Args()
args.results_config = results_config
print('> calculate_results..')
admin.calculate_results(cfg, args)
def publish_results(id):
cfg = {}
cfg['election_id'] = id
args = Args()
print('> publish_results..')
admin.publish_results(cfg, args)
if not os.path.isfile(results_public_path(id)):
raise Exception('results file not found (public ds)')
if not os.path.isfile(tally_public_path(id)):
        raise Exception('tally file not found (public ds)')
def serial(cfg, args):
try:
print('>>> starting serial run')
for i in range(0, args.total_cycles):
cfg['id'] = args.init_id + i
print('>> starting cycle id = %d' % cfg['id'])
register(cfg)
wait_for_state(cfg['id'], 'registered', 5)
j = 1
while True:
try:
create(cfg['id'])
wait_for_state(cfg['id'], 'created', 60)
break
except Exception as e:
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
print("trying again.. %d" % j)
j += 1
if j > 5:
raise e
dump_pks(cfg['id'])
encrypt(cfg['id'], args.encrypt_count)
start(cfg['id'])
wait_for_state(cfg['id'], 'started', 5)
cast_votes(cfg['id'])
j = 1
while True:
try:
tally(cfg['id'])
wait_for_state(cfg['id'], ['tally_ok', 'results_ok'], 500)
break
except Exception as e:
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
print("trying again.. %d" % j)
j += 1
if j > 5:
raise e
calculate_results(cfg['id'], args.results_config)
wait_for_state(cfg['id'], 'results_ok', 5)
publish_results(cfg['id'])
print('>>> finished serial run (last id = %d)' % cfg['id'])
except Exception as e:
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
state = get_state(cfg['id'])
print("id = %d, state is '%s'" % (cfg['id'], state))
def parallel(cfg, args):
try:
print('>>> starting parallel run')
for i in range(0, args.total_cycles):
cfg['id'] = args.init_id + i
print('>> create, id = %d' % cfg['id'])
register(cfg)
wait_for_state(cfg['id'], 'registered', 5)
j = 1
while True:
try:
create(cfg['id'])
wait_for_state(cfg['id'], 'created', 60)
break
except Exception as e:
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
print("trying again.. %d" % j)
j += 1
if j > 5:
raise e
for i in range(0, args.total_cycles):
cfg['id'] = args.init_id + i
print('>> vote, id = %d' % cfg['id'])
dump_pks(cfg['id'])
encrypt(cfg['id'], args.encrypt_count)
start(cfg['id'])
wait_for_state(cfg['id'], 'started', 5)
cast_votes(cfg['id'])
for i in range(0, args.total_cycles):
cfg['id'] = args.init_id + i
print('>> tally + publish, id = %d' % cfg['id'])
j = 1
while True:
try:
tally(cfg['id'])
wait_for_state(cfg['id'], ['tally_ok', 'results_ok'], 500)
break
except Exception as e:
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
print("trying again.. %d" % j)
j += 1
if j > 5:
raise e
calculate_results(cfg['id'], args.results_config)
wait_for_state(cfg['id'], 'results_ok', 5)
publish_results(cfg['id'])
print('>>> finished parallel run (last id = %d)' % cfg['id'])
except Exception as e:
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
state = get_state(cfg['id'])
print("id = %d, state is '%s'" % (cfg['id'], state))
def main(argv):
parser = argparse.ArgumentParser(description='cycle testing script', formatter_class=RawTextHelpFormatter)
parser.add_argument('-e', '--encrypt-count', help='number of votes to encrypt (generates duplicates if more than in json file)', type=int, default = 0)
parser.add_argument('-c', '--election-config', help='config file for election', default='election.json')
parser.add_argument('-r', '--results-config', help='config file for agora-results', default='config.json')
    parser.add_argument('-i', '--init-id', help='election id for the first cycle (defaults to max election id + 1)', type=int)
    parser.add_argument('-t', '--total-cycles', help='number of election cycles to run', type=int, default=1)
    parser.add_argument('-p', '--parallel', help='batch each phase across all elections instead of running full cycles one by one', action='store_true')
args = parser.parse_args()
print('************************ cfg ************************')
if args.init_id is None:
args.init_id = admin.get_max_electionid() + 1
print('election_config = %s' % args.election_config)
print('results_config = %s' % args.results_config)
print('init_id = %d' % args.init_id)
print('encrypt_count = %d' % args.encrypt_count)
print('total_cycles = %d' % args.total_cycles)
print('parallel = %s' % args.parallel)
if not os.path.isfile(args.election_config):
raise Exception("election config not found '%s'" % args.election_config)
if not os.path.isfile(args.results_config):
raise Exception("results config not found '%s'" % args.results_config)
with open(args.election_config, 'r') as f:
cfg = json.loads(f.read())
cfg['id'] = args.init_id
print(cfg)
print('*****************************************************')
if args.parallel:
parallel(cfg, args)
else:
serial(cfg, args)
if __name__ == "__main__":
main(sys.argv[1:])
|
liqd/adhocracy3.mercator
|
refs/heads/master
|
src/adhocracy_spd/adhocracy_spd/resources/test_root.py
|
4
|
from pytest import fixture
from pytest import mark
@fixture
def integration(integration):
integration.include('pyramid_mailer.testing')
return integration
def test_root_meta():
from adhocracy_core.resources.root import root_meta
from adhocracy_core.resources.root import \
create_initial_content_for_app_root
from .root import add_spd_process
from .root import spd_root_meta
assert add_spd_process not in root_meta.after_creation
assert add_spd_process in spd_root_meta.after_creation
assert create_initial_content_for_app_root in\
spd_root_meta.after_creation
@mark.usefixtures('integration')
def test_add_spd_process(pool, registry):
from .digital_leben import IProcess
from .root import add_spd_process
add_spd_process(pool, registry, {})
    assert IProcess.providedBy(pool['digital_leben'])
|
arkem/pyflag
|
refs/heads/main
|
src/plugins/MemoryForensics/Volatility-1.3_Linux_rc.1/memory_plugins/address_spaces/ewf.py
|
7
|
""" This Address Space allows us to open ewf files """
import standard
try:
## We must have this module or we dont activate ourselves
import pyewf
class EWFAddressSpace(standard.FileAddressSpace):
""" An EWF capable address space.
In order for us to work we need:
1) There must be a base AS.
2) The first 6 bytes must be 45 56 46 09 0D 0A (EVF header)
"""
order = 20
def __init__(self, base, opts):
assert(base)
assert(base.read(0,6) == "\x45\x56\x46\x09\x0D\x0A")
self.name = self.fname = opts['filename']
self.fhandle = pyewf.open([self.name])
self.mode = 'rb'
self.fhandle.seek(0,2)
self.fsize = self.fhandle.tell()
self.fhandle.seek(0)
def is_valid_address(self, addr):
return True
except ImportError:
pass
|
sriramvenkatapathy/pyswip
|
refs/heads/master
|
test/test_prolog.py
|
9
|
# -*- coding: utf-8 -*-
# pyswip -- Python SWI-Prolog bridge
# Copyright (c) 2007-2012 Yüce Tekol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests the Prolog class.
"""
import unittest
import doctest
import pyswip.prolog as pl # This implicitly tests library loading code
class TestProlog(unittest.TestCase):
"""
Unit tests for prolog module (contains only Prolog class).
"""
def test_nested_queries(self):
"""
SWI-Prolog cannot have nested queries called by the foreign function
interface, that is, if we open a query and are getting results from it,
we cannot open another query before closing that one.
        Since this is a user error, we just ensure that an appropriate error
        message is raised.
"""
p = pl.Prolog()
# Add something to the base
p.assertz("father(john,mich)")
p.assertz("father(john,gina)")
p.assertz("mother(jane,mich)")
somequery = "father(john, Y)"
otherquery = "mother(jane, X)"
# This should not throw an exception
for q in p.query(somequery):
pass
for q in p.query(otherquery):
pass
with self.assertRaises(pl.NestedQueryError):
for q in p.query(somequery):
for j in p.query(otherquery):
# This should throw an error, because I opened the second
# query
pass
if __name__ == "__main__":
unittest.main()
|
frederick-masterton/django
|
refs/heads/master
|
django/utils/module_loading.py
|
60
|
from __future__ import absolute_import # Avoid importing `importlib` from this package.
import copy
import imp
from importlib import import_module
import os
import sys
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
dotted_path, class_name)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
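# A minimal usage sketch:
#   ValidationError = import_string('django.core.exceptions.ValidationError')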
def import_by_path(dotted_path, error_prefix=''):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImproperlyConfigured if something goes wrong.
"""
warnings.warn(
'import_by_path() has been deprecated. Use import_string() instead.',
RemovedInDjango19Warning, stacklevel=2)
try:
attr = import_string(dotted_path)
except ImportError as e:
msg = '%sError importing module %s: "%s"' % (
error_prefix, dotted_path, e)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
sys.exc_info()[2])
return attr
def autodiscover_modules(*args, **kwargs):
"""
Auto-discover INSTALLED_APPS modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
You may provide a register_to keyword parameter as a way to access a
registry. This register_to object must have a _registry instance variable
to access it.
"""
from django.apps import apps
register_to = kwargs.get('register_to')
for app_config in apps.get_app_configs():
# Attempt to import the app's module.
try:
if register_to:
before_import_registry = copy.copy(register_to._registry)
for module_to_search in args:
import_module('%s.%s' % (app_config.name, module_to_search))
except:
# Reset the model registry to the state before the last import as
# this import will have to reoccur on the next request and this
# could raise NotRegistered and AlreadyRegistered exceptions
# (see #8245).
if register_to:
register_to._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have an admin module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(app_config.module, module_to_search):
raise
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
name = ".".join([package.__name__, module_name])
try:
# None indicates a cached miss; see mark_miss() in Python/import.c.
return sys.modules[name] is not None
except KeyError:
pass
try:
package_path = package.__path__ # No __path__, then not a package.
except AttributeError:
        # The remainder of this function assumes that we're dealing with
        # a package (a module with a __path__), so if it's not, bail here.
return False
for finder in sys.meta_path:
if finder.find_module(name, package_path):
return True
for entry in package_path:
try:
# Try the cached finder.
finder = sys.path_importer_cache[entry]
if finder is None:
# Implicit import machinery should be used.
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
continue
# Else see if the finder knows of a loader.
elif finder.find_module(name):
return True
else:
continue
except KeyError:
# No cached finder, so try and make one.
for hook in sys.path_hooks:
try:
finder = hook(entry)
# XXX Could cache in sys.path_importer_cache
if finder.find_module(name):
return True
else:
# Once a finder is found, stop the search.
break
except ImportError:
# Continue the search for a finder.
continue
else:
# No finder found.
# Try the implicit import machinery if searching a directory.
if os.path.isdir(entry):
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
pass
# XXX Could insert None or NullImporter
else:
# Exhausted the search, so the module cannot be found.
return False
|
macieksk/sagenb
|
refs/heads/works_with_sage-6.3
|
sagenb/notebook/user.py
|
2
|
# -*- coding: utf-8 -*-
import copy
import crypt
import cPickle
import random
import hashlib
import os
SALT = 'aa'
import user_conf
def User_from_basic(basic):
"""
Create a user from a basic data structure.
"""
user = User(basic['username'])
user.__dict__.update(dict([('_' + x, y) for x, y in basic.iteritems()]))
user._conf = user_conf.UserConfiguration_from_basic(user._conf)
return user
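# Hedged round-trip sketch (illustrative account): basic() and
# User_from_basic() are intended to be inverses up to __eq__:
#   u = User('bob', 'Aisfa!!', 'bob@sagemath.net', 'admin')
#   u == User_from_basic(u.basic())  # -> True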
def generate_salt():
"""
Returns a salt for use in hashing.
"""
return hex(random.getrandbits(256))[2:-1]
class User(object):
def __init__(self, username, password='', email='', account_type='admin', external_auth=None):
self._username = username
self.set_password(password)
self._email = email
self._email_confirmed = False
        if account_type not in ['admin', 'user', 'guest']:
raise ValueError("account type must be one of admin, user, or guest")
self._account_type = account_type
self._external_auth = external_auth
self._conf = user_conf.UserConfiguration()
self._temporary_password = ''
self._is_suspended = False
self._viewable_worksheets = set()
def __eq__(self, other):
if self.__class__ is not other.__class__:
return False
elif self.username() != other.username():
return False
elif self.get_email() != other.get_email():
return False
elif self.conf() != other.conf():
return False
elif self.account_type() != other.account_type():
return False
else:
return True
def __getstate__(self):
d = copy.copy(self.__dict__)
# Some old worksheets have this attribute, which we do *not* want to save.
if d.has_key('history'):
try:
self.save_history()
del d['history']
except Exception, msg:
print msg
print "Unable to dump history of user %s to disk yet."%self._username
return d
def basic(self):
"""
Return a basic Python data structure from which self can be
reconstructed.
"""
d = dict([ (x[1:],y) for x,y in self.__dict__.iteritems() if x[0]=='_'])
d['conf'] = self._conf.basic()
return d
def history_list(self):
try:
return self.history
except AttributeError:
import misc # late import
if misc.notebook is None: return []
history_file = "%s/worksheets/%s/history.sobj"%(misc.notebook.directory(), self._username)
if os.path.exists(history_file):
try:
self.history = cPickle.load(open(history_file))
except:
print "Error loading history for user %s"%self._username
self.history = []
else:
self.history = []
return self.history
def save_history(self):
if not hasattr(self, 'history'):
return
import misc # late import
if misc.notebook is None: return
history_file = "%s/worksheets/%s/history.sobj"%(misc.notebook.directory(), self._username)
try:
#print "Dumping %s history to '%s'"%(self.__username, history_file)
his = cPickle.dumps(self.history)
except AttributeError:
his = cPickle.dumps([])
open(history_file,'w').write(his)
def username(self):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: User('andrew', 'tEir&tiwk!', 'andrew@matrixstuff.com', 'user').username()
'andrew'
sage: User('sarah', 'Miaasc!', 'sarah@ellipticcurves.org', 'user').username()
'sarah'
sage: User('bob', 'Aisfa!!', 'bob@sagemath.net', 'admin').username()
'bob'
"""
return self._username
def password(self):
"""
Deprecated. Use user_manager object instead.
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: User('andrew', 'tEir&tiwk!', 'andrew@matrixstuff.com', 'user').password() #random
"""
return self._password
def __repr__(self):
return self._username
def conf(self):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: config = User('bob', 'Aisfa!!', 'bob@sagemath.net', 'admin').conf(); config
Configuration: {}
sage: config['max_history_length']
1000
sage: config['default_system']
'sage'
sage: config['autosave_interval']
3600
sage: config['default_pretty_print']
False
"""
return self._conf
def __getitem__(self, *args):
return self._conf.__getitem__(*args)
def __setitem__(self, *args):
self._conf.__setitem__(*args)
def set_password(self, password, encrypt=True):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: user = User('bob', 'Aisfa!!', 'bob@sagemath.net', 'admin')
sage: old = user.password()
sage: user.set_password('Crrc!')
sage: old != user.password()
True
"""
if password == '':
            self._password = 'x'  # won't match any password -- i.e., this account is closed.
else:
if encrypt:
salt = generate_salt()
self._password = 'sha256${0}${1}'.format(salt,
hashlib.sha256(salt + password).hexdigest())
else:
self._password = password
self._temporary_password = ''
def set_hashed_password(self, password):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: user = User('bob', 'Aisfa!!', 'bob@sagemath.net', 'admin')
sage: user.set_hashed_password('Crrc!')
sage: user.password()
'Crrc!'
"""
self._password = password
self._temporary_password = ''
def get_email(self):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: user = User('bob', 'Aisfa!!', 'bob@sagemath.net', 'admin')
sage: user.get_email()
'bob@sagemath.net'
"""
return self._email
def set_email(self, email):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: user = User('bob', 'Aisfa!!', 'bob@sagemath.net', 'admin')
sage: user.get_email()
'bob@sagemath.net'
sage: user.set_email('bob@gmail.gov')
sage: user.get_email()
'bob@gmail.gov'
"""
self._email = email
def set_email_confirmation(self, value):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: user = User('bob', 'Aisfa!!', 'bob@sagemath.net', 'admin')
sage: user.is_email_confirmed()
False
sage: user.set_email_confirmation(True)
sage: user.is_email_confirmed()
True
sage: user.set_email_confirmation(False)
sage: user.is_email_confirmed()
False
"""
value = bool(value)
self._email_confirmed = value
def is_email_confirmed(self):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: user = User('bob', 'Aisfa!!', 'bob@sagemath.net', 'admin')
sage: user.is_email_confirmed()
False
"""
try:
return self._email_confirmed
except AttributeError:
self._email_confirmed = False
return False
def account_type(self):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: User('A', account_type='admin').account_type()
'admin'
sage: User('B', account_type='user').account_type()
'user'
sage: User('C', account_type='guest').account_type()
'guest'
"""
if self._username == 'admin':
return 'admin'
return self._account_type
def is_admin(self):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: User('A', account_type='admin').is_admin()
True
sage: User('B', account_type='user').is_admin()
False
"""
return self.account_type() == 'admin'
def grant_admin(self):
if not self.is_guest():
self._account_type = 'admin'
def revoke_admin(self):
if not self.is_guest():
self._account_type = 'user'
def is_guest(self):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: User('A', account_type='guest').is_guest()
True
sage: User('B', account_type='user').is_guest()
False
"""
return self.account_type() == 'guest'
def is_external(self):
return self.external_auth() is not None
def external_auth(self):
return self._external_auth
def is_suspended(self):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: user = User('bob', 'Aisfa!!', 'bob@sagemath.net', 'admin')
sage: user.is_suspended()
False
"""
try:
return self._is_suspended
except AttributeError:
return False
def set_suspension(self):
"""
EXAMPLES::
sage: from sagenb.notebook.user import User
sage: user = User('bob', 'Aisfa!!', 'bob@sagemath.net', 'admin')
sage: user.is_suspended()
False
sage: user.set_suspension()
sage: user.is_suspended()
True
sage: user.set_suspension()
sage: user.is_suspended()
False
"""
try:
            self._is_suspended = not self._is_suspended
except AttributeError:
self._is_suspended = True
def viewable_worksheets(self):
"""
Returns the (mutable) set of viewable worksheets.
The elements of the set are of the form ('owner',id),
identifying worksheets the user is able to view.
"""
return self._viewable_worksheets
|
EMResearch/EMB
|
refs/heads/master
|
jdk_8_maven/cs/rest/original/languagetool/languagetool-language-modules/ar/src/main/resources/org/languagetool/resource/ar/grammar_csv2xml.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# test.py
# Convert a file which contains grammar rules into grammar xml format for LanguageTool
# The text file contains linguistic rules from book of "Guide of Commons errors" by Marwan Albawab
# The file has been manually processed and prepared for scripting.
# It contains the following columns:
# * style pattern (النمطة)
# * correction (المستبدل)
# * note (الملاحظة)
# * Error (الخطأ)
# * Correction (التصحيح)
import sys,re,string
import sys, getopt, os
scriptname = os.path.splitext(os.path.basename(sys.argv[0]))[0]
scriptversion = '0.1'
import pyarabic.araby as araby
AuthorName="Taha Zerrouki"
# Limit of the fields treatment
MAX_LINES_TREATED=1100000;
def usage():
# "Display usage options"
print "(C) CopyLeft 2017, %s"%AuthorName
print "Usage: %s -f filename [OPTIONS]" % scriptname
#"Display usage options"
print "\t[-h | --help]\t\toutputs this usage message"
print "\t[-v | --version]\tprogram version"
print "\t[-f | --file= filename]\tinput file to %s"%scriptname
print "\t[-l | --limit= limit_ number]\tthe limit of treated lines %s"%scriptname
    print "\r\nN.B. FILE FORMAT is described in README"
print "\r\nThis program is licensed under the GPL License\n"
def grabargs():
# "Grab command-line arguments"
fname = ''
limit=MAX_LINES_TREATED;
if not sys.argv[1:]:
usage()
sys.exit(0)
try:
        opts, args = getopt.getopt(sys.argv[1:], "hvf:l:",
["help", "version", "file=","limit="],)
except getopt.GetoptError:
usage()
sys.exit(0)
for o, val in opts:
if o in ("-h", "--help"):
usage()
sys.exit(0)
if o in ("-v", "--version"):
print scriptversion
sys.exit(0)
if o in ("-f", "--file"):
fname = val
if o in ("-l", "--limit"):
try:
limit = int(val);
except:
limit=MAX_LINES_TREATED;
return fname,limit
def main():
filename,limit= grabargs()
try:
fl=open(filename);
except:
        print "Error: No such file or directory: %s" % filename
sys.exit(0)
#abbrevated=False;
field_number=2;
cat_field_number=3;
    # skip the header line, then read the first data line
    line = fl.readline().decode("utf8")
    line = fl.readline().decode("utf8")
text=u""
rule_table=[];
nb_field=5;
while line :
line = line.strip('\n')
if not line.startswith("#"):
liste=line.split("\t");
liste = [x.strip() for x in liste]
if len(liste) >= nb_field:
rule_table.append(liste);
line=fl.readline().decode("utf8");
fl.close();
#limit=MAX_LINES_TREATED;
idrule = 1
for tuple_rule in rule_table[:limit]:
#
rule ={}
rule['pattern'] = tuple_rule[0].strip();
rule['suggestions'] = tuple_rule[1].strip();
rule['message'] = tuple_rule[2].strip();
rule['wrong_example'] = tuple_rule[3].strip();
rule['correct_example'] = tuple_rule[4].strip();
print treat_rule(rule, idrule).encode('utf8')
idrule += 1
def treat_rule(rule, idr):
""" treat rule to be displayed as LT grammar XML
XML format as:
<rule>
<pattern>
<marker><token>ثلاثة</token></marker>
<token postag='NFP'/>
</pattern>
<message>أتقصد <suggestion>ثلاث</suggestion>؟</message>
الاسم المؤنث يسبق بعدد مذكر
<example correction="ثلاثة"><marker>ثلاث</marker>أولاد</example>
<example correction="ثلاث"><marker>ثلاثة</marker>بنات</example>
</rule>
input format is
rule['pattern'] ;
rule['suggestions']
rule['message'] ;
rule['wrong_example'] ;
rule['correct_example'];
"""
pattern, message = treat_pattern(rule['pattern'], rule['suggestions'], rule['message'])
example = treat_example(rule['wrong_example'], rule['correct_example'])
    text = u"""\t<rule id='unsorted%03d'>
\t\t<pattern>
\t\t%s
\t\t</pattern>
\t\t<message>%s</message>
\t\t%s
\t\t<!-- Wrong: %s -->
\t\t<!--Correct: %s -->
\t</rule>
"""%(idr, pattern, message, example, rule['wrong_example'], rule['correct_example'])
return text
def treat_pattern(pattern, suggestions, message):
"""
Extract infos and fields from input
"""
tokens = araby.tokenize(pattern)
patternxml = u"""<token>%s</token>"""%u"</token>\n\t\t<token>".join(tokens)
sugs = suggestions.split('|')
sugsxml = u"""\t\t<suggestion>%s</suggestion>"""%u"</suggestion>\n\t\t<suggestion>".join(sugs)
messagexml = u"""يفضل أن يقال:\n%s\n%s"""%(sugsxml, message)
return patternxml, messagexml
def treat_example(wrong_example, correct_example):
""" create an element to represent an example of error """
# split tokens
correct_example = correct_example.split('/')[0]
correct_tokens = araby.tokenize(correct_example)
wrong_tokens = araby.tokenize(wrong_example)
correct_word , wrong_tokens = diff(wrong_tokens, correct_tokens)
correct_word = u" ".join(correct_word)
wrong_output = u" ".join(wrong_tokens)
example = u"<example correction='%s'>%s</example>\n"%(correct_word, wrong_output)
return example
def diff(wrong, correct):
    """ diff two token lists: return the differing tokens of the correct
    list, and the wrong list with its differing span wrapped in <marker>"""
    i = 0
    # equal parts from the beginning
    while i < min(len(wrong), len(correct)) and correct[i] == wrong[i]:
        i += 1
    start = i
    # equal parts from the end
    i = len(correct) - 1
    j = len(wrong) - 1
    while i >= start and j >= start and correct[i] == wrong[j]:
        i -= 1
        j -= 1
    end_correct = i
    end_wrong = j
    # the end indexes are inclusive, so extend the slices by one
    correct_word = correct[start:end_correct + 1]
    wrong = wrong[:start] + ['<marker>', ] + wrong[start:end_wrong + 1] + ['</marker>', ] + wrong[end_wrong + 1:]
    return correct_word, wrong
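# A hedged sketch of the diff() contract (synthetic tokens):
#   diff(['a', 'X', 'b'], ['a', 'Y', 'b'])
#   -> (['Y'], ['a', '<marker>', 'X', '</marker>', 'b'])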
if __name__ == "__main__":
main()
|
oasiswork/odoo
|
refs/heads/8.0
|
addons/website_event/controllers/main.py
|
209
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import babel.dates
import time
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import werkzeug.urls
from werkzeug.exceptions import NotFound
from openerp import http
from openerp import tools
from openerp.http import request
from openerp.tools.translate import _
from openerp.addons.website.models.website import slug
class website_event(http.Controller):
@http.route(['/event', '/event/page/<int:page>'], type='http', auth="public", website=True)
def events(self, page=1, **searches):
cr, uid, context = request.cr, request.uid, request.context
event_obj = request.registry['event.event']
type_obj = request.registry['event.type']
country_obj = request.registry['res.country']
searches.setdefault('date', 'all')
searches.setdefault('type', 'all')
searches.setdefault('country', 'all')
domain_search = {}
def sdn(date):
return date.strftime('%Y-%m-%d 23:59:59')
def sd(date):
return date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
today = datetime.today()
dates = [
['all', _('Next Events'), [("date_end", ">", sd(today))], 0],
['today', _('Today'), [
("date_end", ">", sd(today)),
("date_begin", "<", sdn(today))],
0],
['week', _('This Week'), [
("date_end", ">=", sd(today + relativedelta(days=-today.weekday()))),
("date_begin", "<", sdn(today + relativedelta(days=6-today.weekday())))],
0],
['nextweek', _('Next Week'), [
("date_end", ">=", sd(today + relativedelta(days=7-today.weekday()))),
("date_begin", "<", sdn(today + relativedelta(days=13-today.weekday())))],
0],
['month', _('This month'), [
("date_end", ">=", sd(today.replace(day=1))),
("date_begin", "<", (today.replace(day=1) + relativedelta(months=1)).strftime('%Y-%m-%d 00:00:00'))],
0],
['nextmonth', _('Next month'), [
("date_end", ">=", sd(today.replace(day=1) + relativedelta(months=1))),
("date_begin", "<", (today.replace(day=1) + relativedelta(months=2)).strftime('%Y-%m-%d 00:00:00'))],
0],
['old', _('Old Events'), [
("date_end", "<", today.strftime('%Y-%m-%d 00:00:00'))],
0],
]
# search domains
current_date = None
current_type = None
current_country = None
for date in dates:
if searches["date"] == date[0]:
domain_search["date"] = date[2]
if date[0] != 'all':
current_date = date[1]
if searches["type"] != 'all':
current_type = type_obj.browse(cr, uid, int(searches['type']), context=context)
domain_search["type"] = [("type", "=", int(searches["type"]))]
if searches["country"] != 'all' and searches["country"] != 'online':
current_country = country_obj.browse(cr, uid, int(searches['country']), context=context)
domain_search["country"] = ['|', ("country_id", "=", int(searches["country"])), ("country_id", "=", False)]
elif searches["country"] == 'online':
domain_search["country"] = [("country_id", "=", False)]
def dom_without(without):
domain = [('state', "in", ['draft','confirm','done'])]
for key, search in domain_search.items():
if key != without:
domain += search
return domain
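        # e.g. dom_without('type') keeps the date/country filters but drops
        # the type filter, so each facet's counts below reflect the other
        # active filters.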
# count by domains without self search
for date in dates:
            if date[0] != 'old':
date[3] = event_obj.search(
request.cr, request.uid, dom_without('date') + date[2],
count=True, context=request.context)
domain = dom_without('type')
types = event_obj.read_group(
request.cr, request.uid, domain, ["id", "type"], groupby="type",
orderby="type", context=request.context)
type_count = event_obj.search(request.cr, request.uid, domain,
count=True, context=request.context)
types.insert(0, {
'type_count': type_count,
'type': ("all", _("All Categories"))
})
domain = dom_without('country')
countries = event_obj.read_group(
request.cr, request.uid, domain, ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
country_id_count = event_obj.search(request.cr, request.uid, domain,
count=True, context=request.context)
countries.insert(0, {
'country_id_count': country_id_count,
'country_id': ("all", _("All Countries"))
})
step = 10 # Number of events per page
event_count = event_obj.search(
request.cr, request.uid, dom_without("none"), count=True,
context=request.context)
pager = request.website.pager(
url="/event",
url_args={'date': searches.get('date'), 'type': searches.get('type'), 'country': searches.get('country')},
total=event_count,
page=page,
step=step,
scope=5)
order = 'website_published desc, date_begin'
if searches.get('date','all') == 'old':
order = 'website_published desc, date_begin desc'
obj_ids = event_obj.search(
request.cr, request.uid, dom_without("none"), limit=step,
offset=pager['offset'], order=order, context=request.context)
events_ids = event_obj.browse(request.cr, request.uid, obj_ids,
context=request.context)
values = {
'current_date': current_date,
'current_country': current_country,
'current_type': current_type,
'event_ids': events_ids,
'dates': dates,
'types': types,
'countries': countries,
'pager': pager,
'searches': searches,
'search_path': "?%s" % werkzeug.url_encode(searches),
}
return request.website.render("website_event.index", values)
@http.route(['/event/<model("event.event"):event>/page/<path:page>'], type='http', auth="public", website=True)
def event_page(self, event, page, **post):
values = {
'event': event,
'main_object': event
}
if '.' not in page:
page = 'website_event.%s' % page
try:
request.website.get_template(page)
except ValueError, e:
# page not found
raise NotFound
return request.website.render(page, values)
@http.route(['/event/<model("event.event"):event>'], type='http', auth="public", website=True)
def event(self, event, **post):
if event.menu_id and event.menu_id.child_id:
target_url = event.menu_id.child_id[0].url
else:
target_url = '/event/%s/register' % str(event.id)
if post.get('enable_editor') == '1':
target_url += '?enable_editor=1'
        return request.redirect(target_url)
@http.route(['/event/<model("event.event"):event>/register'], type='http', auth="public", website=True)
def event_register(self, event, **post):
values = {
'event': event,
'main_object': event,
'range': range,
}
return request.website.render("website_event.event_description_full", values)
@http.route('/event/add_event', type='http', auth="user", methods=['POST'], website=True)
def add_event(self, event_name="New Event", **kwargs):
return self._add_event(event_name, request.context, **kwargs)
def _add_event(self, event_name=None, context={}, **kwargs):
if not event_name:
event_name = _("New Event")
Event = request.registry.get('event.event')
date_begin = datetime.today() + timedelta(days=(14))
vals = {
'name': event_name,
'date_begin': date_begin.strftime('%Y-%m-%d'),
'date_end': (date_begin + timedelta(days=(1))).strftime('%Y-%m-%d'),
}
event_id = Event.create(request.cr, request.uid, vals, context=context)
event = Event.browse(request.cr, request.uid, event_id, context=context)
return request.redirect("/event/%s/register?enable_editor=1" % slug(event))
def get_formated_date(self, event):
context = request.context
start_date = datetime.strptime(event.date_begin, tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
end_date = datetime.strptime(event.date_end, tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
month = babel.dates.get_month_names('abbreviated', locale=context.get('lang', 'en_US'))[start_date.month]
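        # e.g. an event running 5-7 March renders as "Mar  5- 7" ("%e" pads
        # single-digit days with a space); same-day events drop the end part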
return _('%(month)s %(start_day)s%(end_day)s') % {
'month': month,
'start_day': start_date.strftime("%e"),
'end_day': (end_date != start_date and ("-"+end_date.strftime("%e")) or "")
}
@http.route('/event/get_country_event_list', type='http', auth='public', website=True)
    def get_country_events(self, **post):
        cr, uid, context, event_ids = request.cr, request.uid, request.context, []
country_obj = request.registry['res.country']
event_obj = request.registry['event.event']
country_code = request.session['geoip'].get('country_code')
        result = {'events': [], 'country': False}
if country_code:
country_ids = country_obj.search(cr, uid, [('code', '=', country_code)], context=context)
            event_ids = event_obj.search(
                cr, uid,
                ['|', ('address_id', '=', None), ('country_id.code', '=', country_code),
                 ('date_begin', '>=', time.strftime('%Y-%m-%d 00:00:00')),
                 ('state', '=', 'confirm')],
                order="date_begin", context=context)
        if not event_ids:
            event_ids = event_obj.search(
                cr, uid,
                [('date_begin', '>=', time.strftime('%Y-%m-%d 00:00:00')),
                 ('state', '=', 'confirm')],
                order="date_begin", context=context)
for event in event_obj.browse(cr, uid, event_ids, context=context)[:6]:
if country_code and event.country_id.code == country_code:
result['country'] = country_obj.browse(cr, uid, country_ids[0], context=context)
result['events'].append({
"date": self.get_formated_date(event),
"event": event,
"url": event.website_url})
return request.website.render("website_event.country_events_list",result)
|
droidlabour/git_intgrtn_aws_s3
|
refs/heads/master
|
CreateSSHKey/Crypto/SelfTest/Hash/test_HMAC.py
|
117
|
# -*- coding: utf-8 -*-
#
# SelfTest/Hash/HMAC.py: Self-test for the HMAC module
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Hash.HMAC"""
__revision__ = "$Id$"
from common import dict # For compatibility with Python 2.1 and 2.2
from Crypto.Util.py3compat import *
# This is a list of (key, data, results, description) tuples.
test_data = [
## Test vectors from RFC 2202 ##
# Test that the default hashmod is MD5
('0b' * 16,
'4869205468657265',
dict(default='9294727a3638bb1c13f48ef8158bfc9d'),
'default-is-MD5'),
# Test case 1 (MD5)
('0b' * 16,
'4869205468657265',
dict(MD5='9294727a3638bb1c13f48ef8158bfc9d'),
'RFC 2202 #1-MD5 (HMAC-MD5)'),
# Test case 1 (SHA1)
('0b' * 20,
'4869205468657265',
dict(SHA1='b617318655057264e28bc0b6fb378c8ef146be00'),
'RFC 2202 #1-SHA1 (HMAC-SHA1)'),
# Test case 2
('4a656665',
'7768617420646f2079612077616e7420666f72206e6f7468696e673f',
dict(MD5='750c783e6ab0b503eaa86e310a5db738',
SHA1='effcdf6ae5eb2fa2d27416d5f184df9c259a7c79'),
'RFC 2202 #2 (HMAC-MD5/SHA1)'),
# Test case 3 (MD5)
('aa' * 16,
'dd' * 50,
dict(MD5='56be34521d144c88dbb8c733f0e8b3f6'),
'RFC 2202 #3-MD5 (HMAC-MD5)'),
# Test case 3 (SHA1)
('aa' * 20,
'dd' * 50,
dict(SHA1='125d7342b9ac11cd91a39af48aa17b4f63f175d3'),
'RFC 2202 #3-SHA1 (HMAC-SHA1)'),
# Test case 4
('0102030405060708090a0b0c0d0e0f10111213141516171819',
'cd' * 50,
dict(MD5='697eaf0aca3a3aea3a75164746ffaa79',
SHA1='4c9007f4026250c6bc8414f9bf50c86c2d7235da'),
'RFC 2202 #4 (HMAC-MD5/SHA1)'),
# Test case 5 (MD5)
('0c' * 16,
'546573742057697468205472756e636174696f6e',
dict(MD5='56461ef2342edc00f9bab995690efd4c'),
'RFC 2202 #5-MD5 (HMAC-MD5)'),
# Test case 5 (SHA1)
# NB: We do not implement hash truncation, so we only test the full hash here.
('0c' * 20,
'546573742057697468205472756e636174696f6e',
dict(SHA1='4c1a03424b55e07fe7f27be1d58bb9324a9a5a04'),
'RFC 2202 #5-SHA1 (HMAC-SHA1)'),
# Test case 6
('aa' * 80,
'54657374205573696e67204c6172676572205468616e20426c6f636b2d53697a'
+ '65204b6579202d2048617368204b6579204669727374',
dict(MD5='6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd',
SHA1='aa4ae5e15272d00e95705637ce8a3b55ed402112'),
'RFC 2202 #6 (HMAC-MD5/SHA1)'),
# Test case 7
('aa' * 80,
'54657374205573696e67204c6172676572205468616e20426c6f636b2d53697a'
+ '65204b657920616e64204c6172676572205468616e204f6e6520426c6f636b2d'
+ '53697a652044617461',
dict(MD5='6f630fad67cda0ee1fb1f562db3aa53e',
SHA1='e8e99d0f45237d786d6bbaa7965c7808bbff1a91'),
'RFC 2202 #7 (HMAC-MD5/SHA1)'),
## Test vectors from RFC 4231 ##
# 4.2. Test Case 1
('0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b',
'4869205468657265',
dict(SHA256='''
b0344c61d8db38535ca8afceaf0bf12b
881dc200c9833da726e9376c2e32cff7
'''),
'RFC 4231 #1 (HMAC-SHA256)'),
# 4.3. Test Case 2 - Test with a key shorter than the length of the HMAC
# output.
('4a656665',
'7768617420646f2079612077616e7420666f72206e6f7468696e673f',
dict(SHA256='''
5bdcc146bf60754e6a042426089575c7
5a003f089d2739839dec58b964ec3843
'''),
'RFC 4231 #2 (HMAC-SHA256)'),
# 4.4. Test Case 3 - Test with a combined length of key and data that is
# larger than 64 bytes (= block-size of SHA-224 and SHA-256).
('aa' * 20,
'dd' * 50,
dict(SHA256='''
773ea91e36800e46854db8ebd09181a7
2959098b3ef8c122d9635514ced565fe
'''),
'RFC 4231 #3 (HMAC-SHA256)'),
# 4.5. Test Case 4 - Test with a combined length of key and data that is
# larger than 64 bytes (= block-size of SHA-224 and SHA-256).
('0102030405060708090a0b0c0d0e0f10111213141516171819',
'cd' * 50,
dict(SHA256='''
82558a389a443c0ea4cc819899f2083a
85f0faa3e578f8077a2e3ff46729665b
'''),
'RFC 4231 #4 (HMAC-SHA256)'),
# 4.6. Test Case 5 - Test with a truncation of output to 128 bits.
#
# Not included because we do not implement hash truncation.
#
# 4.7. Test Case 6 - Test with a key larger than 128 bytes (= block-size of
# SHA-384 and SHA-512).
('aa' * 131,
'54657374205573696e67204c6172676572205468616e20426c6f636b2d53697a'
+ '65204b6579202d2048617368204b6579204669727374',
dict(SHA256='''
60e431591ee0b67f0d8a26aacbf5b77f
8e0bc6213728c5140546040f0ee37f54
'''),
'RFC 4231 #6 (HMAC-SHA256)'),
# 4.8. Test Case 7 - Test with a key and data that is larger than 128 bytes
# (= block-size of SHA-384 and SHA-512).
('aa' * 131,
'5468697320697320612074657374207573696e672061206c6172676572207468'
+ '616e20626c6f636b2d73697a65206b657920616e642061206c61726765722074'
+ '68616e20626c6f636b2d73697a6520646174612e20546865206b6579206e6565'
+ '647320746f20626520686173686564206265666f7265206265696e6720757365'
+ '642062792074686520484d414320616c676f726974686d2e',
dict(SHA256='''
9b09ffa71b942fcb27635fbcd5b0e944
bfdc63644f0713938a7f51535c3a35e2
'''),
'RFC 4231 #7 (HMAC-SHA256)'),
]
hashlib_test_data = [
# Test case 8 (SHA224)
('4a656665',
'7768617420646f2079612077616e74'
+ '20666f72206e6f7468696e673f',
dict(SHA224='a30e01098bc6dbbf45690f3a7e9e6d0f8bbea2a39e6148008fd05e44'),
'RFC 4634 8.4 SHA224 (HMAC-SHA224)'),
# Test case 9 (SHA384)
('4a656665',
'7768617420646f2079612077616e74'
+ '20666f72206e6f7468696e673f',
dict(SHA384='af45d2e376484031617f78d2b58a6b1b9c7ef464f5a01b47e42ec3736322445e8e2240ca5e69e2c78b3239ecfab21649'),
'RFC 4634 8.4 SHA384 (HMAC-SHA384)'),
# Test case 10 (SHA512)
('4a656665',
'7768617420646f2079612077616e74'
+ '20666f72206e6f7468696e673f',
dict(SHA512='164b7a7bfcf819e2e395fbe73b56e0a387bd64222e831fd610270cd7ea2505549758bf75c05a994a6d034f65f8f0e6fdcaeab1a34d4a6b4b636e070a38bce737'),
'RFC 4634 8.4 SHA512 (HMAC-SHA512)'),
]
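# A minimal sketch of how one (key, data, results, description) tuple above
# maps onto the HMAC API, assuming Crypto.Hash is importable as get_tests()
# below requires: hex strings are decoded before use and digests compare as
# lowercase hex.
def _example_check_first_vector():
    from binascii import unhexlify
    from Crypto.Hash import HMAC, MD5
    mac = HMAC.new(unhexlify('0b' * 16), unhexlify('4869205468657265'), MD5)
    assert mac.hexdigest() == '9294727a3638bb1c13f48ef8158bfc9d'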
def get_tests(config={}):
global test_data
from Crypto.Hash import HMAC, MD5, SHA as SHA1, SHA256
from common import make_mac_tests
hashmods = dict(MD5=MD5, SHA1=SHA1, SHA256=SHA256, default=None)
try:
from Crypto.Hash import SHA224, SHA384, SHA512
hashmods.update(dict(SHA224=SHA224, SHA384=SHA384, SHA512=SHA512))
test_data += hashlib_test_data
except ImportError:
import sys
sys.stderr.write("SelfTest: warning: not testing HMAC-SHA224/384/512 (not available)\n")
return make_mac_tests(HMAC, "HMAC", test_data, hashmods)
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
yaybu/touchdown
|
refs/heads/master
|
touchdown/aws/iam/__init__.py
|
1
|
# Copyright 2014 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .instance_profile import InstanceProfile
from .role import Role
from .server_certificate import ServerCertificate
__all__ = ["InstanceProfile", "Role", "ServerCertificate"]
|
the-zebulan/CodeWars
|
refs/heads/master
|
katas/kyu_7/dna_gc_content.py
|
1
|
def gc_content(seq):
if not seq:
return 0
gc_cnt = total_chars = 0
for a in seq:
if a in 'GC':
gc_cnt += 1
total_chars += 1
return round(100.0 * gc_cnt / total_chars, 2)
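# For example: 'AGCTATAG' contains 3 G/C among 8 characters, so
# gc_content('AGCTATAG') returns 37.5, and gc_content('') returns 0.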
|
kikocorreoso/brython
|
refs/heads/master
|
www/src/Lib/test/support/testresult.py
|
2
|
'''Test runner and result class for the regression test suite.
'''
import functools
import io
import sys
import time
import traceback
import unittest
# Brython: xml is not available
# import xml.etree.ElementTree as ET
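# Note: RegressionTestResult below still references ET, so its XML-output
# paths would raise NameError here unless an ElementTree substitute is
# provided.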
from datetime import datetime
class RegressionTestResult(unittest.TextTestResult):
separator1 = '=' * 70 + '\n'
separator2 = '-' * 70 + '\n'
def __init__(self, stream, descriptions, verbosity):
super().__init__(stream=stream, descriptions=descriptions, verbosity=0)
self.buffer = True
self.__suite = ET.Element('testsuite')
self.__suite.set('start', datetime.utcnow().isoformat(' '))
self.__e = None
self.__start_time = None
self.__results = []
self.__verbose = bool(verbosity)
@classmethod
def __getId(cls, test):
try:
test_id = test.id
except AttributeError:
return str(test)
try:
return test_id()
except TypeError:
return str(test_id)
return repr(test)
def startTest(self, test):
super().startTest(test)
self.__e = e = ET.SubElement(self.__suite, 'testcase')
self.__start_time = time.perf_counter()
if self.__verbose:
self.stream.write(f'{self.getDescription(test)} ... ')
self.stream.flush()
def _add_result(self, test, capture=False, **args):
e = self.__e
self.__e = None
if e is None:
return
e.set('name', args.pop('name', self.__getId(test)))
e.set('status', args.pop('status', 'run'))
e.set('result', args.pop('result', 'completed'))
if self.__start_time:
e.set('time', f'{time.perf_counter() - self.__start_time:0.6f}')
if capture:
if self._stdout_buffer is not None:
stdout = self._stdout_buffer.getvalue().rstrip()
ET.SubElement(e, 'system-out').text = stdout
if self._stderr_buffer is not None:
stderr = self._stderr_buffer.getvalue().rstrip()
ET.SubElement(e, 'system-err').text = stderr
for k, v in args.items():
if not k or not v:
continue
e2 = ET.SubElement(e, k)
if hasattr(v, 'items'):
for k2, v2 in v.items():
if k2:
e2.set(k2, str(v2))
else:
e2.text = str(v2)
else:
e2.text = str(v)
def __write(self, c, word):
if self.__verbose:
self.stream.write(f'{word}\n')
@classmethod
def __makeErrorDict(cls, err_type, err_value, err_tb):
if isinstance(err_type, type):
if err_type.__module__ == 'builtins':
typename = err_type.__name__
else:
typename = f'{err_type.__module__}.{err_type.__name__}'
else:
typename = repr(err_type)
msg = traceback.format_exception(err_type, err_value, None)
tb = traceback.format_exception(err_type, err_value, err_tb)
return {
'type': typename,
'message': ''.join(msg),
'': ''.join(tb),
}
def addError(self, test, err):
self._add_result(test, True, error=self.__makeErrorDict(*err))
super().addError(test, err)
self.__write('E', 'ERROR')
def addExpectedFailure(self, test, err):
self._add_result(test, True, output=self.__makeErrorDict(*err))
super().addExpectedFailure(test, err)
self.__write('x', 'expected failure')
def addFailure(self, test, err):
self._add_result(test, True, failure=self.__makeErrorDict(*err))
super().addFailure(test, err)
self.__write('F', 'FAIL')
def addSkip(self, test, reason):
self._add_result(test, skipped=reason)
super().addSkip(test, reason)
self.__write('S', f'skipped {reason!r}')
def addSuccess(self, test):
self._add_result(test)
super().addSuccess(test)
self.__write('.', 'ok')
def addUnexpectedSuccess(self, test):
self._add_result(test, outcome='UNEXPECTED_SUCCESS')
super().addUnexpectedSuccess(test)
self.__write('u', 'unexpected success')
def printErrors(self):
if self.__verbose:
self.stream.write('\n')
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavor, errors):
for test, err in errors:
self.stream.write(self.separator1)
self.stream.write(f'{flavor}: {self.getDescription(test)}\n')
self.stream.write(self.separator2)
self.stream.write('%s\n' % err)
def get_xml_element(self):
e = self.__suite
e.set('tests', str(self.testsRun))
e.set('errors', str(len(self.errors)))
e.set('failures', str(len(self.failures)))
return e
class QuietRegressionTestRunner:
def __init__(self, stream, buffer=False):
self.result = RegressionTestResult(stream, None, 0)
self.result.buffer = buffer
def run(self, test):
test(self.result)
return self.result
def get_test_runner_class(verbosity, buffer=False):
if verbosity:
return functools.partial(unittest.TextTestRunner,
resultclass=RegressionTestResult,
buffer=buffer,
verbosity=verbosity)
return functools.partial(QuietRegressionTestRunner, buffer=buffer)
def get_test_runner(stream, verbosity, capture_output=False):
return get_test_runner_class(verbosity, capture_output)(stream)
if __name__ == '__main__':
class TestTests(unittest.TestCase):
def test_pass(self):
pass
def test_pass_slow(self):
time.sleep(1.0)
def test_fail(self):
print('stdout', file=sys.stdout)
print('stderr', file=sys.stderr)
self.fail('failure message')
def test_error(self):
print('stdout', file=sys.stdout)
print('stderr', file=sys.stderr)
raise RuntimeError('error message')
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestTests))
stream = io.StringIO()
runner_cls = get_test_runner_class(sum(a == '-v' for a in sys.argv))
runner = runner_cls(sys.stdout)
result = runner.run(suite)
print('Output:', stream.getvalue())
print('XML: ', end='')
for s in ET.tostringlist(result.get_xml_element()):
print(s.decode(), end='')
print()
|
cromambo/pythonIntro
|
refs/heads/master
|
fizzbuzz/test_fizzbuzz.py
|
1
|
from fizzbuzz import fizzbuzz
import pytest
def test_fizzbuzz_2_9():
assert fizzbuzz(9, 2, 9) == 'buzz'
assert fizzbuzz(17, 2, 9) == 17
assert fizzbuzz(18, 2, 9) == 'fizzbuzz'
assert fizzbuzz(19, 2, 9) == 19
assert fizzbuzz(20, 2, 9) == 'fizz'
def test_fizzbuzz_divzero():
assert fizzbuzz(20, 0, 9) == 'error'
assert fizzbuzz(20, 9, 0) == 'error'
assert fizzbuzz(20, 0, 0) == 'error'
assert fizzbuzz(0, 0, 0) == 'error'
assert fizzbuzz(0, 3, 0) == 'error'
assert fizzbuzz(0, 0, 5) == 'error'
assert fizzbuzz(0, 3, 5) == 'fizzbuzz'
def test_fizzbuzz_3_5_nondivisible():
nonfizzbuzzNumList = [1, 2, 4, 7, -1, -2, -4, -7]
for num in nonfizzbuzzNumList:
assert fizzbuzz(num, 3, 5) == num
def test_fizzbuzz_3_5_fizz():
fizzNumList = [3, 6, 9, 12, 18, 21, -3, -6, -9]
for num in fizzNumList:
assert fizzbuzz(num, 3, 5) == 'fizz'
def test_fizzbuzz_3_5_buzz():
buzzNumList = [5, 10, 20, 25, 35, -5, -10, -20, -25, -35]
for num in buzzNumList:
assert fizzbuzz(num, 3, 5) == 'buzz'
def test_fizzbuzz_3_5_fizzbuzz():
fizzbuzzNumList = [15, 30, 45, 60, 150, 0, -15, -30, -45, -60, -150]
for num in fizzbuzzNumList:
assert fizzbuzz(num, 3, 5) == 'fizzbuzz'
def test_fizzbuzz_non_numeric_input():
    dummylist = []
    junkValueList = ['a', 'string', '3', dummylist]
    # enter the context manager per value, otherwise only the first
    # raising call would ever be exercised
    for val in junkValueList:
        with pytest.raises(TypeError):
            fizzbuzz(val, 3, 5)
|
cortedeltimo/SickRage
|
refs/heads/master
|
lib/fanart/items.py
|
61
|
import json
import os
import requests
from fanart.core import Request
from fanart.immutable import Immutable
class LeafItem(Immutable):
KEY = NotImplemented
@Immutable.mutablemethod
def __init__(self, id, url, likes):
self.id = int(id)
self.url = url
self.likes = int(likes)
self._content = None
@classmethod
def from_dict(cls, resource):
return cls(**dict([(str(k), v) for k, v in resource.iteritems()]))
@classmethod
def extract(cls, resource):
return [cls.from_dict(i) for i in resource.get(cls.KEY, {})]
@Immutable.mutablemethod
def content(self):
if not self._content:
self._content = requests.get(self.url).content
return self._content
def __str__(self):
return self.url
class ResourceItem(Immutable):
WS = NotImplemented
request_cls = Request
@classmethod
def from_dict(cls, map):
raise NotImplementedError
@classmethod
def get(cls, id):
map = cls.request_cls(
apikey=os.environ.get('FANART_APIKEY'),
id=id,
ws=cls.WS
).response()
return cls.from_dict(map)
def json(self, **kw):
return json.dumps(
self,
default=lambda o: dict([(k, v) for k, v in o.__dict__.items() if not k.startswith('_')]),
**kw
)
class CollectableItem(Immutable):
@classmethod
def from_dict(cls, key, map):
raise NotImplementedError
@classmethod
def collection_from_dict(cls, map):
return [cls.from_dict(k, v) for k, v in map.iteritems()]
|
javierriveracastro/descargador
|
refs/heads/master
|
trunk/Descargador.py
|
1
|
# coding=utf-8
"""
Umbria downloader, a program to download games from:
http://www.comunidadumbria.com
(c) 2013 Javier Rivera
"""
import sys
from PyQt4 import QtGui, QtCore
from UmbriaDescargar.DescargarUi import Ui_MainWindow
from UmbriaDescargar.Umbria import Partida, PartidaPruebas
from UmbriaDescargar.UmbriaTex import GeneradorText
# TODO: Program icon
# TODO: Connection error handling
# TODO: Refactor the progress screen, replace it with something more elegant.
# For example a method on Principal plus a callback. And with more detail.
# TODO: Option to skip downloading notes
# TODO: Clean up temporary files
class Principal(QtGui.QMainWindow, object):
"""
    Main window of the application
"""
def __init__(self, pruebas=False):
QtGui.QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# noinspection PyUnresolvedReferences
self.ui.pushConectar.clicked.connect(self.cargar_partida)
# noinspection PyUnresolvedReferences
self.ui.btnPdf.clicked.connect(self.genera_pdf)
self.partida = None
self.pruebas = pruebas
# noinspection PyArgumentList
def cargar_partida(self):
"""
        Connects to Umbria and loads a game.
"""
if not self.pruebas:
try:
self.partida = Partida(self.ui.editCodigoPartida.text(),
self.ui.editUsuario.text(),
self.ui.editContrasena.text())
except IOError:
# noinspection PyCallByClass,PyArgumentList,PyTypeChecker
QtGui.QMessageBox.critical(
self, self.windowTitle(),
"Error: No se ha podido contactar con Umbria")
self.partida = None
return
except AttributeError:
# noinspection PyCallByClass,PyArgumentList,PyTypeChecker
QtGui.QMessageBox.critical(
self, self.windowTitle(),
u"No puedo entender la respuesta de Umrbia, el código de "
u"la partida o los datos de usuario y contraseña "
u"estan probablemente mal")
self.partida = None
return
else:
self.partida = PartidaPruebas("", "", "")
self.ui.labelTituloPartida.setText(self.partida.titulo)
self.ui.listWidget.clear()
for escena in self.partida.escenas:
novo_item = QtGui.QListWidgetItem(escena.titulo)
novo_item.setCheckState(QtCore.Qt.Checked)
self.ui.listWidget.addItem(novo_item)
self.ui.btnPdf.setEnabled(True)
def genera_pdf(self):
"""
        Generates a PDF of the selected game.
"""
        # Find the selected scenes
self.partida.escenas_seleccionadas = []
for escena in self.partida.escenas:
item = self.ui.listWidget.findItems(escena.titulo,
QtCore.Qt.MatchExactly)
if item[0].checkState():
self.partida.escenas_seleccionadas.append(escena)
print(self.partida.escenas_seleccionadas)
        # Ask for a directory where the game will be saved
        # noinspection PyCallByClass,PyTypeChecker
        nome_dir = QtGui.QFileDialog.getExistingDirectory(
            self, "Save game", "~", QtGui.QFileDialog.ShowDirsOnly)
        # Call the actual generation process
GeneradorText(nome_dir, self.partida)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
ventana = Principal(pruebas="--test" in sys.argv)
ventana.show()
sys.exit(app.exec_())
|
ondrokrc/gramps
|
refs/heads/master
|
gramps/gen/plug/report/_paper.py
|
2
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ..docgen import PaperSize
from ...const import PAPERSIZE
#-------------------------------------------------------------------------
#
# Try to abstract SAX1 from SAX2
#
#-------------------------------------------------------------------------
try:
from xml.sax import make_parser, handler, SAXParseException
except ImportError:
from _xmlplus.sax import make_parser, handler, SAXParseException
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
paper_sizes = []
#-------------------------------------------------------------------------
#
# PageSizeParser
#
#-------------------------------------------------------------------------
class PageSizeParser(handler.ContentHandler):
"""Parses the XML file and builds the list of page sizes"""
def __init__(self, paper_list):
handler.ContentHandler.__init__(self)
self.paper_list = paper_list
self.locator = None
def setDocumentLocator(self, locator):
self.locator = locator
def startElement(self, tag, attrs):
if tag == "page":
name = attrs['name']
height = float(attrs['height'])
width = float(attrs['width'])
self.paper_list.append(PaperSize(name, height, width))
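# The papersize XML parsed below is assumed to hold entries of the form
#   <page name="A4" height="29.7" width="21.0"/>
# with dimensions in centimetres, matching the fallback list further down.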
#-------------------------------------------------------------------------
#
# Parse XML file. If it fails, use the default
#
#-------------------------------------------------------------------------
try:
parser = make_parser()
parser.setContentHandler(PageSizeParser(paper_sizes))
the_file = open(PAPERSIZE)
parser.parse(the_file)
the_file.close()
paper_sizes.append(PaperSize("Custom Size", -1, -1)) # always in English
except (IOError, OSError, SAXParseException):
paper_sizes = [
PaperSize("Letter",27.94,21.59),
PaperSize("Legal",35.56,21.59),
PaperSize("A0",118.9,84.1),
PaperSize("A1",84.1,59.4),
PaperSize("A2",59.4,42.0),
PaperSize("A3",42.0,29.7),
PaperSize("A4",29.7,21.0),
PaperSize("A5",21.0,14.8),
PaperSize("B0",141.4,100.0),
PaperSize("B1",100.0,70.7),
PaperSize("B2",70.7,50.0),
PaperSize("B3",50.0,35.3),
PaperSize("B4",35.3,25.0),
PaperSize("B5",25.0,17.6),
PaperSize("B6",17.6,12.5),
PaperSize("B",43.18,27.94),
PaperSize("C",55.88,43.18),
PaperSize("D",86.36, 55.88),
PaperSize("E",111.76,86.36),
PaperSize("Custom Size",-1,-1) # always in English
]
|
flotre/sickbeard-vfvo
|
refs/heads/master
|
lib/html5lib/sanitizer.py
|
805
|
from __future__ import absolute_import, division, unicode_literals
import re
from xml.sax.saxutils import escape, unescape
from .tokenizer import HTMLTokenizer
from .constants import tokenTypes
class HTMLSanitizerMixin(object):
""" sanitization of XHTML+MathML+SVG and of inline style attributes."""
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video']
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none']
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload',
'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
'y1', 'y2', 'zoomAndPan']
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster',
'xlink:href', 'xml:base']
svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
'mask', 'stroke']
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
'set', 'use']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc',
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
'ssh', 'sftp', 'rtsp', 'afs']
# subclasses may define their own versions of these constants
allowed_elements = acceptable_elements + mathml_elements + svg_elements
allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
allowed_css_properties = acceptable_css_properties
allowed_css_keywords = acceptable_css_keywords
allowed_svg_properties = acceptable_svg_properties
allowed_protocols = acceptable_protocols
# Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
# stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
# attributes are parsed, and a restricted set, # specified by
# ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
# attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
# in ALLOWED_PROTOCOLS are allowed.
#
# sanitize_html('<script> do_nasty_stuff() </script>')
# => <script> do_nasty_stuff() </script>
# sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
# => <a>Click here for $100</a>
def sanitize_token(self, token):
# accommodate filters which use token_type differently
token_type = token["type"]
if token_type in list(tokenTypes.keys()):
token_type = tokenTypes[token_type]
if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]):
if token["name"] in self.allowed_elements:
return self.allowed_token(token, token_type)
else:
return self.disallowed_token(token, token_type)
elif token_type == tokenTypes["Comment"]:
pass
else:
return token
def allowed_token(self, token, token_type):
if "data" in token:
attrs = dict([(name, val) for name, val in
token["data"][::-1]
if name in self.allowed_attributes])
for attr in self.attr_val_is_uri:
if attr not in attrs:
continue
val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
unescape(attrs[attr])).lower()
# remove replacement characters from unescaped characters
val_unescaped = val_unescaped.replace("\ufffd", "")
if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
(val_unescaped.split(':')[0] not in
self.allowed_protocols)):
del attrs[attr]
for attr in self.svg_attr_val_allows_ref:
if attr in attrs:
attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
' ',
unescape(attrs[attr]))
if (token["name"] in self.svg_allow_local_href and
'xlink:href' in attrs and re.search('^\s*[^#\s].*',
attrs['xlink:href'])):
del attrs['xlink:href']
if 'style' in attrs:
attrs['style'] = self.sanitize_css(attrs['style'])
token["data"] = [[name, val] for name, val in list(attrs.items())]
return token
def disallowed_token(self, token, token_type):
if token_type == tokenTypes["EndTag"]:
token["data"] = "</%s>" % token["name"]
elif token["data"]:
attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
token["data"] = "<%s%s>" % (token["name"], attrs)
else:
token["data"] = "<%s>" % token["name"]
if token.get("selfClosing"):
token["data"] = token["data"][:-1] + "/>"
if token["type"] in list(tokenTypes.keys()):
token["type"] = "Characters"
else:
token["type"] = tokenTypes["Characters"]
del token["name"]
return token
def sanitize_css(self, style):
# disallow urls
style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
return ''
clean = []
for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
if not value:
continue
if prop.lower() in self.allowed_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
'padding']:
for keyword in value.split():
                    if keyword not in self.acceptable_css_keywords and \
not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif prop.lower() in self.allowed_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=False, lowercaseAttrName=False, parser=None):
# Change case matching defaults as we only output lowercase html anyway
# This solution doesn't seem ideal...
HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
lowercaseElementName, lowercaseAttrName, parser=parser)
def __iter__(self):
for token in HTMLTokenizer.__iter__(self):
token = self.sanitize_token(token)
if token:
yield token
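# Usage sketch (assuming html5lib's classic tokenizer hook): the sanitizer is
# passed to the parser rather than instantiated directly, e.g.
#   import html5lib
#   from html5lib import sanitizer
#   parser = html5lib.HTMLParser(tokenizer=sanitizer.HTMLSanitizer)
#   parser.parseFragment('<a href="javascript:alert(1)">x</a>')
# Disallowed elements are escaped to text and unsafe URI schemes are dropped.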
|
a10networks/a10sdk-python
|
refs/heads/master
|
a10sdk/core/version/__init__.py
|
12133432
| |
ismk/Python-Examples
|
refs/heads/master
|
learn python the hard way/ex40.py
|
12133432
| |
j-carl/ansible
|
refs/heads/devel
|
test/units/galaxy/__init__.py
|
12133432
| |
cwolferh/heat-scratch
|
refs/heads/master
|
heat/api/openstack/v1/views/__init__.py
|
12133432
| |
StackPointCloud/libcloud
|
refs/heads/trunk
|
docs/examples/dns/list_zone_records.py
|
64
|
from libcloud.dns.providers import get_driver
from libcloud.dns.types import Provider
CREDENTIALS_ZERIGO = ('email', 'api key')
ZONE_ID = 'example.myzone.com'
Cls = get_driver(Provider.ZERIGO)
driver = Cls(*CREDENTIALS_ZERIGO)
zone = driver.get_zone(zone_id=ZONE_ID)
records = driver.list_records(zone=zone)
|
hifly/OpenUpgrade
|
refs/heads/8.0
|
addons/fetchmail/fetchmail.py
|
64
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
from imaplib import IMAP4
from imaplib import IMAP4_SSL
from poplib import POP3
from poplib import POP3_SSL
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import zipfile
import base64
from openerp import addons
from openerp.osv import fields, osv
from openerp import tools, api
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MAX_POP_MESSAGES = 50
class fetchmail_server(osv.osv):
"""Incoming POP/IMAP mail server account"""
_name = 'fetchmail.server'
_description = "POP/IMAP Server"
_order = 'priority'
_columns = {
'name':fields.char('Name', required=True, readonly=False),
'active':fields.boolean('Active', required=False),
'state':fields.selection([
('draft', 'Not Confirmed'),
('done', 'Confirmed'),
], 'Status', select=True, readonly=True, copy=False),
'server' : fields.char('Server Name', readonly=True, help="Hostname or IP of the mail server", states={'draft':[('readonly', False)]}),
'port' : fields.integer('Port', readonly=True, states={'draft':[('readonly', False)]}),
'type':fields.selection([
('pop', 'POP Server'),
('imap', 'IMAP Server'),
('local', 'Local Server'),
], 'Server Type', select=True, required=True, readonly=False),
'is_ssl':fields.boolean('SSL/TLS', help="Connections are encrypted with SSL/TLS through a dedicated port (default: IMAPS=993, POP3S=995)"),
'attach':fields.boolean('Keep Attachments', help="Whether attachments should be downloaded. "
"If not enabled, incoming emails will be stripped of any attachments before being processed"),
        'original':fields.boolean('Keep Original', help="Whether a full original copy of each email should be kept for reference "
"and attached to each processed message. This will usually double the size of your message database."),
'date': fields.datetime('Last Fetch Date', readonly=True),
'user' : fields.char('Username', readonly=True, states={'draft':[('readonly', False)]}),
'password' : fields.char('Password', readonly=True, states={'draft':[('readonly', False)]}),
'action_id':fields.many2one('ir.actions.server', 'Server Action', help="Optional custom server action to trigger for each incoming mail, "
"on the record that was created or updated by this mail"),
'object_id': fields.many2one('ir.model', "Create a New Record", help="Process each incoming mail as part of a conversation "
"corresponding to this document type. This will create "
"new documents for new conversations, or attach follow-up "
"emails to the existing conversations (documents)."),
'priority': fields.integer('Server Priority', readonly=True, states={'draft':[('readonly', False)]}, help="Defines the order of processing, "
"lower values mean higher priority"),
'message_ids': fields.one2many('mail.mail', 'fetchmail_server_id', 'Messages', readonly=True),
'configuration' : fields.text('Configuration', readonly=True),
'script' : fields.char('Script', readonly=True),
}
_defaults = {
'state': "draft",
'type': "pop",
'active': True,
'priority': 5,
'attach': True,
'script': '/mail/static/scripts/openerp_mailgate.py',
}
def onchange_server_type(self, cr, uid, ids, server_type=False, ssl=False, object_id=False):
port = 0
values = {}
if server_type == 'pop':
port = ssl and 995 or 110
elif server_type == 'imap':
port = ssl and 993 or 143
else:
values['server'] = ''
values['port'] = port
conf = {
'dbname' : cr.dbname,
'uid' : uid,
'model' : 'MODELNAME',
}
if object_id:
m = self.pool.get('ir.model')
            r = m.read(cr, uid, [object_id], ['model'])
            conf['model'] = r[0]['model']
values['configuration'] = """Use the below script with the following command line options with your Mail Transport Agent (MTA)
openerp_mailgate.py --host=HOSTNAME --port=PORT -u %(uid)d -p PASSWORD -d %(dbname)s
Example configuration for the postfix mta running locally:
/etc/postfix/virtual_aliases:
@yourdomain openerp_mailgate@localhost
/etc/aliases:
openerp_mailgate: "|/path/to/openerp-mailgate.py --host=localhost -u %(uid)d -p PASSWORD -d %(dbname)s"
""" % conf
return {'value':values}
def set_draft(self, cr, uid, ids, context=None):
        self.write(cr, uid, ids, {'state': 'draft'})
return True
@api.cr_uid_ids_context
def connect(self, cr, uid, server_id, context=None):
if isinstance(server_id, (list,tuple)):
server_id = server_id[0]
server = self.browse(cr, uid, server_id, context)
if server.type == 'imap':
if server.is_ssl:
connection = IMAP4_SSL(server.server, int(server.port))
else:
connection = IMAP4(server.server, int(server.port))
connection.login(server.user, server.password)
elif server.type == 'pop':
if server.is_ssl:
connection = POP3_SSL(server.server, int(server.port))
else:
connection = POP3(server.server, int(server.port))
#TODO: use this to remove only unread messages
#connection.user("recent:"+server.user)
connection.user(server.user)
connection.pass_(server.password)
return connection
def button_confirm_login(self, cr, uid, ids, context=None):
if context is None:
context = {}
for server in self.browse(cr, uid, ids, context=context):
try:
connection = server.connect()
server.write({'state':'done'})
except Exception, e:
_logger.exception("Failed to connect to %s server %s.", server.type, server.name)
raise osv.except_osv(_("Connection test failed!"), _("Here is what we got instead:\n %s.") % tools.ustr(e))
finally:
try:
if connection:
if server.type == 'imap':
connection.close()
elif server.type == 'pop':
connection.quit()
except Exception:
# ignored, just a consequence of the previous exception
pass
return True
def _fetch_mails(self, cr, uid, ids=False, context=None):
if not ids:
ids = self.search(cr, uid, [('state','=','done'),('type','in',['pop','imap'])])
return self.fetch_mail(cr, uid, ids, context=context)
def fetch_mail(self, cr, uid, ids, context=None):
"""WARNING: meant for cron usage only - will commit() after each email!"""
context = dict(context or {})
context['fetchmail_cron_running'] = True
mail_thread = self.pool.get('mail.thread')
action_pool = self.pool.get('ir.actions.server')
for server in self.browse(cr, uid, ids, context=context):
_logger.info('start checking for new emails on %s server %s', server.type, server.name)
context.update({'fetchmail_server_id': server.id, 'server_type': server.type})
count, failed = 0, 0
imap_server = False
pop_server = False
if server.type == 'imap':
try:
imap_server = server.connect()
imap_server.select()
result, data = imap_server.search(None, '(UNSEEN)')
for num in data[0].split():
res_id = None
result, data = imap_server.fetch(num, '(RFC822)')
imap_server.store(num, '-FLAGS', '\\Seen')
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
data[0][1],
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
except Exception:
_logger.exception('Failed to process mail from %s server %s.', server.type, server.name)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
imap_server.store(num, '+FLAGS', '\\Seen')
cr.commit()
count += 1
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", count, server.type, server.name, (count - failed), failed)
except Exception:
_logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name)
finally:
if imap_server:
imap_server.close()
imap_server.logout()
elif server.type == 'pop':
try:
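                    # POP mail is pulled in batches of MAX_POP_MESSAGES,
                    # reconnecting on each pass until fewer than a full
                    # batch remains on the server.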
while True:
pop_server = server.connect()
(numMsgs, totalSize) = pop_server.stat()
pop_server.list()
for num in range(1, min(MAX_POP_MESSAGES, numMsgs) + 1):
(header, msges, octets) = pop_server.retr(num)
msg = '\n'.join(msges)
res_id = None
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
msg,
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
pop_server.dele(num)
except Exception:
_logger.exception('Failed to process mail from %s server %s.', server.type, server.name)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
cr.commit()
if numMsgs < MAX_POP_MESSAGES:
break
pop_server.quit()
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", numMsgs, server.type, server.name, (numMsgs - failed), failed)
except Exception:
_logger.exception("General failure when trying to fetch mail from %s server %s.", server.type, server.name)
finally:
if pop_server:
pop_server.quit()
server.write({'date': time.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)})
return True
def _update_cron(self, cr, uid, context=None):
if context and context.get('fetchmail_cron_running'):
return
try:
cron = self.pool['ir.model.data'].get_object(
cr, uid, 'fetchmail', 'ir_cron_mail_gateway_action', context=context)
except ValueError:
# Nevermind if default cron cannot be found
return
# Enabled/Disable cron based on the number of 'done' server of type pop or imap
cron.toggle(model=self._name, domain=[('state','=','done'), ('type','in',['pop','imap'])])
def create(self, cr, uid, values, context=None):
res = super(fetchmail_server, self).create(cr, uid, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(fetchmail_server, self).write(cr, uid, ids, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(fetchmail_server, self).unlink(cr, uid, ids, context=context)
self._update_cron(cr, uid, context=context)
return res
class mail_mail(osv.osv):
_inherit = "mail.mail"
_columns = {
'fetchmail_server_id': fields.many2one('fetchmail.server', "Inbound Mail Server",
readonly=True,
select=True,
oldname='server_id'),
}
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).create(cr, uid, values, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).write(cr, uid, ids, values, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
SerCeMan/intellij-community
|
refs/heads/master
|
python/helpers/pydev/pydevd_attach_to_process/linux/lldb_threads_settrace.py
|
88
|
# This file is meant to be run inside lldb as a command after
# the attach_linux.dylib dll has already been loaded to settrace for all threads.
def __lldb_init_module(debugger, internal_dict):
# Command Initialization code goes here
# print('Startup LLDB in Python!')
import lldb
try:
show_debug_info = 1
is_debug = 0
options = lldb.SBExpressionOptions()
options.SetFetchDynamicValue()
options.SetTryAllThreads(run_others=False)
options.SetTimeoutInMicroSeconds(timeout=10000000)
target = debugger.GetSelectedTarget()
if target:
process = target.GetProcess()
if process:
for thread in process:
# Get the first frame
# print('Thread %s, suspended %s\n'%(thread, thread.IsStopped()))
if internal_dict.get('_thread_%d' % thread.GetThreadID(), False):
process.SetSelectedThread(thread)
if not thread.IsStopped():
# thread.Suspend()
error = process.Stop()
frame = thread.GetSelectedFrame()
if frame.GetFunctionName() == '__select':
# print('We are in __select')
# Step over select, otherwise evaluating expression there can terminate thread
thread.StepOver()
frame = thread.GetSelectedFrame()
print('Will settrace in: %s' % (frame,))
for f in thread:
print(f)
res = frame.EvaluateExpression("(int) SetSysTraceFunc(%s, %s)" % (
show_debug_info, is_debug), options)
error = res.GetError()
if error:
print(error)
thread.Resume()
except:
import traceback;traceback.print_exc()
|
adaur/SickRage
|
refs/heads/master
|
lib/github/PullRequestMergeStatus.py
|
74
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class PullRequestMergeStatus(github.GithubObject.NonCompletableGithubObject):
"""
    This class represents PullRequestMergeStatus objects. The reference can be found here http://developer.github.com/v3/pulls/#get-if-a-pull-request-has-been-merged
"""
@property
def merged(self):
"""
:type: bool
"""
return self._merged.value
@property
def message(self):
"""
:type: string
"""
return self._message.value
@property
def sha(self):
"""
:type: string
"""
return self._sha.value
def _initAttributes(self):
self._merged = github.GithubObject.NotSet
self._message = github.GithubObject.NotSet
self._sha = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "merged" in attributes: # pragma no branch
self._merged = self._makeBoolAttribute(attributes["merged"])
if "message" in attributes: # pragma no branch
self._message = self._makeStringAttribute(attributes["message"])
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
|
CiscoSystems/vespa
|
refs/heads/master
|
neutron/plugins/midonet/common/net_util.py
|
20
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Midokura PTE LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ryu Ishimoto, Midokura Japan KK
from neutron.common import constants
def subnet_str(cidr):
"""Convert the cidr string to x.x.x.x_y format
:param cidr: CIDR in x.x.x.x/y format
"""
if cidr is None:
return None
return cidr.replace("/", "_")
def net_addr(addr):
"""Get network address prefix and length from a given address."""
if addr is None:
return (None, None)
nw_addr, nw_len = addr.split('/')
nw_len = int(nw_len)
return nw_addr, nw_len
def get_ethertype_value(ethertype):
"""Convert string representation of ethertype to the numerical."""
if ethertype is None:
return None
mapping = {
'ipv4': 0x0800,
'ipv6': 0x86DD,
'arp': 0x806
}
return mapping.get(ethertype.lower())
def get_protocol_value(protocol):
"""Convert string representation of protocol to the numerical."""
if protocol is None:
return None
if isinstance(protocol, int):
return protocol
mapping = {
constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP,
constants.PROTO_NAME_UDP: constants.PROTO_NUM_UDP,
constants.PROTO_NAME_ICMP: constants.PROTO_NUM_ICMP
}
return mapping.get(protocol.lower())
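# Examples:
#   subnet_str("10.0.0.0/24")    -> "10.0.0.0_24"
#   net_addr("10.0.0.0/24")      -> ("10.0.0.0", 24)
#   get_ethertype_value("IPv4")  -> 0x0800
#   get_protocol_value("tcp")    -> constants.PROTO_NUM_TCP (6)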
|
TeutoNet-Netzdienste/ansible
|
refs/heads/devel
|
lib/ansible/__init__.py
|
92
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__version__ = '2.0.0'
__author__ = 'Michael DeHaan'
|
paranoiasystem/Patterns
|
refs/heads/master
|
codice/prototype/cookie/cookie.py
|
1
|
import copy
#
# Prototype Class
#
class Cookie:
def __init__(self, name):
self.name = name
def clone(self):
return copy.deepcopy(self)
#
# Concrete Prototypes to clone
#
class CoconutCookie(Cookie):
def __init__(self):
Cookie.__init__(self, 'Coconut')
#
# Client Class
#
class CookieMachine:
def __init__(self, cookie):
self.cookie = cookie
def make_cookie(self):
return self.cookie.clone()
if __name__ == '__main__':
prot = CoconutCookie()
cm = CookieMachine(prot)
for i in range(10):
temp_cookie = cm.make_cookie()
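        # each clone is an independent deep copy of the prototype
        assert temp_cookie is not prot
        assert temp_cookie.name == prot.name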
|
WSDC-NITWarangal/django
|
refs/heads/master
|
tests/migrations/test_deprecated_fields.py
|
504
|
from django.core.management import call_command
from django.test import override_settings
from .test_base import MigrationTestBase
class Tests(MigrationTestBase):
"""
Deprecated model fields should still be usable in historic migrations.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.deprecated_field_migrations"})
def test_migrate(self):
# Make sure no tables are created
self.assertTableNotExists("migrations_ipaddressfield")
# Run migration
call_command("migrate", verbosity=0)
# Make sure the right tables exist
self.assertTableExists("migrations_ipaddressfield")
# Unmigrate everything
call_command("migrate", "migrations", "zero", verbosity=0)
# Make sure it's all gone
self.assertTableNotExists("migrations_ipaddressfield")
|
JordanReiter/django-mailer
|
refs/heads/master
|
mailer/backend.py
|
104
|
from django.core.mail.backends.base import BaseEmailBackend
from mailer.models import Message
class DbBackend(BaseEmailBackend):
def send_messages(self, email_messages):
num_sent = 0
for email in email_messages:
msg = Message()
msg.email = email
msg.save()
num_sent += 1
return num_sent
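# Usage sketch (added; not part of the original module): Django picks up
# this backend via the EMAIL_BACKEND setting, after which send_mail() and
# friends queue messages in the database instead of sending immediately:
#
#     # settings.py
#     EMAIL_BACKEND = "mailer.backend.DbBackend"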
|
nikolas/edx-platform
|
refs/heads/master
|
lms/djangoapps/edxnotes/__init__.py
|
12133432
| |
lvgilmore/Luke
|
refs/heads/master
|
Luke/API/__init__.py
|
12133432
| |
fcole90/ubuntu-make
|
refs/heads/master
|
tests/small/__init__.py
|
12133432
| |
scriptnull/coala
|
refs/heads/master
|
coalib/collecting/__init__.py
|
12133432
| |
philanthropy-u/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/course_groups/management/__init__.py
|
12133432
| |
oswalpalash/remoteusermgmt
|
refs/heads/master
|
RUM/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
|
1730
|
"""A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing the node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
            an xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
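# Usage sketch (added; not part of the original module):
#
#     TreeBuilder = getTreeBuilder("etree")   # stdlib ElementTree-based
#     TreeBuilder = getTreeBuilder("dom")     # xml.dom.minidom-based
#
# For the "etree" and "dom" types an explicit implementation module may be
# passed, e.g. getTreeBuilder("etree", xml.etree.ElementTree).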
|
BT-rmartin/odoo
|
refs/heads/BT_2900
|
openerp/addons/test_new_api/tests/test_attributes.py
|
394
|
# -*- coding: utf-8 -*-
from openerp.tests import common
ANSWER_TO_ULTIMATE_QUESTION = 42
class TestAttributes(common.TransactionCase):
def test_we_can_add_attributes(self):
Model = self.env['test_new_api.category']
instance = Model.create({'name': 'Foo'})
# assign an unknown attribute
instance.unknown = ANSWER_TO_ULTIMATE_QUESTION
        # Does the attribute exist on the model instance?
self.assertTrue(hasattr(instance, 'unknown'))
        # Is it the right type?
self.assertIsInstance(instance.unknown, (int, long))
        # Is it the right value? Just in case, we check ;-)
self.assertEqual(instance.unknown, ANSWER_TO_ULTIMATE_QUESTION)
        # We are paranoid!
self.assertEqual(getattr(instance, 'unknown'), ANSWER_TO_ULTIMATE_QUESTION)
|
pwarren/AGDeviceControl
|
refs/heads/master
|
agdevicecontrol/thirdparty/site-packages/win32/twisted/test/test_paths.py
|
3
|
import os, time, pickle
from twisted.python import filepath
from twisted.python.runtime import platform
from twisted.trial import unittest
class FilePathTestCase(unittest.TestCase):
f1content = "file 1"
f2content = "file 2"
def setUp(self):
self.now = time.time()
cmn = self.mktemp()
os.mkdir(cmn)
os.mkdir(os.path.join(cmn,"sub1"))
f = open(os.path.join(cmn, "file1"),"wb")
f.write(self.f1content)
f = open(os.path.join(cmn, "sub1", "file2"),"wb")
f.write(self.f2content)
os.mkdir(os.path.join(cmn, 'sub3'))
f = open(os.path.join(cmn, "sub3", "file3.ext1"),"wb")
f = open(os.path.join(cmn, "sub3", "file3.ext2"),"wb")
f = open(os.path.join(cmn, "sub3", "file3.ext3"),"wb")
self.path = filepath.FilePath(cmn)
def testGetAndSet(self):
content = 'newcontent'
self.path.child('new').setContent(content)
newcontent = self.path.child('new').getContent()
self.failUnlessEqual(content, newcontent)
content = 'content'
self.path.child('new').setContent(content, '.tmp')
newcontent = self.path.child('new').getContent()
self.failUnlessEqual(content, newcontent)
if platform.getType() == 'win32':
testGetAndSet.todo = "os.rename in FilePath.setContent doesn't work too well on Windows"
def testValidSubdir(self):
sub1 = self.path.child('sub1')
self.failUnless(sub1.exists(),
"This directory does exist.")
self.failUnless(sub1.isdir(),
"It's a directory.")
self.failUnless(not sub1.isfile(),
"It's a directory.")
self.failUnless(not sub1.islink(),
"It's a directory.")
self.failUnlessEqual(sub1.listdir(),
['file2'])
def testMultiExt(self):
f3 = self.path.child('sub3').child('file3')
exts = '.foo','.bar', 'ext1','ext2','ext3'
self.failIf(f3.siblingExtensionSearch(*exts))
f3e = f3.siblingExtension(".foo")
f3e.touch()
self.failIf(not f3.siblingExtensionSearch(*exts).exists())
self.failIf(not f3.siblingExtensionSearch('*').exists())
f3e.remove()
self.failIf(f3.siblingExtensionSearch(*exts))
def testInvalidSubdir(self):
sub2 = self.path.child('sub2')
self.failIf(sub2.exists(),
"This directory does not exist.")
def testValidFiles(self):
f1 = self.path.child('file1')
self.failUnlessEqual(f1.open().read(), self.f1content)
f2 = self.path.child('sub1').child('file2')
self.failUnlessEqual(f2.open().read(), self.f2content)
def testPreauthChild(self):
fp = filepath.FilePath('.')
fp.preauthChild('foo/bar')
self.assertRaises(filepath.InsecurePath, fp.child, '/foo')
def testStatCache(self):
p = self.path.child('stattest')
p.touch()
self.failUnlessEqual(p.getsize(), 0)
self.failUnlessEqual(abs(p.getmtime() - time.time()) // 20, 0)
self.failUnlessEqual(abs(p.getctime() - time.time()) // 20, 0)
self.failUnlessEqual(abs(p.getatime() - time.time()) // 20, 0)
self.failUnlessEqual(p.exists(), True)
self.failUnlessEqual(p.exists(), True)
p.remove()
# test caching
self.failUnlessEqual(p.exists(), True)
p.restat(reraise=False)
self.failUnlessEqual(p.exists(), False)
self.failUnlessEqual(p.islink(), False)
self.failUnlessEqual(p.isdir(), False)
self.failUnlessEqual(p.isfile(), False)
def testPersist(self):
newpath = pickle.loads(pickle.dumps(self.path))
self.failUnlessEqual(self.path.__class__, newpath.__class__)
self.failUnlessEqual(self.path.path, newpath.path)
def testInsecureUNIX(self):
self.assertRaises(filepath.InsecurePath, self.path.child, "..")
self.assertRaises(filepath.InsecurePath, self.path.child, "/etc")
self.assertRaises(filepath.InsecurePath, self.path.child, "../..")
def testInsecureWin32(self):
self.assertRaises(filepath.InsecurePath, self.path.child, r"..\..")
self.assertRaises(filepath.InsecurePath, self.path.child, r"C:randomfile")
if platform.getType() != 'win32':
testInsecureWin32.skip = "Consider yourself lucky."
else:
testInsecureWin32.todo = "Hrm, broken"
def testInsecureWin32Whacky(self):
"""Windows has 'special' filenames like NUL and CON and COM1 and LPR
and PRN and ... god knows what else. They can be located anywhere in
the filesystem. For obvious reasons, we do not wish to normally permit
access to these.
"""
self.assertRaises(filepath.InsecurePath, self.path.child, "CON")
self.assertRaises(filepath.InsecurePath, self.path.child, "C:CON")
self.assertRaises(filepath.InsecurePath, self.path.child, r"C:\CON")
if platform.getType() != 'win32':
testInsecureWin32Whacky.skip = "Consider yourself lucky."
else:
testInsecureWin32Whacky.todo = "Broken, no checking for whacky devices"
from twisted.python import urlpath
class URLPathTestCase(unittest.TestCase):
def setUp(self):
self.path = urlpath.URLPath.fromString("http://example.com/foo/bar?yes=no&no=yes#footer")
def testStringConversion(self):
self.assertEquals(str(self.path), "http://example.com/foo/bar?yes=no&no=yes#footer")
def testChildString(self):
self.assertEquals(str(self.path.child('hello')), "http://example.com/foo/bar/hello")
self.assertEquals(str(self.path.child('hello').child('')), "http://example.com/foo/bar/hello/")
def testSiblingString(self):
self.assertEquals(str(self.path.sibling('baz')), 'http://example.com/foo/baz')
# The sibling of http://example.com/foo/bar/
        # is http://example.com/foo/bar/baz
# because really we are constructing a sibling of
# http://example.com/foo/bar/index.html
self.assertEquals(str(self.path.child('').sibling('baz')), 'http://example.com/foo/bar/baz')
def testParentString(self):
# parent should be equivalent to '..'
# 'foo' is the current directory, '/' is the parent directory
self.assertEquals(str(self.path.parent()), 'http://example.com/')
self.assertEquals(str(self.path.child('').parent()), 'http://example.com/foo/')
self.assertEquals(str(self.path.child('baz').parent()), 'http://example.com/foo/')
self.assertEquals(str(self.path.parent().parent().parent().parent().parent()), 'http://example.com/')
def testHereString(self):
# here should be equivalent to '.'
self.assertEquals(str(self.path.here()), 'http://example.com/foo/')
self.assertEquals(str(self.path.child('').here()), 'http://example.com/foo/bar/')
|
GhostofGoes/ADLES
|
refs/heads/master
|
adles/vsphere/__main__.py
|
2
|
#!/usr/bin/env python3
import argparse
import sys
from adles.utils import setup_logging
from adles.vsphere.vsphere_scripts import VSPHERE_SCRIPTS
def main():
# Parse CLI arguments
# TODO: generalize this between other scripts and adles main?
args = parse_args()
# Set if console output should be colored
    colors = not args.no_color
# Configure logging
setup_logging(filename='vsphere_scripts.log', colors=colors,
console_verbose=args.verbose)
script = args.script(args.server_info)
script.run()
def parse_args():
parser = argparse.ArgumentParser(
prog='vsphere', formatter_class=argparse.RawDescriptionHelpFormatter,
description='Single-purpose CLI scripts for interacting with vSphere'
)
subparsers = parser.add_subparsers(title='vSphere scripts')
for s in VSPHERE_SCRIPTS:
subp = subparsers.add_parser(name=s.name, help=s.__doc__)
subp.set_defaults(script=s)
subp.add_argument('--version', action='version',
version=s.get_ver())
subp.add_argument('-f', '--server-info', type=str,
default=None, metavar='FILE',
help='Name of JSON file with vSphere '
'server connection information')
subp.add_argument('-v', '--verbose', action='store_true',
help='Emit debugging logs to terminal')
subp.add_argument('--no-color', action='store_true',
help='Do not color terminal output')
# Default to printing usage if no arguments are provided
if len(sys.argv) == 1:
parser.print_usage()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
|
slogan621/tscharts
|
refs/heads/master
|
tools/covidinsert/covidinsert.py
|
1
|
#(C) Copyright Syd Logan 2021
#(C) Copyright Thousand Smiles Foundation 2021
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import getopt, sys
import json
from tschartslib.service.serviceapi import ServiceAPI
from tschartslib.tscharts.tscharts import Login, Logout
from tschartslib.covidvac.covidvac import CreateCOVIDVac, GetCOVIDVac
class UpdateCOVIDVacList():
def __init__(self):
login = Login(host, port, username, password)
ret = login.send(timeout=30)
global token
token = ret[1]["token"]
def parseCOVIDVacFile(self, filename):
ret = []
        try:
            file = open(filename, "r")
        except IOError:
            print("ERROR: unable to open {}".format(filename))
            sys.exit(1)
line = 0
for x in file:
line = line + 1
x = x.strip()
if len(x) == 0:
continue
if x[0] == "#": # comment
continue
ret.append(x)
file.close()
print("{}".format(ret))
return ret
def uploadCOVIDVacs(self, path):
data = self.parseCOVIDVacFile(path)
for x in data:
val = {}
val["name"] = x
c = CreateCOVIDVac(host, port, token, val)
ret = c.send(timeout = 30)
if ret[0] != 200:
print("unable to create COVIDVac entry {}".format(val))
def usage():
print("covidinsert [-h host] [-p port] [-u username] [-w password] -f path")
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "h:p:u:w:f:")
except getopt.GetoptError as err:
        print(str(err))
usage()
sys.exit(2)
global host
host = "127.0.0.1"
global port
port = 8000
global username
username = None
global password
password = None
path = None
for o, a in opts:
if o == "-h":
host = a
elif o == "-p":
port = int(a)
elif o == "-u":
username = a
elif o == "-w":
password = a
elif o == "-f":
path = a
else:
assert False, "unhandled option"
x = UpdateCOVIDVacList()
x.uploadCOVIDVacs(path)
if __name__ == "__main__":
main()
|
jkbockstael/adventofcode-2015
|
refs/heads/master
|
day07_part1.py
|
1
|
# Advent of Code 2015 - Day 7 - Some Assembly Required
# http://adventofcode.com/2015/day/7
import sys
GATE_INPUT = 0
GATE_NOT = 1
GATE_AND = 2
GATE_OR = 3
GATE_RSHIFT = 4
GATE_LSHIFT = 5
GATE_CONNECT = 6
# Parse the input and return a map
def parse_input(s):
circuit = {}
for line in s:
gate, output = line.strip().split(' -> ')
if gate.isdigit():
operator = GATE_INPUT
operand_a = int(gate)
operand_b = None
elif 'NOT' in gate:
operator = GATE_NOT
operand_a = gate[4:]
operand_b = None
elif 'AND' in gate:
operator = GATE_AND
operand_a, operand_b = gate.split(' AND ')
elif 'OR' in gate:
operator = GATE_OR
operand_a, operand_b = gate.split(' OR ')
elif 'RSHIFT' in gate:
operator = GATE_RSHIFT
operand_a, operand_b = gate.split(' RSHIFT ')
operand_b = int(operand_b)
elif 'LSHIFT' in gate:
operator = GATE_LSHIFT
operand_a, operand_b = gate.split(' LSHIFT ')
operand_b = int(operand_b)
        else:  # e.g. a -> b
operator = GATE_CONNECT
operand_a = gate
operand_b = None
circuit[output] = (operator, operand_a, operand_b)
return circuit
# Solve the circuit for a given wire
def solve_for(circuit, memo, wire):
# Some gates have direct inputs
if wire.isdigit():
return int(wire)
if not wire in memo:
operator, operand_a, operand_b = circuit[wire]
if operator == GATE_INPUT:
result = operand_a
elif operator == GATE_NOT:
result = ~solve_for(circuit, memo, operand_a)
elif operator == GATE_AND:
result = solve_for(circuit, memo, operand_a) & solve_for(circuit, memo, operand_b)
elif operator == GATE_OR:
result = solve_for(circuit, memo, operand_a) | solve_for(circuit, memo, operand_b)
elif operator == GATE_RSHIFT:
result = solve_for(circuit, memo, operand_a) >> operand_b
elif operator == GATE_LSHIFT:
result = solve_for(circuit, memo, operand_a) << operand_b
elif operator == GATE_CONNECT:
result = solve_for(circuit, memo, operand_a)
memo[wire] = result
return memo[wire]
# Cast as unsigned 16-bit integer
def u16(integer):
return integer & 0xFFFF
# Main
if __name__ == '__main__':
circuit = parse_input(sys.stdin.readlines())
print(u16(solve_for(circuit, {}, 'a')))
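# Worked example (added sketch; not in the original). For the input
#
#     123 -> x
#     456 -> y
#     x AND y -> d
#     d -> a
#
# solve_for(circuit, {}, 'a') evaluates d = 123 & 456 = 72, and u16 leaves
# the result unchanged since it already fits in 16 bits.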
|
j08lue/poppy
|
refs/heads/master
|
scripts/save_annual_max_timeseries.py
|
1
|
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import pandas as pd
import argparse
import glob
import poppy.metrics
regionlims = {
    'Global' : dict(lonlim=(-180,180),latlim=(-90,90)),
'Atlantic' : dict(lonlim=(-80,40),latlim=(-70,70)),
'PolarNorthAtlantic': dict(lonlim=(-80,60),latlim=(60,90)),
'LabradorSea': dict(latlim=(50,60), lonlim=(-50,-40)),
'NorthAtlantic' : dict(lonlim=(-80,20),latlim=(0,65)),
'SubpolarNorthAtlantic': dict(lonlim=(-60,0),latlim=(40,65)),
'SubtropicalNorthAtlantic': dict(lonlim=(-100,0),latlim=(10,30)),
'TreguierNorthAtlantic': dict(lonlim=(-100,20),latlim=(10,50)),
'SubtropicalSouthAtlantic': dict(lonlim=(-50,20),latlim=(-40,-10)),
'BrazilEastCoast20S40W': dict(lonlim=(-45,-20),latlim=(-30,-10)),
'EquatorialAtlantic': dict(lonlim=(-55,15),latlim=(-10,10)),
'SubtropicalSouthPacific': dict(lonlim=(150,280),latlim=(-40,-10)),
}
def get_annual_max(files, varn, grid, region):
ts = poppy.metrics.get_timeseries(
files,
varn=varn,
grid=grid,
reducefunc=np.nanmax,
**regionlims[region])
tssmooth = pd.rolling_max(ts, window=12, center=True)
return tssmooth
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Save annual maximum time series")
parser.add_argument('files', nargs='+', help='Files to read and concatenate')
parser.add_argument('-v', '--varn', help='Variable name')
parser.add_argument('-g', '--grid', help='Grid', choices=['T', 'U'])
parser.add_argument('-r', '--region', help='Region name', choices=regionlims.keys())
parser.add_argument('-o', '--outfile', help='Output file', default='max.h5')
args = parser.parse_args()
files = sorted(args.files)
if len(files) == 1:
files = sorted(glob.glob(files[0]))
ts = get_annual_max(files, varn=args.varn, grid=args.grid, region=args.region)
ts.to_hdf(args.outfile, key=args.varn+'_'+args.region, mode='w', format='table')
print('Data saved to {}'.format(args.outfile))
|
burnpanck/chaco
|
refs/heads/master
|
chaco/polar_line_renderer.py
|
3
|
""" Defines the PolarLineRenderer class.
"""
from __future__ import with_statement
# Major library imports
from numpy import array, cos, pi, sin, transpose
# Enthought library imports
from enable.api import black_color_trait, LineStyle
from traits.api import Float
# Local, relative imports
from abstract_plot_renderer import AbstractPlotRenderer
class PolarLineRenderer(AbstractPlotRenderer):
""" A renderer for polar line plots.
"""
#------------------------------------------------------------------------
# Appearance-related traits
#------------------------------------------------------------------------
# The color of the origin axis.
origin_axis_color_ = (0,0,0,1)
# The width of the origin axis.
origin_axis_width = 2.0
# The origin axis is visible.
    origin_axis_visible = True
    # The grid is visible.
    grid_visible = True
# The orientation of the plot is horizontal; for any other value, it is
# transposed
orientation = 'h'
# The color of the line.
color = black_color_trait
# The width of the line.
line_width = Float(1.0)
# The style of the line.
line_style = LineStyle("solid")
# The style of the grid lines.
    grid_style = LineStyle("dot")
def _gather_points(self):
"""
Collects the data points that are within the plot bounds and caches them
"""
# This is just a stub for now. We should really find the lines only
# inside the screen range here.
x = self.index.get_data()
y = self.value.get_data()
        rad = min(self.width / 2.0, self.height / 2.0)
        sx = x * rad + self.x + self.width / 2.0
        sy = y * rad + self.y + self.height / 2.0
        points = transpose(array((sx, sy)))
self._cached_data_pts = points
self._cache_valid = True
return
def _data_changed(self):
self._cache_valid = False
return
def _update_mappers(self):
#Dunno if there is anything else to do here
self._cache_valid = False
def _render(self, gc, points):
""" Actually draw the plot.
"""
with gc:
gc.set_antialias(True)
self._draw_default_axes(gc)
self._draw_default_grid(gc)
if len(points)>0:
gc.clip_to_rect(self.x, self.y, self.width, self.height)
gc.set_stroke_color(self.color_)
gc.set_line_width(self.line_width)
gc.set_line_dash(self.line_style_)
gc.begin_path()
gc.lines(points)
gc.stroke_path()
return
def map_screen(self, data_array):
""" Maps an array of data points into screen space and returns it as
an array.
Implements the AbstractPlotRenderer interface.
"""
if len(data_array) == 0:
return []
elif len(data_array) == 1:
xtmp, ytmp = transpose(data_array)
x_ary = xtmp
y_ary = ytmp
else:
x_ary, y_ary = transpose(data_array)
sx = self.index_mapper.map_screen(x_ary)
sy = self.value_mapper.map_screen(y_ary)
if self.orientation == 'h':
return transpose(array((sx, sy)))
else:
return transpose(array((sy, sx)))
def map_data(self, screen_pt):
""" Maps a screen space point into the "index" space of the plot.
Implements the AbstractPlotRenderer interface.
"""
if self.orientation == 'h':
x, y = screen_pt
else:
            y, x = screen_pt
return array((self.index_mapper.map_data(x),
self.value_mapper.map_data(y)))
def _downsample(self):
return self.map_screen(self._cached_data_pts)
def _draw_plot(self, *args, **kw):
""" Draws the 'plot' layer.
"""
# Simple compatibility with new-style rendering loop
return self._draw_component(*args, **kw)
def _draw_component(self, gc, view_bounds=None, mode='normal'):
""" Renders the component.
"""
self._gather_points()
self._render(gc, self._cached_data_pts)
def _bounds_changed(self, old, new):
super(PolarLineRenderer, self)._bounds_changed(old, new)
self._update_mappers()
def _bounds_items_changed(self, event):
super(PolarLineRenderer, self)._bounds_items_changed(event)
self._update_mappers()
def _draw_default_axes(self, gc):
if not self.origin_axis_visible:
return
with gc:
gc.set_stroke_color(self.origin_axis_color_)
gc.set_line_width(self.origin_axis_width)
gc.set_line_dash(self.grid_style_)
            x_data, y_data = transpose(self._cached_data_pts)
            x_center = self.x + self.width / 2.0
            y_center = self.y + self.height / 2.0
            for theta in range(12):
                r = min(self.width / 2.0, self.height / 2.0)
                x = r * cos(theta * pi / 6) + x_center
                y = r * sin(theta * pi / 6) + y_center
                data_pts = array([[x_center, y_center], [x, y]])
                start, end = data_pts
gc.move_to(int(start[0]), int(start[1]))
gc.line_to(int(end[0]), int(end[1]))
gc.stroke_path()
return
    def _draw_default_grid(self, gc):
if not self.grid_visible:
return
with gc:
gc.set_stroke_color(self.origin_axis_color_)
gc.set_line_width(self.origin_axis_width)
gc.set_line_dash(self.grid_style_)
            x_data, y_data = transpose(self._cached_data_pts)
x_center = self.x + self.width/2.0
y_center = self.y + self.height/2.0
rad = min(self.width/2.0, self.height/2.0)
for r_part in range(1,5):
r = rad*r_part/4
gc.arc(x_center, y_center, r, 0, 2*pi)
gc.stroke_path()
return
|
malaterre/serna-free-backup
|
refs/heads/master
|
serna/dist/plugins/xhtml/__init__.py
|
12133432
| |
coder-james/mxnet
|
refs/heads/master
|
example/ssd/train/__init__.py
|
12133432
| |
devs1991/test_edx_docmode
|
refs/heads/master
|
venv/lib/python2.7/site-packages/networkx/external/__init__.py
|
12133432
| |
flavour/RedHat
|
refs/heads/master
|
modules/tests/roles/DRRPP/__init__.py
|
12133432
| |
iandees/all-the-places
|
refs/heads/master
|
locations/__init__.py
|
12133432
| |
MM1nd/worldengine
|
refs/heads/master
|
tests/cli_test.py
|
4
|
import os
import sys
from tests.draw_test import TestBase
import unittest
from worldengine import __main__
from worldengine.cli.main import main
class TestCLI(TestBase):
def setUp(self):
super(TestCLI, self).setUp()
self.world = "%s/seed_28070.world" % self.tests_data_dir
def test__main__(self):
assert __main__
def test_options(self):
backup_argv = sys.argv
sys.argv = ["python", "--help"]
self.assertRaises(SystemExit, main)
sys.argv = ["python", "--version"]
self.assertRaises(SystemExit, main)
sys.argv = ["python", "info"]
self.assertRaises(SystemExit, main)
sys.argv = ["python", "infooooooooo"]
self.assertRaises(SystemExit, main)
sys.argv = ["python", "info", "does_not_exist"]
self.assertRaises(SystemExit, main)
sys.argv = ["python", "info", "--gs"]
self.assertRaises(SystemExit, main)
sys.argv = ["python", "-o", __file__]
self.assertRaises(Exception, main)
sys.argv = ["python", "info", "-o", "test_dir"]
self.assertRaises(SystemExit, main)
self.assertTrue(os.path.isdir("test_dir"))
sys.argv = ["python", "plates", "--number-of-plates", "0"]
self.assertRaises(SystemExit, main)
sys.argv = ["python", "plates", "--number-of-plates", "101"]
        self.assertRaises(SystemExit, main)
        sys.argv = backup_argv
def test_warnings(self):
backup_argv = sys.argv
sys.argv = ["python", "--width", "16", "--height", "16",
"--temps", "1.1/.235/.406/.561/.634/-.1", "--humidity",
"1.1/.222/.493/.764/.927/.986/-.1"]
try:
main()
except Exception as e:
            raise e
        sys.argv = backup_argv
def test_smoke_info(self):
backup_argv = sys.argv
sys.argv = ["python", "info", self.world]
try:
main()
except Exception as e:
raise e
# TODO: fill in the rest of the options and their possibilities
sys.argv = backup_argv
def test_smoke_full(self):
# the big smoke test, can we go through
# everything without it exploding?
backup_argv = sys.argv
sys.argv = ["python", "--width", "16", "--height", "16",
"-r", "-v", "--gs", "--scatter", "--temps",
".126/.235/.406/.561/.634/.876", "--humidity",
".059/.222/.493/.764/.927/.986/.998", "-go", ".2",
"-gv", "1.25"]
try:
main()
except Exception as e:
            raise e
        sys.argv = backup_argv
def test_smoke_ancient(self):
backup_argv = sys.argv
sys.argv = ["python", "ancient_map", "-w", self.world]
try:
main()
except Exception as e:
raise e
sys.argv = backup_argv
def test_smoke_plates(self):
backup_argv = sys.argv
sys.argv = ["python", "plates", "--width", "16",
"--height", "16", "--number-of-plates", "2"]
try:
main()
except Exception as e:
raise e
sys.argv = backup_argv
if __name__ == '__main__':
unittest.main()
|
elkingtonmcb/django
|
refs/heads/master
|
django/contrib/gis/db/backends/mysql/introspection.py
|
700
|
from MySQLdb.constants import FIELD_TYPE
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection
class MySQLIntrospection(DatabaseIntrospection):
# Updating the data_types_reverse dictionary with the appropriate
# type for Geometry fields.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# In order to get the specific geometry type of the field,
# we introspect on the table definition using `DESCRIBE`.
cursor.execute('DESCRIBE %s' %
self.connection.ops.quote_name(table_name))
# Increment over description info until we get to the geometry
# column.
for column, typ, null, key, default, extra in cursor.fetchall():
if column == geo_col:
# Using OGRGeomType to convert from OGC name to Django field.
# MySQL does not support 3D or SRIDs, so the field params
# are empty.
field_type = OGRGeomType(typ).django
field_params = {}
break
finally:
cursor.close()
return field_type, field_params
def supports_spatial_index(self, cursor, table_name):
# Supported with MyISAM, or InnoDB on MySQL 5.7.5+
storage_engine = self.get_storage_engine(cursor, table_name)
return (
(storage_engine == 'InnoDB' and self.connection.mysql_version >= (5, 7, 5)) or
storage_engine == 'MyISAM'
)
|
martinwicke/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/beta_test.py
|
11
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats, special
import tensorflow as tf
class BetaTest(tf.test.TestCase):
def testSimpleShapes(self):
with self.test_session():
a = np.random.rand(3)
b = np.random.rand(3)
dist = tf.contrib.distributions.Beta(a, b)
self.assertAllEqual([], dist.event_shape().eval())
self.assertAllEqual([3], dist.batch_shape().eval())
self.assertEqual(tf.TensorShape([]), dist.get_event_shape())
self.assertEqual(tf.TensorShape([3]), dist.get_batch_shape())
def testComplexShapes(self):
with self.test_session():
a = np.random.rand(3, 2, 2)
b = np.random.rand(3, 2, 2)
dist = tf.contrib.distributions.Beta(a, b)
self.assertAllEqual([], dist.event_shape().eval())
self.assertAllEqual([3, 2, 2], dist.batch_shape().eval())
self.assertEqual(tf.TensorShape([]), dist.get_event_shape())
self.assertEqual(tf.TensorShape([3, 2, 2]), dist.get_batch_shape())
def testComplexShapesBroadcast(self):
with self.test_session():
a = np.random.rand(3, 2, 2)
b = np.random.rand(2, 2)
dist = tf.contrib.distributions.Beta(a, b)
self.assertAllEqual([], dist.event_shape().eval())
self.assertAllEqual([3, 2, 2], dist.batch_shape().eval())
self.assertEqual(tf.TensorShape([]), dist.get_event_shape())
self.assertEqual(tf.TensorShape([3, 2, 2]), dist.get_batch_shape())
def testAlphaProperty(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
with self.test_session():
dist = tf.contrib.distributions.Beta(a, b)
self.assertEqual([1, 3], dist.a.get_shape())
self.assertAllClose(a, dist.a.eval())
def testBetaProperty(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
with self.test_session():
dist = tf.contrib.distributions.Beta(a, b)
self.assertEqual([1, 3], dist.b.get_shape())
self.assertAllClose(b, dist.b.eval())
def testPdfXProper(self):
a = [[1., 2, 3]]
b = [[2., 4, 3]]
with self.test_session():
dist = tf.contrib.distributions.Beta(a, b, validate_args=True)
dist.pdf([.1, .3, .6]).eval()
dist.pdf([.2, .3, .5]).eval()
# Either condition can trigger.
with self.assertRaisesOpError("(Condition x > 0.*|Condition x < y.*)"):
dist.pdf([-1., 1, 1]).eval()
with self.assertRaisesOpError("Condition x.*"):
dist.pdf([0., 1, 1]).eval()
with self.assertRaisesOpError("Condition x < y.*"):
dist.pdf([.1, .2, 1.2]).eval()
def testPdfTwoBatches(self):
with self.test_session():
a = [1., 2]
b = [1., 2]
x = [.5, .5]
dist = tf.contrib.distributions.Beta(a, b)
pdf = dist.pdf(x)
self.assertAllClose([1., 3./2], pdf.eval())
self.assertEqual((2,), pdf.get_shape())
def testPdfTwoBatchesNontrivialX(self):
with self.test_session():
a = [1., 2]
b = [1., 2]
x = [.3, .7]
dist = tf.contrib.distributions.Beta(a, b)
pdf = dist.pdf(x)
self.assertAllClose([1, 63./50], pdf.eval())
self.assertEqual((2,), pdf.get_shape())
def testPdfUniformZeroBatch(self):
with self.test_session():
# This is equivalent to a uniform distribution
a = 1.
b = 1.
x = np.array([.1, .2, .3, .5, .8], dtype=np.float32)
dist = tf.contrib.distributions.Beta(a, b)
pdf = dist.pdf(x)
self.assertAllClose([1.] * 5, pdf.eval())
self.assertEqual((5,), pdf.get_shape())
def testPdfAlphaStretchedInBroadcastWhenSameRank(self):
with self.test_session():
a = [[1., 2]]
b = [[1., 2]]
x = [[.5, .5], [.3, .7]]
dist = tf.contrib.distributions.Beta(a, b)
pdf = dist.pdf(x)
self.assertAllClose([[1., 3./2], [1., 63./50]], pdf.eval())
self.assertEqual((2, 2), pdf.get_shape())
def testPdfAlphaStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
a = [1., 2]
b = [1., 2]
x = [[.5, .5], [.2, .8]]
pdf = tf.contrib.distributions.Beta(a, b).pdf(x)
self.assertAllClose([[1., 3./2], [1., 24./25]], pdf.eval())
self.assertEqual((2, 2), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenSameRank(self):
with self.test_session():
a = [[1., 2], [2., 3]]
b = [[1., 2], [2., 3]]
x = [[.5, .5]]
pdf = tf.contrib.distributions.Beta(a, b).pdf(x)
self.assertAllClose([[1., 3./2], [3./2, 15./8]], pdf.eval())
self.assertEqual((2, 2), pdf.get_shape())
def testPdfXStretchedInBroadcastWhenLowerRank(self):
with self.test_session():
a = [[1., 2], [2., 3]]
b = [[1., 2], [2., 3]]
x = [.5, .5]
pdf = tf.contrib.distributions.Beta(a, b).pdf(x)
self.assertAllClose([[1., 3./2], [3./2, 15./8]], pdf.eval())
self.assertEqual((2, 2), pdf.get_shape())
def testBetaMean(self):
with tf.Session():
a = [1., 2, 3]
b = [2., 4, 1.2]
expected_mean = stats.beta.mean(a, b)
dist = tf.contrib.distributions.Beta(a, b)
self.assertEqual(dist.mean().get_shape(), (3,))
self.assertAllClose(expected_mean, dist.mean().eval())
def testBetaVariance(self):
with tf.Session():
a = [1., 2, 3]
b = [2., 4, 1.2]
expected_variance = stats.beta.var(a, b)
dist = tf.contrib.distributions.Beta(a, b)
self.assertEqual(dist.variance().get_shape(), (3,))
self.assertAllClose(expected_variance, dist.variance().eval())
def testBetaMode(self):
with tf.Session():
a = np.array([1.1, 2, 3])
b = np.array([2., 4, 1.2])
expected_mode = (a - 1)/(a + b - 2)
dist = tf.contrib.distributions.Beta(a, b)
self.assertEqual(dist.mode().get_shape(), (3,))
self.assertAllClose(expected_mode, dist.mode().eval())
def testBetaModeInvalid(self):
with tf.Session():
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = tf.contrib.distributions.Beta(a, b, allow_nan_stats=False)
with self.assertRaisesOpError("Condition x < y.*"):
dist.mode().eval()
a = np.array([2., 2, 3])
b = np.array([1., 4, 1.2])
dist = tf.contrib.distributions.Beta(a, b, allow_nan_stats=False)
with self.assertRaisesOpError("Condition x < y.*"):
dist.mode().eval()
def testBetaModeEnableAllowNanStats(self):
with tf.Session():
a = np.array([1., 2, 3])
b = np.array([2., 4, 1.2])
dist = tf.contrib.distributions.Beta(a, b, allow_nan_stats=True)
expected_mode = (a - 1)/(a + b - 2)
expected_mode[0] = np.nan
self.assertEqual((3,), dist.mode().get_shape())
self.assertAllClose(expected_mode, dist.mode().eval())
a = np.array([2., 2, 3])
b = np.array([1., 4, 1.2])
dist = tf.contrib.distributions.Beta(a, b, allow_nan_stats=True)
expected_mode = (a - 1)/(a + b - 2)
expected_mode[0] = np.nan
self.assertEqual((3,), dist.mode().get_shape())
self.assertAllClose(expected_mode, dist.mode().eval())
def testBetaEntropy(self):
with tf.Session():
a = [1., 2, 3]
b = [2., 4, 1.2]
expected_entropy = stats.beta.entropy(a, b)
dist = tf.contrib.distributions.Beta(a, b)
self.assertEqual(dist.entropy().get_shape(), (3,))
self.assertAllClose(expected_entropy, dist.entropy().eval())
def testBetaSample(self):
with self.test_session():
a = 1.
b = 2.
beta = tf.contrib.distributions.Beta(a, b)
n = tf.constant(100000)
samples = beta.sample(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000,))
self.assertFalse(np.any(sample_values < 0.0))
self.assertLess(
stats.kstest(
# Beta is a univariate distribution.
sample_values, stats.beta(a=1., b=2.).cdf)[0],
0.01)
# The standard error of the sample mean is 1 / (sqrt(18 * n))
self.assertAllClose(sample_values.mean(axis=0),
stats.beta.mean(a, b),
atol=1e-2)
self.assertAllClose(np.cov(sample_values, rowvar=0),
stats.beta.var(a, b),
atol=1e-1)
# Test that sampling with the same seed twice gives the same results.
def testBetaSampleMultipleTimes(self):
with self.test_session():
a_val = 1.
b_val = 2.
n_val = 100
tf.set_random_seed(654321)
beta1 = tf.contrib.distributions.Beta(a=a_val, b=b_val, name="beta1")
samples1 = beta1.sample(n_val, seed=123456).eval()
tf.set_random_seed(654321)
beta2 = tf.contrib.distributions.Beta(a=a_val, b=b_val, name="beta2")
samples2 = beta2.sample(n_val, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testBetaSampleMultidimensional(self):
with self.test_session():
a = np.random.rand(3, 2, 2).astype(np.float32)
b = np.random.rand(3, 2, 2).astype(np.float32)
beta = tf.contrib.distributions.Beta(a, b)
n = tf.constant(100000)
samples = beta.sample(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000, 3, 2, 2))
self.assertFalse(np.any(sample_values < 0.0))
self.assertAllClose(
sample_values[:, 1, :].mean(axis=0),
stats.beta.mean(a, b)[1, :],
atol=1e-1)
def testBetaCdf(self):
with self.test_session():
shape = (30, 40, 50)
for dt in (np.float32, np.float64):
a = 10. * np.random.random(shape).astype(dt)
b = 10. * np.random.random(shape).astype(dt)
x = np.random.random(shape).astype(dt)
actual = tf.contrib.distributions.Beta(a, b).cdf(x).eval()
self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)
self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. >= x)
self.assertAllClose(stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)
def testBetaLogCdf(self):
with self.test_session():
shape = (30, 40, 50)
for dt in (np.float32, np.float64):
a = 10. * np.random.random(shape).astype(dt)
b = 10. * np.random.random(shape).astype(dt)
x = np.random.random(shape).astype(dt)
actual = tf.exp(tf.contrib.distributions.Beta(a, b).log_cdf(x)).eval()
self.assertAllEqual(np.ones(shape, dtype=np.bool), 0. <= x)
self.assertAllEqual(np.ones(shape, dtype=np.bool), 1. >= x)
self.assertAllClose(stats.beta.cdf(x, a, b), actual, rtol=1e-4, atol=0)
def testBetaWithSoftplusAB(self):
with self.test_session():
a, b = -4.2, -9.1
dist = tf.contrib.distributions.BetaWithSoftplusAB(a, b)
self.assertAllClose(tf.nn.softplus(a).eval(), dist.a.eval())
self.assertAllClose(tf.nn.softplus(b).eval(), dist.b.eval())
def testBetaBetaKL(self):
with self.test_session() as sess:
for shape in [(10,), (4,5)]:
a1 = 6.0*np.random.random(size=shape) + 1e-4
b1 = 6.0*np.random.random(size=shape) + 1e-4
a2 = 6.0*np.random.random(size=shape) + 1e-4
b2 = 6.0*np.random.random(size=shape) + 1e-4
# Take inverse softplus of values to test BetaWithSoftplusAB
a1_sp = np.log(np.exp(a1) - 1.0)
b1_sp = np.log(np.exp(b1) - 1.0)
a2_sp = np.log(np.exp(a2) - 1.0)
b2_sp = np.log(np.exp(b2) - 1.0)
d1 = tf.contrib.distributions.Beta(a=a1, b=b1)
d2 = tf.contrib.distributions.Beta(a=a2, b=b2)
d1_sp = tf.contrib.distributions.BetaWithSoftplusAB(a=a1_sp, b=b1_sp)
d2_sp = tf.contrib.distributions.BetaWithSoftplusAB(a=a2_sp, b=b2_sp)
kl_expected = (special.betaln(a2, b2) - special.betaln(a1, b1)
+ (a1 - a2)*special.digamma(a1)
+ (b1 - b2)*special.digamma(b1)
+ (a2 - a1 + b2 - b1)*special.digamma(a1 + b1))
for dist1 in [d1, d1_sp]:
for dist2 in [d2, d2_sp]:
kl = tf.contrib.distributions.kl(dist1, dist2)
kl_val = sess.run(kl)
self.assertEqual(kl.get_shape(), shape)
self.assertAllClose(kl_val, kl_expected)
# Make sure KL(d1||d1) is 0
kl_same = sess.run(tf.contrib.distributions.kl(d1, d1))
self.assertAllClose(kl_same, np.zeros_like(kl_expected))
if __name__ == "__main__":
tf.test.main()
|
Pluto-tv/chromium-crosswalk
|
refs/heads/master
|
tools/telemetry/third_party/gsutilz/third_party/boto/boto/sdb/db/sequence.py
|
153
|
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.exception import SDBResponseError
from boto.compat import six
class SequenceGenerator(object):
"""Generic Sequence Generator object, this takes a single
string as the "sequence" and uses that to figure out
    what the next value in the sequence is. For example
if you give "ABC" and pass in "A" it will give you "B",
and if you give it "C" it will give you "AA".
If you set "rollover" to True in the above example, passing
in "C" would give you "A" again.
The Sequence string can be a string or any iterable
that has the "index" function and is indexable.
"""
__name__ = "SequenceGenerator"
def __init__(self, sequence_string, rollover=False):
"""Create a new SequenceGenerator using the sequence_string
as how to generate the next item.
:param sequence_string: The string or list that explains
how to generate the next item in the sequence
:type sequence_string: str,iterable
:param rollover: Rollover instead of incrementing when
we hit the end of the sequence
:type rollover: bool
"""
self.sequence_string = sequence_string
self.sequence_length = len(sequence_string[0])
self.rollover = rollover
self.last_item = sequence_string[-1]
self.__name__ = "%s('%s')" % (self.__class__.__name__, sequence_string)
def __call__(self, val, last=None):
"""Get the next value in the sequence"""
# If they pass us in a string that's not at least
        # the length of our sequence, then return the
# first element in our sequence
if val is None or len(val) < self.sequence_length:
return self.sequence_string[0]
last_value = val[-self.sequence_length:]
if (not self.rollover) and (last_value == self.last_item):
val = "%s%s" % (self(val[:-self.sequence_length]), self._inc(last_value))
else:
val = "%s%s" % (val[:-self.sequence_length], self._inc(last_value))
return val
def _inc(self, val):
"""Increment a single value"""
assert(len(val) == self.sequence_length)
return self.sequence_string[(self.sequence_string.index(val) + 1) % len(self.sequence_string)]
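# Usage sketch (added; not part of the original module), matching the
# class docstring above:
#
#     gen = SequenceGenerator("ABC")
#     gen("A")   # -> "B"
#     gen("C")   # -> "AA" (rollover=False is the default, so a new "digit" is prepended)
#     SequenceGenerator("ABC", rollover=True)("C")   # -> "A"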
#
# Simple Sequence Functions
#
def increment_by_one(cv=None, lv=None):
if cv is None:
return 0
return cv + 1
def double(cv=None, lv=None):
if cv is None:
return 1
return cv * 2
def fib(cv=1, lv=0):
"""The fibonacci sequence, this incrementer uses the
last value"""
if cv is None:
cv = 1
if lv is None:
lv = 0
return cv + lv
increment_string = SequenceGenerator("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
class Sequence(object):
"""A simple Sequence using the new SDB "Consistent" features
    Based largely on the "Counter" example from Mitch Garnaat:
http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py"""
def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None):
"""Create a new Sequence, using an optional function to
increment to the next number, by default we just increment by one.
Every parameter here is optional, if you don't specify any options
    then you'll get a new Sequence with a random ID stored in the
default domain that increments by one and uses the default botoweb
environment
:param id: Optional ID (name) for this counter
:type id: str
:param domain_name: Optional domain name to use, by default we get this out of the
environment configuration
:type domain_name:str
:param fnc: Optional function to use for the incrementation, by default we just increment by one
There are several functions defined in this module.
Your function must accept "None" to get the initial value
:type fnc: function, str
:param init_val: Initial value, by default this is the first element in your sequence,
but you can pass in any value, even a string if you pass in a function that uses
strings instead of ints to increment
"""
self._db = None
self._value = None
self.last_value = None
self.domain_name = domain_name
self.id = id
if init_val is None:
init_val = fnc(init_val)
if self.id is None:
import uuid
self.id = str(uuid.uuid4())
self.item_type = type(fnc(None))
self.timestamp = None
# Allow us to pass in a full name to a function
if isinstance(fnc, six.string_types):
from boto.utils import find_class
fnc = find_class(fnc)
self.fnc = fnc
# Bootstrap the value last
if not self.val:
self.val = init_val
def set(self, val):
"""Set the value"""
import time
now = time.time()
expected_value = []
new_val = {}
new_val['timestamp'] = now
if self._value is not None:
new_val['last_value'] = self._value
expected_value = ['current_value', str(self._value)]
new_val['current_value'] = val
try:
self.db.put_attributes(self.id, new_val, expected_value=expected_value)
self.timestamp = new_val['timestamp']
except SDBResponseError as e:
if e.status == 409:
raise ValueError("Sequence out of sync")
else:
raise
def get(self):
"""Get the value"""
val = self.db.get_attributes(self.id, consistent_read=True)
if val:
if 'timestamp' in val:
self.timestamp = val['timestamp']
if 'current_value' in val:
self._value = self.item_type(val['current_value'])
if "last_value" in val and val['last_value'] is not None:
self.last_value = self.item_type(val['last_value'])
return self._value
val = property(get, set)
def __repr__(self):
return "%s('%s', '%s', '%s.%s', '%s')" % (
self.__class__.__name__,
self.id,
self.domain_name,
self.fnc.__module__, self.fnc.__name__,
self.val)
def _connect(self):
"""Connect to our domain"""
if not self._db:
import boto
sdb = boto.connect_sdb()
if not self.domain_name:
self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default"))
try:
self._db = sdb.get_domain(self.domain_name)
except SDBResponseError as e:
if e.status == 400:
self._db = sdb.create_domain(self.domain_name)
else:
raise
return self._db
db = property(_connect)
def next(self):
self.val = self.fnc(self.val, self.last_value)
return self.val
def delete(self):
"""Remove this sequence"""
self.db.delete_attributes(self.id)
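# Usage sketch (added; not part of the original module). Requires SDB
# credentials configured for boto; "my-counter" is a hypothetical name.
#
#     s = Sequence(id="my-counter")   # increments by one by default
#     s.next()    # -> 1
#     s.next()    # -> 2
#     s.delete()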
|
maurerpe/FreeCAD
|
refs/heads/master
|
src/Mod/Material/InitGui.py
|
57
|
#***************************************************************************
#* *
#* Copyright (c) 2013 - Juergen Riegel <FreeCAD@juergen-riegel.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
|
hhirsch/dnt_rpg
|
refs/heads/master
|
blend/blendercal/bcobject.py
|
1
|
# -*- indent-tabs-mode: t -*-
# $Id: bcobject.py,v 1.2 2007/12/31 12:30:17 farpro Exp $
import Blender
import blendercal
import bcconf
import bcgui
# We use immutable sets in the LOD algorithm
# to identify unique edges and faces. Immutable
# sets are handy for identification because they're
# hashable and unordered.
from sets import ImmutableSet
CONCAT = lambda s, j="": j.join([str(v) for v in s])
STRFLT = lambda f: "%%.%df" % bcconf.FLOATPRE % f
class Cal3DObject(object):
# The base class for all of the bcobject classes. Children of this class must
# define a method called XML which should build and return an XML representation
# of the object. Furthermore, children that pass a value in the constructor
    # for the magic parameter are treated as top-level XML files and prepend
# the appropriate headers.
def __init__(self, magic=None):
self.__magic = magic
def __repr__(self):
ret = ""
xml = self.XML().replace("\t", "").replace("#", " " * bcconf.XMLINDENT)
if self.__magic:
ret += """<?xml version="1.0"?>\n"""
ret += """<HEADER MAGIC="%s" VERSION="1200"/>\n""" % self.__magic
return ret + xml
def __str__(self):
return self.__repr__()
def XML(self):
raise AttributeError, "Children must define this method!"
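# Minimal subclass sketch (added; not part of the original module):
#
#     class Example(Cal3DObject):
#         def __init__(self):
#             Cal3DObject.__init__(self, "XYZ")  # magic triggers XML headers
#         def XML(self):
#             return "<EXAMPLE/>\n"
#
# str(Example()) then yields the <?xml ...?> line, a <HEADER MAGIC="XYZ"
# VERSION="1200"/> line, and the body returned by XML().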
class Material(Cal3DObject):
MATERIALS = {}
def __init__(self, name, ambient=[255]*4, diffuse=[255]*4, specular=[255]*4, mapnames=None):
Cal3DObject.__init__(self, "XRF")
self.name = name
self.ambient = ambient
self.diffuse = diffuse
self.specular = specular
self.shininess = 1.0
self.mapnames = []
self.id = len(Material.MATERIALS)
mapnames and self.mapnames.extend(mapnames)
Material.MATERIALS[self.name] = self
def XML(self):
mapXML = ""
for mapname in self.mapnames:
mapXML += "#<MAP>" + mapname + "</MAP>\n"
return """\
<MATERIAL NUMMAPS="%s">
#<AMBIENT>%s %s %s %s</AMBIENT>
#<DIFFUSE>%s %s %s %s</DIFFUSE>
#<SPECULAR>%s %s %s %s</SPECULAR>
#<SHININESS>%s</SHININESS>
%s</MATERIAL>
""" % (
len(self.mapnames),
self.ambient[0],
self.ambient[1],
self.ambient[2],
self.ambient[3],
self.diffuse[0],
self.diffuse[1],
self.diffuse[2],
self.diffuse[3],
self.specular[0],
self.specular[1],
self.specular[2],
self.specular[3],
self.shininess,
mapXML
)
class Mesh(Cal3DObject):
def __init__(self, name):
Cal3DObject.__init__(self, "XMF")
self.name = name.replace(".", "_")
self.submeshes = []
def XML(self):
return """\
<MESH NUMSUBMESH="%s">
%s</MESH>
""" % (
len(self.submeshes),
CONCAT(self.submeshes)
)
class SubMesh(Cal3DObject):
def __init__(self, mesh, material):
Cal3DObject.__init__(self)
self.material = material
self.vertices = []
self.faces = []
self.springs = []
self.mesh = mesh
self.num_lodsteps = 0
mesh.submeshes.append(self)
if not material: self.material = Material("Default")
# These are all small classes for the creation of a very specific,
# temporary data structure for LOD calculations. The entire submesh
# will be temporarily copied into this structure, to allow more freedom
# in on-the-fly refactorizations and manipulations.
class LODVertex:
"""We need to factor in some other information, compared
to standard vertices, like edges and faces. On the other hand,
we don't really need stuff like UVs when we do this. Doing another
small, inner Vertex class for this, will hopefully not be
seen as a total waste."""
def __init__(self, origindex, loc, cloned):
self.id = origindex
self.loc = Blender.Mathutils.Vector(loc)
self.edges = {}
self.faces = {}
self.cloned = cloned
self.col_to = None
self.col_from = None
self.face_collapses = 0
self.deathmarked = False
def colto(self):
if self.col_to:
cvert = self.col_to
while cvert.col_to:
cvert = cvert.col_to
return cvert
else:
return self
def colfrom(self):
if self.col_from:
cvert = self.col_from
while cvert.col_from:
cvert = cvert.col_from
return cvert
else:
return self
def getid(self):
return self.colto().id
def getloc(self):
return self.colto().loc
def getfaces(self, facel = None):
if not facel:
facelist = []
else:
facelist = facel
for face in self.faces.values():
if (not face.dead) and (not facelist.__contains__(face)):
facelist.append(face)
if self.col_from:
facelist = self.col_from.getfaces(facelist)
return facelist
def getedges(self, edgel = None):
if not edgel:
edgelist = []
else:
edgelist = edgel
for edge in self.edges.values():
if (not edge.dead) and (not edgelist.__contains__(edge)):
edgelist.append(edge)
if self.col_from:
edgelist = self.col_from.getedges(edgelist)
return edgelist
class LODFace:
def __init__(self, verts, fid):
self.verts = verts
            vertset = ImmutableSet((self.verts[0].id, self.verts[1].id, self.verts[2].id))
            for vert in self.verts:
                vert.faces[vertset] = self
self.id = fid
self.edges = []
self.RefactorArea()
self.dead = False
def replaceVert(self, replacev, withv):
i = self.verts.index(replacev)
self.verts[i] = withv
# def Refactor(self):
# self.RefactorArea()
def RefactorArea(self):
crossp = Blender.Mathutils.CrossVecs(self.verts[1].getloc() - self.verts[2].getloc(),
self.verts[0].getloc() - self.verts[2].getloc())
self.area = (1./2.)*((crossp.x**2 + crossp.y**2 + crossp.z**2)**(1./2.))
def getHashableSet(self):
return ImmutableSet((self.verts[0].id, self.verts[1].id, self.verts[2].id))
class LODEdge:
"""Extra, inner class used for the temporary LOD datastructure"""
def __init__(self, v1, v2):
self.v1 = v1
self.v2 = v2
vertset = ImmutableSet((self.v1.id, self.v2.id))
self.v1.edges[vertset] = self
self.v2.edges[vertset] = self
# Get faces common for both v1 and v2
self.faces = []
#for key in filter(self.v1.faces.__contains__, self.v2.faces):
# face = self.v1.faces[key]
#self.faces[ImmutableSet((face.verts[0].id, face.verts[1].id, face.verts[2].id))] = face
# self.faces.append(face)
self.collapsed_faces = {}
self.RefactorLength()
self.dead = False
def getOtherVert(self, vertex):
if vertex == self.v1:
return self.v2
elif vertex == self.v2:
return self.v1
def Refactor(self):
self.RefactorLength()
self.RefactorWeight()
def RefactorLength(self):
self.length = (self.v2.getloc() - self.v1.getloc()).length
def RefactorWeight(self):
            # Determine which vert to collapse into which, using jiba's
            # method of basing this decision on the number of edges
            # connected to each vertex, i.e. collapse the vertex with
            # the fewest edges into the other one.
# The order of the vertices in v1, v2 do not matter in
# any other respect, so we simply use this order, and
# say we collapse v1 into v2.
if len(self.v1.getedges()) > len(self.v2.getedges()):
self.v1, self.v2 = self.v2, self.v1
# Get total area of faces surrounding edge
area = 0
for face in self.faces:
area += face.area
proportional_area = area / avg_area
proportional_length = self.length / avg_length
# Get dot products (angle sharpness) of edges connected to v1
edgeverts_factor = 0
self_vec = self.v2.getloc() - self.v1.getloc()
self_vec.normalize()
for edge in self.v1.edges.values():
if edge != self:
edgevert = edge.getOtherVert(self.v1)
edge_vec = edgevert.getloc() - self.v1.getloc()
edge_vec.normalize()
edgeverts_factor += (1 - Blender.Mathutils.DotVecs(self_vec, edge_vec))/2
# Get dot products of edges connected to v2. Wohoo, copy-paste!
self_vec = self.v1.getloc() - self.v2.getloc()
self_vec.normalize()
for edge in self.v2.edges.values():
if edge != self:
edgevert = edge.getOtherVert(self.v2)
edge_vec = edgevert.getloc() - self.v2.getloc()
edge_vec.normalize()
edgeverts_factor += (1 - Blender.Mathutils.DotVecs(self_vec, edge_vec))/2
# Error metric, or magic formula. Whatever you like to call it.
# This calculates the weight of the edge, based on the
# information we have now gathered. We can change this at
# any time to try and get better results.
self.weight = proportional_area * proportional_length * edgeverts_factor
#self.weight = proportional_length
return self.weight
def getHashableSet(self):
return ImmutableSet((self.v1.id, self.v2.id))
def collapse(self):
if self.v1.col_to or self.v2.col_to:
return False
if self.v1.cloned or self.v2.cloned:
return False
if len(self.faces) < 2:
return False
self.dead = True
# Mark all faces as dead and the two
# collapsed edges as dead
for face in filter(self.v1.getfaces().__contains__, self.v2.getfaces()):
# If not dead, add to dict of faces to collapse with this edge
if not face.dead:
self.collapsed_faces[face.getHashableSet()] = face
self.v1.face_collapses += 1
face.dead = True
# Mark collapsed edges as dead. Edges that don't share
# a vertex with this edge's v2 dies.
for edge in face.edges:
if (edge.v1 != self.v2) and (edge.v2 != self.v2):
edge.dead = True
# for face in self.faces:
# face.dead = True
# for edge in face.edges:
# if (edge.v1 != self.v2) and (edge.v2 != self.v2):
# edge.dead = True
# self.v1.face_collapses += 1
# Refactor area of all non-dead faces on vertex 1
for face in self.v1.getfaces():
if not face.dead:
face.RefactorArea()
# Refactor lengths and weights of all non-dead
# edges on vertex 1
for edge in self.v1.getedges():
if not edge.dead:
edge.Refactor()
self.v2.colfrom().col_from = self.v1
self.v1.col_to = self.v2
return True
def LOD(self):
global avg_area, avg_length
progressbar = bcgui.Progress(10)
# Step one. Build temporary data structure suited for weight calculations.
# Vertices are the only ones that can be/need to be ordered.
# Faces and edges are dicts, with Immutable Sets (containing Vertex indices) as keys.
LODverts = []
LODfaces = {}
LODedges = {}
# Create vertices
progressbar.setup(len(self.vertices), "Creating LODverts")
for vertex in self.vertices:
progressbar.increment()
LODverts.append(self.LODVertex(vertex.id, vertex.loc, vertex.cloned))
# Create faces
num_faces = 0
avg_area = 0
total_area = 0
progressbar.setup(len(self.faces), "Creating LODfaces")
for face in self.faces:
progressbar.increment()
lface = self.LODFace([LODverts[face.vertices[0].id], LODverts[face.vertices[1].id], LODverts[face.vertices[2].id]], num_faces)
LODfaces[lface.getHashableSet()] = lface
total_area += lface.area
num_faces += 1
if num_faces:
avg_area = total_area / float(num_faces)
# Create edges
num_edges = 0
avg_length = 0
total_length = 0
progressbar.setup(len(LODfaces), "Creating LODedges")
for lodface in LODfaces.values():
progressbar.increment()
#Create the three edges from this face
for e in [(0, 1), (0, 2), (1, 2)]:
imset = ImmutableSet((lodface.verts[e[0]].id, lodface.verts[e[1]].id))
if not LODedges.has_key(imset):
#Create edge
lodedge = self.LODEdge(lodface.verts[e[0]],
lodface.verts[e[1]])
LODedges[imset] = lodedge
lodface.edges.append(lodedge)
lodedge.faces.append(lodface)
total_length += lodedge.length
num_edges += 1
else:
lodedge = LODedges[imset]
lodface.edges.append(lodedge)
lodedge.faces.append(lodface)
if num_edges:
avg_length = total_length / float(num_edges)
# print total_length
# print avg_length
# Step two. Calculate initial weights of all edges.
progressbar.setup(len(LODedges), "Calculating weights")
for edge in LODedges.values():
progressbar.increment()
edge.RefactorWeight()
# print edge.weight
# Order edges in list after weights
LODedgelist = LODedges.values()
LODedgelist.sort(self.compareweights)
weight = LODedgelist[0].weight
percentage = len(LODedgelist) * 0.6
count = 0
collapse_list = []
progressbar.setup(percentage, "Calculating LOD")
while count < percentage:
edge = LODedgelist.pop(0)
if not edge.dead:
if edge.collapse():
LODedgelist.sort(self.compareweights)
collapse_list.append((edge.v1, edge.collapsed_faces))
count += 1
progressbar.increment()
self.num_lodsteps = len(collapse_list)
newvertlist = []
newfacelist = []
# The list should be in reverse order, with the most
# important ones first.
collapse_list.reverse()
for vertex, faces in collapse_list:
vertex.col_to = self.vertices[vertex.col_to.id]
for vertex in LODverts:
if not vertex.col_to:
cvert = self.vertices[vertex.id]
cvert.id = len(newvertlist)
newvertlist.append(cvert)
for face in LODfaces.values():
if not face.dead:
newfacelist.append(self.faces[face.id])
for vertex, faces in collapse_list:
for face in faces.values():
newfacelist.append(self.faces[face.id])
cvert = self.vertices[vertex.id]
cvert.id = len(newvertlist)
cvert.collapse_to = vertex.col_to
cvert.num_faces = vertex.face_collapses
newvertlist.append(cvert)
self.vertices = newvertlist
self.faces = newfacelist
def compareweights(self, x, y):
result = x.weight - y.weight
if result < 0:
return -1
elif result > 0:
return 1
else:
return 0
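# Note: compareweights() is a Python 2 style cmp() function passed to
# list.sort(). A hedged sketch of the Python 3 equivalent (hypothetical,
# not part of this exporter) would sort by a key instead:
#
#   LODedgelist.sort(key=lambda edge: edge.weight)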
def XML(self):
return """\
#<SUBMESH
##NUMVERTICES="%s"
##NUMFACES="%s"
##MATERIAL="%s"
##NUMLODSTEPS="%s"
##NUMSPRINGS="%s"
##NUMTEXCOORDS="%s"
#>
%s%s%s#</SUBMESH>
""" % (
len(self.vertices),
len(self.faces),
self.material.id,
self.num_lodsteps,
len(self.springs),
len(self.material.mapnames),
CONCAT(self.vertices),
CONCAT(self.springs),
CONCAT(self.faces)
)
#class Progress:
# self.progress = 0.0
# self.
class Map(Cal3DObject):
def __init__(self, uv):
Cal3DObject.__init__(self)
self.uv = Blender.Mathutils.Vector(uv)
def XML(self):
return "###<TEXCOORD>%s %s</TEXCOORD>\n" % (
STRFLT(self.uv.x),
STRFLT(self.uv.y)
)
class Vertex(Cal3DObject):
# Note that this class keeps Blender objects as self.loc and
# self.normal, and that we "wrap" the existing instances in copies
# of our own, since I was experiencing bugs where the existing
# instances would go out of scope.
def __init__(self, submesh, loc, normal, cloned, uvs):
Cal3DObject.__init__(self)
self.loc = Blender.Mathutils.Vector(loc)
self.normal = Blender.Mathutils.Vector(normal)
self.maps = []
self.influences = []
self.submesh = submesh
self.id = len(submesh.vertices)
self.cloned = cloned
self.collapse_to = None
self.num_faces = 0
# If one UV is None, the rest will also be None.
if len(uvs) and (uvs[0][0] != None):
self.maps.extend([Map(uv) for uv in uvs])
submesh.vertices.append(self)
def XML(self):
loc = blendercal.VECTOR2GL(self.loc)
normal = blendercal.VECTOR2GL(self.normal)
unset = lambda t: "###<!-- %s unset -->\n" % t
collapse = ""
# Note: collapse_to is an index, and _can_ be 0
if self.collapse_to != None:
collapse = """\
###<COLLAPSEID>%s</COLLAPSEID>
###<COLLAPSECOUNT>%s</COLLAPSECOUNT>
""" % (
str(self.collapse_to.id),
str(self.num_faces)
)
loc = loc * bcconf.SCALE
normal = normal * bcconf.SCALE
return """\
##<VERTEX ID="%s" NUMINFLUENCES="%s">
###<POS>%s %s %s</POS>
###<NORM>%s %s %s</NORM>
%s%s%s##</VERTEX>
""" % (
self.id,
len(self.influences),
STRFLT(loc.x),
STRFLT(loc.y),
STRFLT(loc.z),
STRFLT(normal.x),
STRFLT(normal.y),
STRFLT(normal.z),
collapse,
len(self.maps) and CONCAT(self.maps) or unset("UV Coords"),
self.influences and CONCAT(self.influences) or unset("Influences")
)
class Influence(Cal3DObject):
def __init__(self, bone, weight):
Cal3DObject.__init__(self)
self.bone = bone
self.weight = weight
def XML(self):
return """###<INFLUENCE ID="%s">%s</INFLUENCE>\n""" % (
self.bone.id,
STRFLT(self.weight)
)
class Face(Cal3DObject):
def __init__(self, submesh, v1, v2, v3):
Cal3DObject.__init__(self)
self.vertices = (v1, v2, v3)
self.submesh = submesh
submesh.faces.append(self)
def XML(self):
return """##<FACE VERTEXID="%s %s %s"/>\n""" % (self.vertices[0].id, self.vertices[1].id, self.vertices[2].id)
class Skeleton(Cal3DObject):
ARMATURE = None
def __init__(self):
Cal3DObject.__init__(self, "XSF")
self.bones = []
def XML(self):
return """\
<SKELETON NUMBONES="%s">
%s</SKELETON>
""" % (
len(self.bones),
CONCAT(self.bones)
)
class Bone(Cal3DObject):
BONES = {}
def __init__(self, skeleton, parent, bone, armamat):
Cal3DObject.__init__(self)
absmat = bone.matrix["ARMATURESPACE"] * armamat
self.parent = parent
self.name = bone.name.replace(".", "_")
self.invert = Blender.Mathutils.Matrix(absmat).invert()
self.local = (parent and (absmat * self.parent.invert)) or absmat
self.children = []
self.skeleton = skeleton
self.id = len(skeleton.bones)
if self.parent:
self.parent.children.append(self)
skeleton.bones.append(self)
Bone.BONES[self.name] = self
def XML(self):
# TRANSLATION and ROTATION are relative to the parent bone.
# They are virtually useless since the animations (.XAF .CAF)
# will always override them.
#
# LOCALTRANSLATION and LOCALROTATION are the inverse of the cumulated
# TRANSLATION and ROTATION (see above). They are used to calculate the
# delta between an animated bone and the original non-animated bone.
# This delta is applied to the influenced vertices.
#
# Negate the rotation because blender rotations are clockwise
# and cal3d rotations are counterclockwise
local = blendercal.MATRIX2GL(self.local)
local = local * bcconf.SCALE
localloc = local.translationPart()
localrot = local.toQuat()
invert = blendercal.MATRIX2GL(self.invert)
invertloc = invert.translationPart()
invertloc = invertloc * bcconf.SCALE
invertrot = invert.toQuat()
return """\
#<BONE ID="%s" NAME="%s" NUMCHILD="%s">
##<TRANSLATION>%s %s %s</TRANSLATION>
##<ROTATION>%s %s %s -%s</ROTATION>
##<LOCALTRANSLATION>%s %s %s</LOCALTRANSLATION>
##<LOCALROTATION>%s %s %s -%s</LOCALROTATION>
##<PARENTID>%s</PARENTID>
%s#</BONE>
""" % (
self.id,
self.name,
len(self.children),
STRFLT(localloc.x),
STRFLT(localloc.y),
STRFLT(localloc.z),
STRFLT(localrot.x),
STRFLT(localrot.y),
STRFLT(localrot.z),
STRFLT(localrot.w),
STRFLT(invertloc.x),
STRFLT(invertloc.y),
STRFLT(invertloc.z),
STRFLT(invertrot.x),
STRFLT(invertrot.y),
STRFLT(invertrot.z),
STRFLT(invertrot.w),
self.parent and "%d" % self.parent.id or "-1",
"".join(["##<CHILDID>%s</CHILDID>\n" % c.id for c in self.children])
)
class Animation(Cal3DObject):
def __init__(self, name, duration=0.0):
Cal3DObject.__init__(self, "XAF")
self.name = name.replace(".", "_")
self.duration = duration
self.tracks = {}
def XML(self):
return """\
<ANIMATION DURATION="%s" NUMTRACKS="%s">
%s</ANIMATION>
""" % (
self.duration,
len(self.tracks),
CONCAT(self.tracks.values())
)
class Track(Cal3DObject):
def __init__(self, animation, bone):
Cal3DObject.__init__(self)
self.bone = bone
self.keyframes = []
self.animation = animation
animation.tracks[bone.name] = self
def XML(self):
return """\
#<TRACK BONEID="%s" NUMKEYFRAMES="%s">
%s#</TRACK>
""" % (
self.bone.id,
len(self.keyframes),
CONCAT(self.keyframes)
)
class KeyFrame(Cal3DObject):
def __init__(self, track, time, loc, rot):
Cal3DObject.__init__(self)
self.time = time
self.loc = Blender.Mathutils.Vector(loc)
self.rot = Blender.Mathutils.Quaternion(rot)
self.track = track
track.keyframes.append(self)
def XML(self):
self.loc = self.loc * bcconf.SCALE
return """\
##<KEYFRAME TIME="%s">
###<TRANSLATION>%s %s %s</TRANSLATION>
###<ROTATION>%s %s %s -%s</ROTATION>
##</KEYFRAME>
""" % (
STRFLT(self.time),
STRFLT(self.loc.x),
STRFLT(self.loc.y),
STRFLT(self.loc.z),
STRFLT(self.rot.x),
STRFLT(self.rot.y),
STRFLT(self.rot.z),
STRFLT(self.rot.w)
)
|
benfinkelcbt/CPD200
|
refs/heads/master
|
CPD200-Lab13-Python/rsa/cli.py
|
81
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commandline scripts.
These scripts are called by the executables defined in setup.py.
"""
from __future__ import with_statement, print_function
import abc
import sys
from optparse import OptionParser
import rsa
import rsa.bigfile
import rsa.pkcs1
HASH_METHODS = sorted(rsa.pkcs1.HASH_METHODS.keys())
def keygen():
"""Key generator."""
# Parse the CLI options
parser = OptionParser(usage='usage: %prog [options] keysize',
description='Generates a new RSA keypair of "keysize" bits.')
parser.add_option('--pubout', type='string',
help='Output filename for the public key. The public key is '
'not saved if this option is not present. You can use '
'pyrsa-priv2pub to create the public key file later.')
parser.add_option('-o', '--out', type='string',
help='Output filename for the private key. The key is '
'written to stdout if this option is not present.')
parser.add_option('--form',
help='key format of the private and public keys - default PEM',
choices=('PEM', 'DER'), default='PEM')
(cli, cli_args) = parser.parse_args(sys.argv[1:])
if len(cli_args) != 1:
parser.print_help()
raise SystemExit(1)
try:
keysize = int(cli_args[0])
except ValueError:
parser.print_help()
print('Not a valid number: %s' % cli_args[0], file=sys.stderr)
raise SystemExit(1)
print('Generating %i-bit key' % keysize, file=sys.stderr)
(pub_key, priv_key) = rsa.newkeys(keysize)
# Save public key
if cli.pubout:
print('Writing public key to %s' % cli.pubout, file=sys.stderr)
data = pub_key.save_pkcs1(format=cli.form)
with open(cli.pubout, 'wb') as outfile:
outfile.write(data)
# Save private key
data = priv_key.save_pkcs1(format=cli.form)
if cli.out:
print('Writing private key to %s' % cli.out, file=sys.stderr)
with open(cli.out, 'wb') as outfile:
outfile.write(data)
else:
print('Writing private key to stdout', file=sys.stderr)
sys.stdout.write(data)
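# A minimal usage sketch of the generator above (assuming the console
# script defined in setup.py is named 'pyrsa-keygen'):
#
#   pyrsa-keygen --pubout public.pem -o private.pem 2048
#
# which writes a 2048-bit private key to private.pem and the matching
# public key to public.pem, both in the default PEM format.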
class CryptoOperation(object):
"""CLI callable that operates with input, output, and a key."""
__metaclass__ = abc.ABCMeta
keyname = 'public' # or 'private'
usage = 'usage: %%prog [options] %(keyname)s_key'
description = None
operation = 'decrypt'
operation_past = 'decrypted'
operation_progressive = 'decrypting'
input_help = 'Name of the file to %(operation)s. Reads from stdin if ' \
'not specified.'
output_help = 'Name of the file to write the %(operation_past)s file ' \
'to. Written to stdout if this option is not present.'
expected_cli_args = 1
has_output = True
key_class = rsa.PublicKey
def __init__(self):
self.usage = self.usage % self.__class__.__dict__
self.input_help = self.input_help % self.__class__.__dict__
self.output_help = self.output_help % self.__class__.__dict__
@abc.abstractmethod
def perform_operation(self, indata, key, cli_args=None):
"""Performs the program's operation.
Implement in a subclass.
:returns: the data to write to the output.
"""
def __call__(self):
"""Runs the program."""
(cli, cli_args) = self.parse_cli()
key = self.read_key(cli_args[0], cli.keyform)
indata = self.read_infile(cli.input)
print(self.operation_progressive.title(), file=sys.stderr)
outdata = self.perform_operation(indata, key, cli_args)
if self.has_output:
self.write_outfile(outdata, cli.output)
def parse_cli(self):
"""Parse the CLI options
:returns: (cli_opts, cli_args)
"""
parser = OptionParser(usage=self.usage, description=self.description)
parser.add_option('-i', '--input', type='string', help=self.input_help)
if self.has_output:
parser.add_option('-o', '--output', type='string', help=self.output_help)
parser.add_option('--keyform',
help='Key format of the %s key - default PEM' % self.keyname,
choices=('PEM', 'DER'), default='PEM')
(cli, cli_args) = parser.parse_args(sys.argv[1:])
if len(cli_args) != self.expected_cli_args:
parser.print_help()
raise SystemExit(1)
return cli, cli_args
def read_key(self, filename, keyform):
"""Reads a public or private key."""
print('Reading %s key from %s' % (self.keyname, filename), file=sys.stderr)
with open(filename, 'rb') as keyfile:
keydata = keyfile.read()
return self.key_class.load_pkcs1(keydata, keyform)
def read_infile(self, inname):
"""Read the input file"""
if inname:
print('Reading input from %s' % inname, file=sys.stderr)
with open(inname, 'rb') as infile:
return infile.read()
print('Reading input from stdin', file=sys.stderr)
return sys.stdin.read()
def write_outfile(self, outdata, outname):
"""Write the output file"""
if outname:
print('Writing output to %s' % outname, file=sys.stderr)
with open(outname, 'wb') as outfile:
outfile.write(outdata)
else:
print('Writing output to stdout', file=sys.stderr)
sys.stdout.write(outdata)
class EncryptOperation(CryptoOperation):
"""Encrypts a file."""
keyname = 'public'
description = ('Encrypts a file. The file must be shorter than the key '
'length in order to be encrypted. For larger files, use the '
'pyrsa-encrypt-bigfile command.')
operation = 'encrypt'
operation_past = 'encrypted'
operation_progressive = 'encrypting'
def perform_operation(self, indata, pub_key, cli_args=None):
"""Encrypts files."""
return rsa.encrypt(indata, pub_key)
class DecryptOperation(CryptoOperation):
"""Decrypts a file."""
keyname = 'private'
description = ('Decrypts a file. The original file must be shorter than '
'the key length in order to have been encrypted. For larger '
'files, use the pyrsa-decrypt-bigfile command.')
operation = 'decrypt'
operation_past = 'decrypted'
operation_progressive = 'decrypting'
key_class = rsa.PrivateKey
def perform_operation(self, indata, priv_key, cli_args=None):
"""Decrypts files."""
return rsa.decrypt(indata, priv_key)
class SignOperation(CryptoOperation):
"""Signs a file."""
keyname = 'private'
usage = 'usage: %%prog [options] private_key hash_method'
description = ('Signs a file, outputs the signature. Choose the hash '
'method from %s' % ', '.join(HASH_METHODS))
operation = 'sign'
operation_past = 'signature'
operation_progressive = 'Signing'
key_class = rsa.PrivateKey
expected_cli_args = 2
output_help = ('Name of the file to write the signature to. Written '
'to stdout if this option is not present.')
def perform_operation(self, indata, priv_key, cli_args):
"""Signs files."""
hash_method = cli_args[1]
if hash_method not in HASH_METHODS:
raise SystemExit('Invalid hash method, choose one of %s' %
', '.join(HASH_METHODS))
return rsa.sign(indata, priv_key, hash_method)
class VerifyOperation(CryptoOperation):
"""Verify a signature."""
keyname = 'public'
usage = 'usage: %%prog [options] public_key signature_file'
description = ('Verifies a signature, exits with status 0 upon success, '
'prints an error message and exits with status 1 upon error.')
operation = 'verify'
operation_past = 'verified'
operation_progressive = 'Verifying'
key_class = rsa.PublicKey
expected_cli_args = 2
has_output = False
def perform_operation(self, indata, pub_key, cli_args):
"""Verifies files."""
signature_file = cli_args[1]
with open(signature_file, 'rb') as sigfile:
signature = sigfile.read()
try:
rsa.verify(indata, signature, pub_key)
except rsa.VerificationError:
raise SystemExit('Verification failed.')
print('Verification OK', file=sys.stderr)
class BigfileOperation(CryptoOperation):
"""CryptoOperation that doesn't read the entire file into memory."""
def __init__(self):
CryptoOperation.__init__(self)
self.file_objects = []
def __del__(self):
"""Closes any open file handles."""
for fobj in self.file_objects:
fobj.close()
def __call__(self):
"""Runs the program."""
(cli, cli_args) = self.parse_cli()
key = self.read_key(cli_args[0], cli.keyform)
# Get the file handles
infile = self.get_infile(cli.input)
outfile = self.get_outfile(cli.output)
# Call the operation
print(self.operation_progressive.title(), file=sys.stderr)
self.perform_operation(infile, outfile, key, cli_args)
def get_infile(self, inname):
"""Returns the input file object"""
if inname:
print('Reading input from %s' % inname, file=sys.stderr)
fobj = open(inname, 'rb')
self.file_objects.append(fobj)
else:
print('Reading input from stdin', file=sys.stderr)
fobj = sys.stdin
return fobj
def get_outfile(self, outname):
"""Returns the output file object"""
if outname:
print('Will write output to %s' % outname, file=sys.stderr)
fobj = open(outname, 'wb')
self.file_objects.append(fobj)
else:
print('Will write output to stdout', file=sys.stderr)
fobj = sys.stdout
return fobj
class EncryptBigfileOperation(BigfileOperation):
"""Encrypts a file to VARBLOCK format."""
keyname = 'public'
description = ('Encrypts a file to an encrypted VARBLOCK file. The file '
'can be larger than the key length, but the output file is only '
'compatible with Python-RSA.')
operation = 'encrypt'
operation_past = 'encrypted'
operation_progressive = 'encrypting'
def perform_operation(self, infile, outfile, pub_key, cli_args=None):
"""Encrypts files to VARBLOCK."""
return rsa.bigfile.encrypt_bigfile(infile, outfile, pub_key)
class DecryptBigfileOperation(BigfileOperation):
"""Decrypts a file in VARBLOCK format."""
keyname = 'private'
description = ('Decrypts an encrypted VARBLOCK file that was encrypted '
'with pyrsa-encrypt-bigfile')
operation = 'decrypt'
operation_past = 'decrypted'
operation_progressive = 'decrypting'
key_class = rsa.PrivateKey
def perform_operation(self, infile, outfile, priv_key, cli_args=None):
"""Decrypts a VARBLOCK file."""
return rsa.bigfile.decrypt_bigfile(infile, outfile, priv_key)
encrypt = EncryptOperation()
decrypt = DecryptOperation()
sign = SignOperation()
verify = VerifyOperation()
encrypt_bigfile = EncryptBigfileOperation()
decrypt_bigfile = DecryptBigfileOperation()
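# A minimal library-level sketch of what the encrypt/decrypt operations
# above boil down to (key size and message are illustrative only):
#
#   (pub_key, priv_key) = rsa.newkeys(512)
#   crypto = rsa.encrypt(b'hello', pub_key)
#   assert rsa.decrypt(crypto, priv_key) == b'hello'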
|
ArnossArnossi/django
|
refs/heads/master
|
tests/apps/__init__.py
|
12133432
| |
slayerjain/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pytest/_pytest/vendored_packages/__init__.py
|
12133432
| |
bgris/ODL_bgris
|
refs/heads/master
|
lib/python3.5/site-packages/prompt_toolkit/keys.py
|
20
|
from __future__ import unicode_literals
__all__ = (
'Key',
'Keys',
)
class Key(object):
def __init__(self, name):
#: Descriptive way of writing keys in configuration files. e.g. <C-A>
#: for ``Control-A``.
self.name = name
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.name)
class Keys(object):
Escape = Key('<Escape>')
ControlA = Key('<C-A>')
ControlB = Key('<C-B>')
ControlC = Key('<C-C>')
ControlD = Key('<C-D>')
ControlE = Key('<C-E>')
ControlF = Key('<C-F>')
ControlG = Key('<C-G>')
ControlH = Key('<C-H>')
ControlI = Key('<C-I>') # Tab
ControlJ = Key('<C-J>') # Enter
ControlK = Key('<C-K>')
ControlL = Key('<C-L>')
ControlM = Key('<C-M>') # Enter
ControlN = Key('<C-N>')
ControlO = Key('<C-O>')
ControlP = Key('<C-P>')
ControlQ = Key('<C-Q>')
ControlR = Key('<C-R>')
ControlS = Key('<C-S>')
ControlT = Key('<C-T>')
ControlU = Key('<C-U>')
ControlV = Key('<C-V>')
ControlW = Key('<C-W>')
ControlX = Key('<C-X>')
ControlY = Key('<C-Y>')
ControlZ = Key('<C-Z>')
ControlSpace = Key('<C-Space>')
ControlBackslash = Key('<C-Backslash>')
ControlSquareClose = Key('<C-SquareClose>')
ControlCircumflex = Key('<C-Circumflex>')
ControlUnderscore = Key('<C-Underscore>')
ControlLeft = Key('<C-Left>')
ControlRight = Key('<C-Right>')
ControlUp = Key('<C-Up>')
ControlDown = Key('<C-Down>')
Up = Key('<Up>')
Down = Key('<Down>')
Right = Key('<Right>')
Left = Key('<Left>')
ShiftLeft = Key('<ShiftLeft>')
ShiftUp = Key('<ShiftUp>')
ShiftDown = Key('<ShiftDown>')
ShiftRight = Key('<ShiftRight>')
Home = Key('<Home>')
End = Key('<End>')
Delete = Key('<Delete>')
ShiftDelete = Key('<ShiftDelete>')
ControlDelete = Key('<C-Delete>')
PageUp = Key('<PageUp>')
PageDown = Key('<PageDown>')
BackTab = Key('<BackTab>') # shift + tab
Insert = Key('<Insert>')
Backspace = Key('<Backspace>')
# Aliases.
Tab = ControlI
Enter = ControlJ
# XXX: Actually Enter equals ControlM, not ControlJ,
# However, in prompt_toolkit, we made the mistake of translating
# \r into \n during the input, so everyone is now handling the
# enter key by binding ControlJ.
# From now on, it's better to bind `Keys.Enter` everywhere,
# because that's future compatible, and will still work when we
# stop replacing \r by \n.
F1 = Key('<F1>')
F2 = Key('<F2>')
F3 = Key('<F3>')
F4 = Key('<F4>')
F5 = Key('<F5>')
F6 = Key('<F6>')
F7 = Key('<F7>')
F8 = Key('<F8>')
F9 = Key('<F9>')
F10 = Key('<F10>')
F11 = Key('<F11>')
F12 = Key('<F12>')
F13 = Key('<F13>')
F14 = Key('<F14>')
F15 = Key('<F15>')
F16 = Key('<F16>')
F17 = Key('<F17>')
F18 = Key('<F18>')
F19 = Key('<F19>')
F20 = Key('<F20>')
F21 = Key('<F21>')
F22 = Key('<F22>')
F23 = Key('<F23>')
F24 = Key('<F24>')
# Matches any key.
Any = Key('<Any>')
# Special
CPRResponse = Key('<Cursor-Position-Response>')
Vt100MouseEvent = Key('<Vt100-Mouse-Event>')
WindowsMouseEvent = Key('<Windows-Mouse-Event>')
BracketedPaste = Key('<Bracketed-Paste>')
# Key which is ignored. (The key binding for this key should not do
# anything.)
Ignore = Key('<Ignore>')
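# A small self-contained sketch using only the classes defined above:
#
#   assert Keys.Enter is Keys.ControlJ      # alias, see the note above
#   assert repr(Keys.ControlA) == "Key('<C-A>')"
#
# Key bindings are typically registered against these constants rather
# than against raw escape sequences.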
|
sevansahumIlovemuhammad/grit-i18n
|
refs/heads/master
|
grit/format/resource_map.py
|
27
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''This file contains item formatters for resource_map_header and
resource_map_source files. A resource map is a mapping between resource names
(string) and the internal resource ID.'''
import os
from functools import partial
from grit import util
def GetFormatter(type):
if type == 'resource_map_header':
return _FormatHeader
elif type == 'resource_map_source':
return partial(_FormatSource, _GetItemName)
elif type == 'resource_file_map_source':
return partial(_FormatSource, _GetItemPath)
def GetMapName(root):
'''Get the name of the resource map based on the header file name. E.g.,
if our header filename is theme_resources.h, we name our resource map
kThemeResourcesMap.
|root| is the grd file root.'''
outputs = root.GetOutputFiles()
rc_header_file = None
for output in outputs:
if 'rc_header' == output.GetType():
rc_header_file = output.GetFilename()
if not rc_header_file:
raise Exception('unable to find resource header filename')
filename = os.path.splitext(os.path.split(rc_header_file)[1])[0]
filename = filename[0].upper() + filename[1:]
while filename.find('_') != -1:
pos = filename.find('_')
if pos + 1 >= len(filename):  # guard against a trailing underscore
break
filename = filename[:pos] + filename[pos + 1].upper() + filename[pos + 2:]
return 'k' + filename
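# Worked example of the camel-casing loop above (illustrative file name):
#   'new_tab_resources' -> 'New_tab_resources' (first letter upper-cased)
#   -> 'NewTab_resources' -> 'NewTabResources',
# so the function returns 'kNewTabResources'.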
def _FormatHeader(root, lang='en', output_dir='.'):
'''Create the header file for the resource mapping. This file just declares
an array of name/value pairs.'''
return '''\
// This file is automatically generated by GRIT. Do not edit.
#include <stddef.h>
#ifndef GRIT_RESOURCE_MAP_STRUCT_
#define GRIT_RESOURCE_MAP_STRUCT_
struct GritResourceMap {
const char* const name;
int value;
};
#endif // GRIT_RESOURCE_MAP_STRUCT_
extern const GritResourceMap %(map_name)s[];
extern const size_t %(map_name)sSize;
''' % { 'map_name': GetMapName(root) }
def _FormatSourceHeader(root):
'''Create the header of the C++ source file for the resource mapping.'''
rc_header_file = None
map_header_file = None
for output in root.GetOutputFiles():
if 'rc_header' == output.GetType():
rc_header_file = output.GetFilename()
elif 'resource_map_header' == output.GetType():
map_header_file = output.GetFilename()
if not rc_header_file or not map_header_file:
raise Exception('resource_map_source output type requires '
'resource_map_header and rc_header outputs')
return '''\
// This file is automatically generated by GRIT. Do not edit.
#include "%(map_header_file)s"
#include "base/basictypes.h"
#include "%(rc_header_file)s"
const GritResourceMap %(map_name)s[] = {
''' % { 'map_header_file': map_header_file,
'rc_header_file': rc_header_file,
'map_name': GetMapName(root),
}
def _FormatSourceFooter(root):
# Return the footer text.
return '''\
};
const size_t %(map_name)sSize = arraysize(%(map_name)s);
''' % { 'map_name': GetMapName(root) }
def _FormatSource(get_key, root, lang, output_dir):
from grit.format import rc_header
from grit.node import include, structure, message
yield _FormatSourceHeader(root)
tids = rc_header.GetIds(root)
seen = set()
active_descendants = [item for item in root.ActiveDescendants()]
output_all_resource_defines = root.ShouldOutputAllResourceDefines()
for item in root:
if not item.IsResourceMapSource():
continue
key = get_key(item)
tid = item.attrs['name']
if tid not in tids or key in seen:
continue
seen.add(key)
if item.GeneratesResourceMapEntry(output_all_resource_defines,
item in active_descendants):
yield ' {"%s", %s},\n' % (key, tid)
yield _FormatSourceFooter(root)
def _GetItemName(item):
return item.attrs['name']
def _GetItemPath(item):
return item.GetInputPath().replace("\\", "/")
|
zac1st1k/CodeIgniter
|
refs/heads/master
|
user_guide_src/source/_themes/sphinx_rtd_theme/__init__.py
|
1504
|
"""Sphinx ReadTheDocs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
import os
VERSION = (0, 1, 5)
__version__ = ".".join(str(v) for v in VERSION)
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return cur_dir
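# Typical use from a Sphinx conf.py (illustrative; this is the documented
# pattern for path-based themes):
#
#   import sphinx_rtd_theme
#   html_theme = 'sphinx_rtd_theme'
#   html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]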
|
oihane/odoo
|
refs/heads/8.0
|
addons/project_timesheet/project_timesheet.py
|
237
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
class project_project(osv.osv):
_inherit = 'project.project'
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
res = super(project_project, self).onchange_partner_id(cr, uid, ids, part, context)
if part and res and ('value' in res):
# set Invoice Task Work to 100%
data_obj = self.pool.get('ir.model.data')
data_id = data_obj._get_id(cr, uid, 'hr_timesheet_invoice', 'timesheet_invoice_factor1')
if data_id:
factor_id = data_obj.browse(cr, uid, data_id).res_id
res['value'].update({'to_invoice': factor_id})
return res
_defaults = {
'use_timesheets': True,
}
def open_timesheets(self, cr, uid, ids, context=None):
""" open Timesheets view """
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
project = self.browse(cr, uid, ids[0], context)
view_context = {
'search_default_account_id': [project.analytic_account_id.id],
'default_account_id': project.analytic_account_id.id,
}
help = _("""<p class="oe_view_nocontent_create">Record your timesheets for the project '%s'.</p>""") % (project.name,)
try:
if project.to_invoice and project.partner_id:
help+= _("""<p>Timesheets on this project may be invoiced to %s, according to the terms defined in the contract.</p>""" ) % (project.partner_id.name,)
except:
# if the user does not have access rights on the partner
pass
res = mod_obj.get_object_reference(cr, uid, 'hr_timesheet', 'act_hr_timesheet_line_evry1_all_form')
id = res and res[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['name'] = _('Timesheets')
result['context'] = view_context
result['help'] = help
return result
class project_work(osv.osv):
_inherit = "project.task.work"
def get_user_related_details(self, cr, uid, user_id):
res = {}
emp_obj = self.pool.get('hr.employee')
emp_id = emp_obj.search(cr, uid, [('user_id', '=', user_id)])
if not emp_id:
user_name = self.pool.get('res.users').read(cr, uid, [user_id], ['name'])[0]['name']
raise osv.except_osv(_('Bad Configuration!'),
_('Please define employee for user "%s". You must create one.')% (user_name,))
emp = emp_obj.browse(cr, uid, emp_id[0])
if not emp.product_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define product and product category property account on the related employee.\nFill in the HR Settings tab of the employee form.'))
if not emp.journal_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define journal on the related employee.\nFill in the HR Settings tab of the employee form.'))
acc_id = emp.product_id.property_account_expense.id
if not acc_id:
acc_id = emp.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Bad Configuration!'),
_('Please define product and product category property account on the related employee.\nFill in the HR Settings of the employee form.'))
res['product_id'] = emp.product_id.id
res['journal_id'] = emp.journal_id.id
res['general_account_id'] = acc_id
res['product_uom_id'] = emp.product_id.uom_id.id
return res
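# Shape of the dict returned by get_user_related_details() (illustrative
# ids only):
#   {'product_id': 42, 'journal_id': 7,
#    'general_account_id': 1201, 'product_uom_id': 1}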
def _create_analytic_entries(self, cr, uid, vals, context):
"""Create the hr analytic timesheet from project task work"""
timesheet_obj = self.pool['hr.analytic.timesheet']
task_obj = self.pool['project.task']
vals_line = {}
timeline_id = False
acc_id = False
task_obj = task_obj.browse(cr, uid, vals['task_id'], context=context)
result = self.get_user_related_details(cr, uid, vals.get('user_id', uid))
vals_line['name'] = '%s: %s' % (tools.ustr(task_obj.name), tools.ustr(vals['name'] or '/'))
vals_line['user_id'] = vals['user_id']
vals_line['product_id'] = result['product_id']
if vals.get('date'):
if len(vals['date']) > 10:
timestamp = datetime.datetime.strptime(vals['date'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
ts = fields.datetime.context_timestamp(cr, uid, timestamp, context)
vals_line['date'] = ts.strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
else:
vals_line['date'] = vals['date']
# Calculate quantity based on employee's product's uom
vals_line['unit_amount'] = vals['hours']
default_uom = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.project_time_mode_id.id
if result['product_uom_id'] != default_uom:
vals_line['unit_amount'] = self.pool['product.uom']._compute_qty(cr, uid, default_uom, vals['hours'], result['product_uom_id'])
acc_id = task_obj.project_id and task_obj.project_id.analytic_account_id.id or acc_id
if acc_id:
vals_line['account_id'] = acc_id
res = timesheet_obj.on_change_account_id(cr, uid, False, acc_id)
if res.get('value'):
vals_line.update(res['value'])
vals_line['general_account_id'] = result['general_account_id']
vals_line['journal_id'] = result['journal_id']
vals_line['amount'] = 0.0
vals_line['product_uom_id'] = result['product_uom_id']
amount = vals_line['unit_amount']
prod_id = vals_line['product_id']
unit = False
timeline_id = timesheet_obj.create(cr, uid, vals=vals_line, context=context)
# Compute based on pricetype
amount_unit = timesheet_obj.on_change_unit_amount(cr, uid, timeline_id,
prod_id, amount, False, unit, vals_line['journal_id'], context=context)
if amount_unit and 'amount' in amount_unit.get('value',{}):
updv = { 'amount': amount_unit['value']['amount'] }
timesheet_obj.write(cr, uid, [timeline_id], updv, context=context)
return timeline_id
def create(self, cr, uid, vals, *args, **kwargs):
context = kwargs.get('context', {})
if not context.get('no_analytic_entry',False):
vals['hr_analytic_timesheet_id'] = self._create_analytic_entries(cr, uid, vals, context=context)
return super(project_work,self).create(cr, uid, vals, *args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
"""
When a project task work gets updated, handle its hr analytic timesheet.
"""
if context is None:
context = {}
timesheet_obj = self.pool.get('hr.analytic.timesheet')
uom_obj = self.pool.get('product.uom')
result = {}
if isinstance(ids, (long, int)):
ids = [ids]
for task in self.browse(cr, uid, ids, context=context):
line_id = task.hr_analytic_timesheet_id
if not line_id:
# if a record is deleted from timesheet, the line_id will become
# null because of the foreign key on-delete=set null
continue
vals_line = {}
if 'name' in vals:
vals_line['name'] = '%s: %s' % (tools.ustr(task.task_id.name), tools.ustr(vals['name'] or '/'))
if 'user_id' in vals:
vals_line['user_id'] = vals['user_id']
if 'date' in vals:
vals_line['date'] = vals['date'][:10]
if 'hours' in vals:
vals_line['unit_amount'] = vals['hours']
prod_id = vals_line.get('product_id', line_id.product_id.id) # False may be set
# Put user related details in analytic timesheet values
details = self.get_user_related_details(cr, uid, vals.get('user_id', task.user_id.id))
for field in ('product_id', 'general_account_id', 'journal_id', 'product_uom_id'):
if details.get(field, False):
vals_line[field] = details[field]
# Check if user's default UOM differs from product's UOM
user_default_uom_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.project_time_mode_id.id
if details.get('product_uom_id', False) and details['product_uom_id'] != user_default_uom_id:
vals_line['unit_amount'] = uom_obj._compute_qty(cr, uid, user_default_uom_id, vals['hours'], details['product_uom_id'])
# Compute based on pricetype
amount_unit = timesheet_obj.on_change_unit_amount(cr, uid, line_id.id,
prod_id=prod_id, company_id=False,
unit_amount=vals_line['unit_amount'], unit=False, journal_id=vals_line['journal_id'], context=context)
if amount_unit and 'amount' in amount_unit.get('value',{}):
vals_line['amount'] = amount_unit['value']['amount']
if vals_line:
self.pool.get('hr.analytic.timesheet').write(cr, uid, [line_id.id], vals_line, context=context)
return super(project_work,self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, *args, **kwargs):
hat_obj = self.pool.get('hr.analytic.timesheet')
hat_ids = []
for task in self.browse(cr, uid, ids):
if task.hr_analytic_timesheet_id:
hat_ids.append(task.hr_analytic_timesheet_id.id)
# Also delete the timesheet entry when deleting the task work entry.
if hat_ids:
hat_obj.unlink(cr, uid, hat_ids, *args, **kwargs)
return super(project_work,self).unlink(cr, uid, ids, *args, **kwargs)
_columns={
'hr_analytic_timesheet_id':fields.many2one('hr.analytic.timesheet','Related Timeline Id', ondelete='set null'),
}
class task(osv.osv):
_inherit = "project.task"
def unlink(self, cr, uid, ids, *args, **kwargs):
for task_obj in self.browse(cr, uid, ids, *args, **kwargs):
if task_obj.work_ids:
work_ids = [x.id for x in task_obj.work_ids]
self.pool.get('project.task.work').unlink(cr, uid, work_ids, *args, **kwargs)
return super(task,self).unlink(cr, uid, ids, *args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
task_work_obj = self.pool['project.task.work']
acc_id = False
missing_analytic_entries = {}
if vals.get('project_id',False) or vals.get('name',False):
vals_line = {}
hr_analytic_timesheet = self.pool.get('hr.analytic.timesheet')
if vals.get('project_id',False):
project_obj = self.pool.get('project.project').browse(cr, uid, vals['project_id'], context=context)
acc_id = project_obj.analytic_account_id.id
for task_obj in self.browse(cr, uid, ids, context=context):
if len(task_obj.work_ids):
for task_work in task_obj.work_ids:
if not task_work.hr_analytic_timesheet_id:
if acc_id :
# missing timesheet activities to generate
missing_analytic_entries[task_work.id] = {
'name' : task_work.name,
'user_id' : task_work.user_id.id,
'date' : task_work.date,
'account_id': acc_id,
'hours' : task_work.hours,
'task_id' : task_obj.id
}
continue
line_id = task_work.hr_analytic_timesheet_id.id
if vals.get('project_id',False):
vals_line['account_id'] = acc_id
if vals.get('name',False):
vals_line['name'] = '%s: %s' % (tools.ustr(vals['name']), tools.ustr(task_work.name) or '/')
hr_analytic_timesheet.write(cr, uid, [line_id], vals_line, {})
res = super(task,self).write(cr, uid, ids, vals, context)
for task_work_id, analytic_entry in missing_analytic_entries.items():
timeline_id = task_work_obj._create_analytic_entries(cr, uid, analytic_entry, context=context)
task_work_obj.write(cr, uid, task_work_id, {'hr_analytic_timesheet_id' : timeline_id}, context=context)
return res
class res_partner(osv.osv):
_inherit = 'res.partner'
def unlink(self, cursor, user, ids, context=None):
project_ids = self.pool.get('project.project').search(cursor, user, [('partner_id', 'in', ids)])
if project_ids:
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a partner which is assigned to a project, but you can uncheck the active box.'))
return super(res_partner,self).unlink(cursor, user, ids,
context=context)
class account_analytic_line(osv.osv):
_inherit = "account.analytic.line"
def get_product(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
emp_ids = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
if emp_ids:
employee = emp_obj.browse(cr, uid, emp_ids, context=context)[0]
if employee.product_id:
return employee.product_id.id
return False
_defaults = {'product_id': get_product,}
def on_change_account_id(self, cr, uid, ids, account_id):
res = {}
if not account_id:
return res
res.setdefault('value',{})
acc = self.pool.get('account.analytic.account').browse(cr, uid, account_id)
st = acc.to_invoice.id
res['value']['to_invoice'] = st or False
if acc.state == 'close' or acc.state == 'cancelled':
raise osv.except_osv(_('Invalid Analytic Account!'), _('You cannot select an Analytic Account which is in the Closed or Cancelled state.'))
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
damianmoore/django-filer
|
refs/heads/develop
|
filer/admin/tools.py
|
43
|
#-*- coding: utf-8 -*-
from django.core.exceptions import PermissionDenied
def check_files_edit_permissions(request, files):
for f in files:
if not f.has_edit_permission(request):
raise PermissionDenied
def check_folder_edit_permissions(request, folders):
for f in folders:
if not f.has_edit_permission(request):
raise PermissionDenied
check_files_edit_permissions(request, f.files)
check_folder_edit_permissions(request, f.children.all())
def check_files_read_permissions(request, files):
for f in files:
if not f.has_read_permission(request):
raise PermissionDenied
def check_folder_read_permissions(request, folders):
for f in folders:
if not f.has_read_permission(request):
raise PermissionDenied
check_files_read_permissions(request, f.files)
check_folder_read_permissions(request, f.children.all())
def userperms_for_request(item, request):
r = []
ps = ['read', 'edit', 'add_children']
for p in ps:
attr = "has_%s_permission" % p
if hasattr(item, attr):
x = getattr(item, attr)(request)
if x:
r.append(p)
return r
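# A minimal sketch of how userperms_for_request() is typically consumed
# (the folder and request objects are assumed to come from filer's admin
# views):
#
#   perms = userperms_for_request(folder, request)
#   if 'edit' in perms:
#       ...  # render edit controls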
|
EmreAtes/spack
|
refs/heads/develop
|
lib/spack/external/yaml/lib3/yaml/events.py
|
986
|
# Abstract classes.
class Event(object):
def __init__(self, start_mark=None, end_mark=None):
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
if hasattr(self, key)]
arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
for key in attributes])
return '%s(%s)' % (self.__class__.__name__, arguments)
class NodeEvent(Event):
def __init__(self, anchor, start_mark=None, end_mark=None):
self.anchor = anchor
self.start_mark = start_mark
self.end_mark = end_mark
class CollectionStartEvent(NodeEvent):
def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
flow_style=None):
self.anchor = anchor
self.tag = tag
self.implicit = implicit
self.start_mark = start_mark
self.end_mark = end_mark
self.flow_style = flow_style
class CollectionEndEvent(Event):
pass
# Implementations.
class StreamStartEvent(Event):
def __init__(self, start_mark=None, end_mark=None, encoding=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.encoding = encoding
class StreamEndEvent(Event):
pass
class DocumentStartEvent(Event):
def __init__(self, start_mark=None, end_mark=None,
explicit=None, version=None, tags=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.explicit = explicit
self.version = version
self.tags = tags
class DocumentEndEvent(Event):
def __init__(self, start_mark=None, end_mark=None,
explicit=None):
self.start_mark = start_mark
self.end_mark = end_mark
self.explicit = explicit
class AliasEvent(NodeEvent):
pass
class ScalarEvent(NodeEvent):
def __init__(self, anchor, tag, implicit, value,
start_mark=None, end_mark=None, style=None):
self.anchor = anchor
self.tag = tag
self.implicit = implicit
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
self.style = style
class SequenceStartEvent(CollectionStartEvent):
pass
class SequenceEndEvent(CollectionEndEvent):
pass
class MappingStartEvent(CollectionStartEvent):
pass
class MappingEndEvent(CollectionEndEvent):
pass
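# A minimal sketch of the event stream these classes model (using the
# top-level parse() API of this yaml package):
#
#   import yaml
#   for event in yaml.parse('a: 1'):
#       print(event)
#
# which yields, in order: StreamStartEvent, DocumentStartEvent,
# MappingStartEvent, two ScalarEvents (key 'a' and value '1'),
# MappingEndEvent, DocumentEndEvent and StreamEndEvent.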
|
michaelgallacher/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/usagesOfUnqualifiedOldStyleRelativeImportsInsideMovedModule/after/src/pkg/__init__.py
|
12133432
| |
jonyroda97/redbot-amigosprovaveis
|
refs/heads/develop
|
lib/matplotlib/compat/__init__.py
|
12133432
| |
samuel1208/scikit-learn
|
refs/heads/master
|
sklearn/mixture/tests/__init__.py
|
12133432
| |
joequery/django
|
refs/heads/master
|
tests/forms_tests/widget_tests/__init__.py
|
12133432
| |
nimbis/django-cms
|
refs/heads/master
|
cms/test_utils/project/placeholderapp/migrations/__init__.py
|
12133432
| |
svenstaro/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/vyos/vyos_command.py
|
20
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: vyos_command
version_added: "2.2"
author: "Nathaniel Case (@qalthos)"
short_description: Run one or more commands on VyOS devices
description:
- The command module allows running one or more commands on remote
devices running VyOS. This module can also be introspected
to validate key parameters before returning successfully. If the
conditional statements are not met in the wait period, the task
fails.
- Certain C(show) commands in VyOS produce many lines of output and
use a custom pager that can cause this module to hang. If the
value of the environment variable C(ANSIBLE_VYOS_TERMINAL_LENGTH)
is not set, the default number of 10000 is used.
extends_documentation_fragment: vyos
options:
commands:
description:
- The ordered set of commands to execute on the remote device
running VyOS. The output from the command execution is
returned to the playbook. If the I(wait_for) argument is
provided, the module does not return until the condition is
satisfied or the number of retries has been exceeded.
required: true
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured I(retries), the task fails. See examples.
required: false
default: null
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between I(retries)
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
notes:
- Running C(show system boot-messages all) will cause the module to hang since
VyOS is using a custom pager setting to display the output of that command.
"""
EXAMPLES = """
tasks:
- name: show configuration on ethernet devices eth0 and eth1
vyos_command:
commands:
- show interfaces ethernet {{ item }}
with_items:
- eth0
- eth1
- name: run multiple commands and check if version output contains specific version string
vyos_command:
commands:
- show version
- show hardware cpu
wait_for:
- "result[0] contains 'VyOS 1.1.7'"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.netcli import Conditional
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.six import string_types
from ansible.module_utils.vyos import run_commands
from ansible.module_utils.vyos import vyos_argument_spec, check_args
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
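# For example (illustrative): to_lines() splits string responses on
# newlines and passes lists through unchanged, so
#   list(to_lines(['a\nb', ['c']])) == [['a', 'b'], ['c']]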
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict(),
), module)
commands = command(module.params['commands'])
for index, cmd in enumerate(commands):
if module.check_mode and not cmd['command'].startswith('show'):
warnings.append('only show commands are supported when using '
'check mode, not executing `%s`' % cmd['command'])
commands[index] = module.jsonify(cmd)
return commands
def main():
spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
spec.update(vyos_argument_spec)
module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
wait_for = module.params['wait_for'] or list()
try:
conditionals = [Conditional(c) for c in wait_for]
except AttributeError:
exc = get_exception()
module.fail_json(msg=str(exc))
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
for _ in range(retries):
responses = run_commands(module, commands)
for item in conditionals:
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'warnings': warnings,
'stdout_lines': list(to_lines(responses)),
}
module.exit_json(**result)
if __name__ == '__main__':
main()
|
lrdawg99/bigbyte
|
refs/heads/master
|
bigbyte/node_modules/cordova/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input.py
|
64
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import copy
import gyp.common
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
from gyp.common import GypError
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
'destination',
'files',
'include_dirs',
'inputs',
'libraries',
'outputs',
'sources',
]
path_sections = []
is_path_section_charset = set('=+?!')
is_path_section_match_re = re.compile('_(dir|file|path)s?$')
def IsPathSection(section):
# If section ends in one of these characters, it's applied to a section
# without the trailing characters. '/' is notably absent from this list,
# because there's no way for a regular expression to be treated as a path.
while section[-1:] in is_path_section_charset:
section = section[:-1]
return section in path_sections or is_path_section_match_re.search(section)
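# For example (illustrative):
#   IsPathSection('include_dirs')  -> truthy (matches the _dirs suffix)
#   IsPathSection('include_dirs+') -> truthy (the trailing '+' is stripped
#                                     before the check)
#   IsPathSection('destination')   -> true only once base_path_sections
#                                     has been merged into path_sections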
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
# Sections that must exist inside targets and not configurations.
'actions',
'configurations',
'copies',
'default_configuration',
'dependencies',
'dependencies_original',
'link_languages',
'libraries',
'postbuilds',
'product_dir',
'product_extension',
'product_name',
'product_prefix',
'rules',
'run_as',
'sources',
'standalone_static_library',
'suppress_wildcard',
'target_name',
'toolset',
'toolsets',
'type',
'variants',
# Sections that can be found inside targets or configurations, but that
# should not be propagated from targets into their configurations.
'variables',
]
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
'actions',
'all_dependent_settings',
'configurations',
'dependencies',
'direct_dependent_settings',
'libraries',
'link_settings',
'sources',
'standalone_static_library',
'target_name',
'type',
]
# Controls how the generator want the build file paths.
absolute_build_file_paths = False
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
"""Return a list of all build files included into build_file_path.
The returned list will contain build_file_path as well as all other files
that it included, either directly or indirectly. Note that the list may
contain files that were included into a conditional section that evaluated
to false and was not merged into build_file_path's dict.
aux_data is a dict containing a key for each build file or included build
file. Those keys provide access to dicts whose "included" keys contain
lists of all other files included by the build file.
included should be left at its default None value by external callers. It
is used for recursion.
The returned list will not contain any duplicate entries. Each build file
in the list will be relative to the current directory.
"""
if included == None:
included = []
if build_file_path in included:
return included
included.append(build_file_path)
for included_build_file in aux_data[build_file_path].get('included', []):
GetIncludedBuildFiles(included_build_file, aux_data, included)
return included
def CheckedEval(file_contents):
"""Return the eval of a gyp file.
The gyp file is restricted to dictionaries and lists only, and
repeated keys are not allowed.
Note that this is slower than eval() is.
"""
ast = compiler.parse(file_contents)
assert isinstance(ast, Module)
c1 = ast.getChildren()
assert c1[0] is None
assert isinstance(c1[1], Stmt)
c2 = c1[1].getChildren()
assert isinstance(c2[0], Discard)
c3 = c2[0].getChildren()
assert len(c3) == 1
return CheckNode(c3[0], [])
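# For example (illustrative):
#   CheckedEval("{'targets': [{'target_name': 'a'}]}")
# returns the corresponding dict, while a literal with a repeated key such
# as "{'a': 1, 'a': 2}" raises GypError instead of silently keeping the
# last value as eval() would.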
def CheckNode(node, keypath):
if isinstance(node, Dict):
c = node.getChildren()
dict = {}
for n in range(0, len(c), 2):
assert isinstance(c[n], Const)
key = c[n].getChildren()[0]
if key in dict:
raise GypError("Key '" + key + "' repeated at level " +
repr(len(keypath) + 1) + " with key path '" +
'.'.join(keypath) + "'")
kp = list(keypath) # Make a copy of the list for descending this node.
kp.append(key)
dict[key] = CheckNode(c[n + 1], kp)
return dict
elif isinstance(node, List):
c = node.getChildren()
children = []
for index, child in enumerate(c):
kp = list(keypath) # Copy list.
kp.append(repr(index))
children.append(CheckNode(child, kp))
return children
elif isinstance(node, Const):
return node.getChildren()[0]
else:
raise TypeError, "Unknown AST node at key path '" + '.'.join(keypath) + \
"': " + repr(node)
def LoadOneBuildFile(build_file_path, data, aux_data, variables, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, variables, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, variables, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
variables, includes, check):
includes_list = []
  if includes is not None:
includes_list.extend(includes)
if 'includes' in subdict:
for include in subdict['includes']:
# "include" is specified relative to subdict_path, so compute the real
# path to include by appending the provided "include" to the directory
# in which subdict_path resides.
relative_include = \
os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
includes_list.append(relative_include)
# Unhook the includes list, it's no longer needed.
del subdict['includes']
# Merge in the included files.
for include in includes_list:
    if 'included' not in aux_data[subdict_path]:
aux_data[subdict_path]['included'] = []
aux_data[subdict_path]['included'].append(include)
gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
MergeDicts(subdict,
LoadOneBuildFile(include, data, aux_data, variables, None,
False, check),
subdict_path, include)
# Recurse into subdictionaries.
for k, v in subdict.iteritems():
if v.__class__ == dict:
LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, variables,
None, check)
elif v.__class__ == list:
LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, variables,
check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data,
variables, check):
for item in sublist:
if item.__class__ == dict:
LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
variables, None, check)
elif item.__class__ == list:
LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data,
variables, check)
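# Worked example of the relative-include computation above (hypothetical
# paths): a build file at 'src/a.gyp' with 'includes': ['../common.gypi']
# yields
#   os.path.normpath(os.path.join('src', '../common.gypi')) == 'common.gypi'
# so the include is keyed in |data| and |aux_data| by the same
# current-directory-relative path used everywhere else.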
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
if 'targets' in data:
target_list = data['targets']
new_target_list = []
for target in target_list:
# If this target already has an explicit 'toolset', and no 'toolsets'
# list, don't modify it further.
if 'toolset' in target and 'toolsets' not in target:
new_target_list.append(target)
continue
if multiple_toolsets:
toolsets = target.get('toolsets', ['target'])
else:
toolsets = ['target']
# Make sure this 'toolsets' definition is only processed once.
if 'toolsets' in target:
del target['toolsets']
if len(toolsets) > 0:
# Optimization: only do copies if more than one toolset is specified.
for build in toolsets[1:]:
new_target = copy.deepcopy(target)
new_target['toolset'] = build
new_target_list.append(new_target)
target['toolset'] = toolsets[0]
new_target_list.append(target)
data['targets'] = new_target_list
if 'conditions' in data:
for condition in data['conditions']:
if isinstance(condition, list):
for condition_dict in condition[1:]:
ProcessToolsetsInDict(condition_dict)
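# Sketch of the expansion performed above, assuming multiple_toolsets is
# True and a hypothetical target {'target_name': 'lib', 'toolsets':
# ['host', 'target']}: a deep copy with 'toolset': 'target' is appended
# first, then the original dict with 'toolset': 'host', leaving one target
# entry per toolset and no 'toolsets' key.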
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
depth, check, load_dependencies):
# If depth is set, predefine the DEPTH variable to be a relative path from
# this build file's directory to the directory identified by depth.
if depth:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
if d == '':
variables['DEPTH'] = '.'
else:
variables['DEPTH'] = d.replace('\\', '/')
  # If the generator needs absolute paths, make the build file path absolute.
if absolute_build_file_paths:
build_file_path = os.path.abspath(build_file_path)
if build_file_path in data['target_build_files']:
# Already loaded.
return False
data['target_build_files'].add(build_file_path)
gyp.DebugOutput(gyp.DEBUG_INCLUDES,
"Loading Target Build File '%s'", build_file_path)
build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, variables,
includes, True, check)
# Store DEPTH for later use in generators.
build_file_data['_DEPTH'] = depth
# Set up the included_files key indicating which .gyp files contributed to
# this target dict.
if 'included_files' in build_file_data:
raise GypError(build_file_path + ' must not contain included_files key')
included = GetIncludedBuildFiles(build_file_path, aux_data)
build_file_data['included_files'] = []
for included_file in included:
# included_file is relative to the current directory, but it needs to
# be made relative to build_file_path's directory.
included_relative = \
gyp.common.RelativePath(included_file,
os.path.dirname(build_file_path))
build_file_data['included_files'].append(included_relative)
# Do a first round of toolsets expansion so that conditions can be defined
# per toolset.
ProcessToolsetsInDict(build_file_data)
# Apply "pre"/"early" variable expansions and condition evaluations.
ProcessVariablesAndConditionsInDict(
build_file_data, PHASE_EARLY, variables, build_file_path)
# Since some toolsets might have been defined conditionally, perform
# a second round of toolsets expansion now.
ProcessToolsetsInDict(build_file_data)
# Look at each project's target_defaults dict, and merge settings into
# targets.
if 'target_defaults' in build_file_data:
if 'targets' not in build_file_data:
raise GypError("Unable to find targets in build file %s" %
build_file_path)
index = 0
while index < len(build_file_data['targets']):
# This procedure needs to give the impression that target_defaults is
# used as defaults, and the individual targets inherit from that.
# The individual targets need to be merged into the defaults. Make
# a deep copy of the defaults for each target, merge the target dict
# as found in the input file into that copy, and then hook up the
# copy with the target-specific data merged into it as the replacement
# target dict.
old_target_dict = build_file_data['targets'][index]
new_target_dict = copy.deepcopy(build_file_data['target_defaults'])
MergeDicts(new_target_dict, old_target_dict,
build_file_path, build_file_path)
build_file_data['targets'][index] = new_target_dict
index += 1
# No longer needed.
del build_file_data['target_defaults']
# Look for dependencies. This means that dependency resolution occurs
# after "pre" conditionals and variable expansion, but before "post" -
# in other words, you can't put a "dependencies" section inside a "post"
# conditional within a target.
dependencies = []
if 'targets' in build_file_data:
for target_dict in build_file_data['targets']:
if 'dependencies' not in target_dict:
continue
for dependency in target_dict['dependencies']:
dependencies.append(
gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
if load_dependencies:
for dependency in dependencies:
try:
LoadTargetBuildFile(dependency, data, aux_data, variables,
includes, depth, check, load_dependencies)
except Exception, e:
gyp.common.ExceptionAppend(
e, 'while loading dependencies of %s' % build_file_path)
raise
else:
return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
build_file_path, data,
aux_data, variables,
includes, depth, check):
"""Wrapper around LoadTargetBuildFile for parallel processing.
This wrapper is used when LoadTargetBuildFile is executed in
a worker process.
"""
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Apply globals so that the worker process behaves the same.
for key, value in global_flags.iteritems():
globals()[key] = value
# Save the keys so we can return data that changed.
data_keys = set(data)
aux_data_keys = set(aux_data)
result = LoadTargetBuildFile(build_file_path, data,
aux_data, variables,
includes, depth, check, False)
if not result:
return result
(build_file_path, dependencies) = result
data_out = {}
for key in data:
if key == 'target_build_files':
continue
if key not in data_keys:
data_out[key] = data[key]
aux_data_out = {}
for key in aux_data:
if key not in aux_data_keys:
aux_data_out[key] = aux_data[key]
# This gets serialized and sent back to the main process via a pipe.
# It's handled in LoadTargetBuildFileCallback.
return (build_file_path,
data_out,
aux_data_out,
dependencies)
except Exception, e:
print >>sys.stderr, 'Exception: ', e
return None
class ParallelProcessingError(Exception):
pass
class ParallelState(object):
"""Class to keep track of state when processing input files in parallel.
If build files are loaded in parallel, use this to keep track of
state during farming out and processing parallel jobs. It's stored
in a global so that the callback function can have access to it.
"""
def __init__(self):
# The multiprocessing pool.
self.pool = None
# The condition variable used to protect this object and notify
# the main loop when there might be more data to process.
self.condition = None
# The "data" dict that was passed to LoadTargetBuildFileParallel
self.data = None
# The "aux_data" dict that was passed to LoadTargetBuildFileParallel
self.aux_data = None
    # The number of parallel calls outstanding; decremented when a response
    # is received.
self.pending = 0
# The set of all build files that have been scheduled, so we don't
# schedule the same one twice.
self.scheduled = set()
# A list of dependency build file paths that haven't been scheduled yet.
self.dependencies = []
# Flag to indicate if there was an error in a child process.
self.error = False
def LoadTargetBuildFileCallback(self, result):
"""Handle the results of running LoadTargetBuildFile in another process.
"""
self.condition.acquire()
if not result:
self.error = True
self.condition.notify()
self.condition.release()
return
(build_file_path0, data0, aux_data0, dependencies0) = result
self.data['target_build_files'].add(build_file_path0)
for key in data0:
self.data[key] = data0[key]
for key in aux_data0:
self.aux_data[key] = aux_data0[key]
for new_dependency in dependencies0:
if new_dependency not in self.scheduled:
self.scheduled.add(new_dependency)
self.dependencies.append(new_dependency)
self.pending -= 1
self.condition.notify()
self.condition.release()
def LoadTargetBuildFileParallel(build_file_path, data, aux_data,
variables, includes, depth, check):
parallel_state = ParallelState()
parallel_state.condition = threading.Condition()
parallel_state.dependencies = [build_file_path]
parallel_state.scheduled = set([build_file_path])
parallel_state.pending = 0
parallel_state.data = data
parallel_state.aux_data = aux_data
try:
parallel_state.condition.acquire()
while parallel_state.dependencies or parallel_state.pending:
if parallel_state.error:
print >>sys.stderr, (
'\n'
'Note: an error occurred while running gyp using multiprocessing.\n'
'For more verbose output, set GYP_PARALLEL=0 in your environment.\n'
'If the error only occurs when GYP_PARALLEL=1, '
'please report a bug!')
break
if not parallel_state.dependencies:
parallel_state.condition.wait()
continue
dependency = parallel_state.dependencies.pop()
parallel_state.pending += 1
data_in = {}
data_in['target_build_files'] = data['target_build_files']
aux_data_in = {}
global_flags = {
'path_sections': globals()['path_sections'],
'non_configuration_keys': globals()['non_configuration_keys'],
'absolute_build_file_paths': globals()['absolute_build_file_paths'],
'multiple_toolsets': globals()['multiple_toolsets']}
if not parallel_state.pool:
parallel_state.pool = multiprocessing.Pool(8)
parallel_state.pool.apply_async(
CallLoadTargetBuildFile,
args = (global_flags, dependency,
data_in, aux_data_in,
variables, includes, depth, check),
callback = parallel_state.LoadTargetBuildFileCallback)
except KeyboardInterrupt, e:
parallel_state.pool.terminate()
raise e
parallel_state.condition.release()
if parallel_state.error:
sys.exit()
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple. For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
stack = []
start = -1
for index, char in enumerate(input_str):
if char in LBRACKETS:
stack.append(char)
if start == -1:
start = index
elif char in BRACKETS:
if not stack:
return (-1, -1)
if stack.pop() != BRACKETS[char]:
return (-1, -1)
if not stack:
return (start, index + 1)
return (-1, -1)
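# Doctest-style illustration of the bracket matching above:
#   FindEnclosingBracketGroup('<(foo <(bar)) blah') -> (1, 13)
#   FindEnclosingBracketGroup('no brackets here')   -> (-1, -1)
#   FindEnclosingBracketGroup('mismatched (]')      -> (-1, -1)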
canonical_int_re = re.compile('(0|-?[1-9][0-9]*)$')
def IsStrCanonicalInt(string):
"""Returns True if |string| is in its canonical integer form.
The canonical form is such that str(int(string)) == string.
"""
  return bool(isinstance(string, str) and canonical_int_re.match(string))
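# For example:
#   IsStrCanonicalInt('10')  -> True  (str(int('10')) == '10')
#   IsStrCanonicalInt('010') -> False (a leading zero is not canonical)
#   IsStrCanonicalInt('-0')  -> False ('-0' round-trips to '0')
#   IsStrCanonicalInt(10)    -> False (not a str)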
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
early_variable_re = re.compile(
'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
'(?P<command_string>[-a-zA-Z0-9_.]+)?'
'\((?P<is_array>\s*\[?)'
'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
'(?P<command_string>[-a-zA-Z0-9_.]+)?'
'\((?P<is_array>\s*\[?)'
'(?P<content>.*?)(\]?)\))')
# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
'(?P<command_string>[-a-zA-Z0-9_.]+)?'
'\((?P<is_array>\s*\[?)'
'(?P<content>.*?)(\]?)\))')
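# For reference, a hypothetical match of early_variable_re against
# '<!pymod_do_main(mymod --flag)' captures:
#   type           '<!'            (the command variant)
#   command_string 'pymod_do_main'
#   is_array       ''              (no '[', so not an array literal)
#   content        'mymod --flag'
# while a plain '<(foo)' captures type '<', command_string None, and
# content 'foo'.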
# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
if sys.platform == 'win32':
if type(cmd) == list:
cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
else:
cmd = re.sub('^cat ', 'type ', cmd)
return cmd
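# For example, on win32 the string command 'cat version.in' becomes
# 'type version.in' ('type' being the cmd.exe counterpart of cat); on all
# other platforms the command passes through unchanged.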
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
# Look for the pattern that gets expanded into variables
if phase == PHASE_EARLY:
variable_re = early_variable_re
expansion_symbol = '<'
elif phase == PHASE_LATE:
variable_re = late_variable_re
expansion_symbol = '>'
elif phase == PHASE_LATELATE:
variable_re = latelate_variable_re
expansion_symbol = '^'
else:
assert False
input_str = str(input)
if IsStrCanonicalInt(input_str):
return int(input_str)
# Do a quick scan to determine if an expensive regex search is warranted.
if expansion_symbol not in input_str:
return input_str
# Get the entire list of matches as a list of MatchObject instances.
# (using findall here would return strings instead of MatchObjects).
matches = list(variable_re.finditer(input_str))
if not matches:
return input_str
output = input_str
# Reverse the list of matches so that replacements are done right-to-left.
# That ensures that earlier replacements won't mess up the string in a
# way that causes later calls to find the earlier substituted text instead
# of what's intended for replacement.
matches.reverse()
for match_group in matches:
match = match_group.groupdict()
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
# match['replace'] is the substring to look for, match['type']
# is the character code for the replacement type (< > <! >! <| >| <@
# >@ <!@ >!@), match['is_array'] contains a '[' for command
# arrays, and match['content'] is the name of the variable (< >)
# or command to run (<! >!). match['command_string'] is an optional
# command string. Currently, only 'pymod_do_main' is supported.
# run_command is true if a ! variant is used.
run_command = '!' in match['type']
command_string = match['command_string']
# file_list is true if a | variant is used.
file_list = '|' in match['type']
# Capture these now so we can adjust them later.
replace_start = match_group.start('replace')
replace_end = match_group.end('replace')
# Find the ending paren, and re-evaluate the contained string.
(c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
# Adjust the replacement range to match the entire command
# found by FindEnclosingBracketGroup (since the variable_re
# probably doesn't match the entire command if it contained
# nested variables).
replace_end = replace_start + c_end
# Find the "real" replacement, matching the appropriate closing
# paren, and adjust the replacement start and end.
replacement = input_str[replace_start:replace_end]
# Figure out what the contents of the variable parens are.
contents_start = replace_start + c_start + 1
contents_end = replace_end - 1
contents = input_str[contents_start:contents_end]
# Do filter substitution now for <|().
    # Admittedly, this is different from the evaluation order in other
# contexts. However, since filtration has no chance to run on <|(),
# this seems like the only obvious way to give them access to filters.
if file_list:
processed_variables = copy.deepcopy(variables)
ProcessListFiltersInDict(contents, processed_variables)
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase,
processed_variables, build_file)
else:
# Recurse to expand variables in the contents
contents = ExpandVariables(contents, phase, variables, build_file)
# Strip off leading/trailing whitespace so that variable matches are
# simpler below (and because they are rarely needed).
contents = contents.strip()
    # expand_to_list is true if an @ variant is used. In that case,
    # the expansion should result in a list. Note that the caller must
    # be expecting a list in return, and not all callers are, because
    # not all are working in list context. Also, for list expansions,
    # there can be no other text besides the variable expansion in the
    # input string.
expand_to_list = '@' in match['type'] and input_str == replacement
if run_command or file_list:
# Find the build file's directory, so commands can be run or file lists
# generated relative to it.
build_file_dir = os.path.dirname(build_file)
if build_file_dir == '':
# If build_file is just a leaf filename indicating a file in the
# current directory, build_file_dir might be an empty string. Set
# it to None to signal to subprocess.Popen that it should run the
# command in the current directory.
build_file_dir = None
# Support <|(listfile.txt ...) which generates a file
# containing items from a gyp list, generated at gyp time.
# This works around actions/rules which have more inputs than will
# fit on the command line.
if file_list:
if type(contents) == list:
contents_list = contents
else:
contents_list = contents.split(' ')
replacement = contents_list[0]
path = replacement
if not os.path.isabs(path):
path = os.path.join(build_file_dir, path)
f = gyp.common.WriteOnDiff(path)
for i in contents_list[1:]:
f.write('%s\n' % i)
f.close()
elif run_command:
use_shell = True
if match['is_array']:
contents = eval(contents)
use_shell = False
# Check for a cached value to avoid executing commands, or generating
# file lists more than once.
# TODO(http://code.google.com/p/gyp/issues/detail?id=112): It is
# possible that the command being invoked depends on the current
# directory. For that case the syntax needs to be extended so that the
# directory is also used in cache_key (it becomes a tuple).
# TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
# someone could author a set of GYP files where each time the command
# is invoked it produces different output by design. When the need
# arises, the syntax should be extended to support no caching off a
# command's output so it is run every time.
cache_key = str(contents)
cached_value = cached_command_results.get(cache_key, None)
if cached_value is None:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Executing command '%s' in directory '%s'",
contents, build_file_dir)
replacement = ''
if command_string == 'pymod_do_main':
# <!pymod_do_main(modulename param eters) loads |modulename| as a
# python module and then calls that module's DoMain() function,
# passing ["param", "eters"] as a single list argument. For modules
# that don't load quickly, this can be faster than
# <!(python modulename param eters). Do this in |build_file_dir|.
oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
os.chdir(build_file_dir)
try:
parsed_contents = shlex.split(contents)
try:
py_module = __import__(parsed_contents[0])
except ImportError as e:
raise GypError("Error importing pymod_do_main"
"module (%s): %s" % (parsed_contents[0], e))
replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
finally:
os.chdir(oldwd)
            assert replacement is not None
elif command_string:
raise GypError("Unknown command string '%s' in '%s'." %
(command_string, contents))
else:
# Fix up command with platform specific workarounds.
contents = FixupPlatformCommand(contents)
p = subprocess.Popen(contents, shell=use_shell,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
cwd=build_file_dir)
p_stdout, p_stderr = p.communicate('')
if p.wait() != 0 or p_stderr:
sys.stderr.write(p_stderr)
# Simulate check_call behavior, since check_call only exists
# in python 2.5 and later.
raise GypError("Call to '%s' returned exit status %d." %
(contents, p.returncode))
replacement = p_stdout.rstrip()
cached_command_results[cache_key] = replacement
else:
gyp.DebugOutput(gyp.DEBUG_VARIABLES,
"Had cache value for command '%s' in directory '%s'",
                          contents, build_file_dir)
replacement = cached_value
else:
      if contents not in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to an
          # empty list if undefined. This allows actions to:
# 'action!': [
# '>@(_sources!)',
# ],
# 'action/': [
# '>@(_sources/)',
# ],
replacement = []
else:
raise GypError('Undefined variable ' + contents +
' in ' + build_file)
else:
replacement = variables[contents]
if isinstance(replacement, list):
for item in replacement:
            if (contents[-1] != '/' and
                not isinstance(item, str) and not isinstance(item, int)):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'list contains a ' +
item.__class__.__name__)
# Run through the list and handle variable expansions in it. Since
# the list is guaranteed not to contain dicts, this won't do anything
# with conditions sections.
ProcessVariablesAndConditionsInList(replacement, phase, variables,
build_file)
elif not isinstance(replacement, str) and \
not isinstance(replacement, int):
raise GypError('Variable ' + contents +
' must expand to a string or list of strings; ' +
'found a ' + replacement.__class__.__name__)
if expand_to_list:
# Expanding in list context. It's guaranteed that there's only one
# replacement to do in |input_str| and that it's this replacement. See
# above.
if isinstance(replacement, list):
# If it's already a list, make a copy.
output = replacement[:]
else:
# Split it the same way sh would split arguments.
output = shlex.split(str(replacement))
else:
# Expanding in string context.
encoded_replacement = ''
if isinstance(replacement, list):
# When expanding a list into string context, turn the list items
# into a string in a way that will work with a subprocess call.
#
# TODO(mark): This isn't completely correct. This should
# call a generator-provided function that observes the
# proper list-to-argument quoting rules on a specific
# platform instead of just calling the POSIX encoding
# routine.
encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
else:
encoded_replacement = replacement
output = output[:replace_start] + str(encoded_replacement) + \
output[replace_end:]
# Prepare for the next match iteration.
input_str = output
# Look for more matches now that we've replaced some, to deal with
# expanding local variables (variables defined in the same
# variables block as this one).
gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
if isinstance(output, list):
if output and isinstance(output[0], list):
# Leave output alone if it's a list of lists.
# We don't want such lists to be stringified.
pass
else:
new_output = []
for item in output:
new_output.append(
ExpandVariables(item, phase, variables, build_file))
output = new_output
else:
output = ExpandVariables(output, phase, variables, build_file)
# Convert all strings that are canonically-represented integers into integers.
if isinstance(output, list):
for index in xrange(0, len(output)):
if IsStrCanonicalInt(output[index]):
output[index] = int(output[index])
elif IsStrCanonicalInt(output):
output = int(output)
return output
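# Hedged examples of the rules above (PHASE_EARLY, hypothetical variables
# and build file):
#   ExpandVariables('<(foo)-<(bar)', PHASE_EARLY,
#                   {'foo': 'a', 'bar': 'b'}, 'x.gyp') -> 'a-b'
#   ExpandVariables('<(n)', PHASE_EARLY, {'n': '42'}, 'x.gyp') -> 42
#     (the trailing canonical-integer pass converts the string '42')
#   ExpandVariables('<@(srcs)', PHASE_EARLY,
#                   {'srcs': ['a.c', 'b.c']}, 'x.gyp') -> ['a.c', 'b.c']
#     (the '@' variant in pure list context expands to a list)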
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
# Process a 'conditions' or 'target_conditions' section in the_dict,
# depending on phase.
# early -> conditions
# late -> target_conditions
# latelate -> no conditions
#
# Each item in a conditions list consists of cond_expr, a string expression
# evaluated as the condition, and true_dict, a dict that will be merged into
# the_dict if cond_expr evaluates to true. Optionally, a third item,
# false_dict, may be present. false_dict is merged into the_dict if
# cond_expr evaluates to false.
#
# Any dict merged into the_dict will be recursively processed for nested
# conditionals and other expansions, also according to phase, immediately
# prior to being merged.
if phase == PHASE_EARLY:
conditions_key = 'conditions'
elif phase == PHASE_LATE:
conditions_key = 'target_conditions'
elif phase == PHASE_LATELATE:
return
else:
assert False
  if conditions_key not in the_dict:
return
conditions_list = the_dict[conditions_key]
# Unhook the conditions list, it's no longer needed.
del the_dict[conditions_key]
for condition in conditions_list:
if not isinstance(condition, list):
raise GypError(conditions_key + ' must be a list')
if len(condition) != 2 and len(condition) != 3:
# It's possible that condition[0] won't work in which case this
# attempt will raise its own IndexError. That's probably fine.
raise GypError(conditions_key + ' ' + condition[0] +
' must be length 2 or 3, not ' + str(len(condition)))
[cond_expr, true_dict] = condition[0:2]
false_dict = None
if len(condition) == 3:
false_dict = condition[2]
    # Do expansions on the condition itself. Since the condition can
    # naturally contain variable references without needing to resort to GYP
    # expansion syntax, this is of dubious value for variables, but someone
    # might want to use a command expansion directly inside a condition.
cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
build_file)
if not isinstance(cond_expr_expanded, str) and \
not isinstance(cond_expr_expanded, int):
      raise ValueError, \
            'Variable expansion in this context permits str and int ' + \
            'only, found ' + cond_expr_expanded.__class__.__name__
try:
ast_code = compile(cond_expr_expanded, '<string>', 'eval')
if eval(ast_code, {'__builtins__': None}, variables):
merge_dict = true_dict
else:
merge_dict = false_dict
except SyntaxError, e:
syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
'at character %d.' %
(str(e.args[0]), e.text, build_file, e.offset),
e.filename, e.lineno, e.offset, e.text)
raise syntax_error
except NameError, e:
gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
(cond_expr_expanded, build_file))
raise GypError(e)
    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
ProcessVariablesAndConditionsInDict(merge_dict, phase,
variables, build_file)
MergeDicts(the_dict, merge_dict, build_file, build_file)
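# A hypothetical 'conditions' entry of the shape handled above:
#   ['OS=="linux"', {'defines': ['IS_LINUX']},  # merged when true
#                   {'defines': ['NOT_LINUX']}] # optional, merged when false
# The expression is eval()ed against |variables|, so OS must already be
# defined as a variable when this phase runs.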
def LoadAutomaticVariablesFromDict(variables, the_dict):
# Any keys with plain string values in the_dict become automatic variables.
# The variable name is the key name with a "_" character prepended.
for key, value in the_dict.iteritems():
if isinstance(value, str) or isinstance(value, int) or \
isinstance(value, list):
variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
# Any keys in the_dict's "variables" dict, if it has one, becomes a
# variable. The variable name is the key name in the "variables" dict.
# Variables that end with the % character are set only if they are unset in
# the variables dict. the_dict_key is the name of the key that accesses
# the_dict in the_dict's parent dict. If the_dict's parent is not a dict
# (it could be a list or it could be parentless because it is a root dict),
# the_dict_key will be None.
for key, value in the_dict.get('variables', {}).iteritems():
if not isinstance(value, str) and not isinstance(value, int) and \
not isinstance(value, list):
continue
if key.endswith('%'):
variable_name = key[:-1]
if variable_name in variables:
# If the variable is already set, don't set it.
continue
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
value = the_dict[variable_name]
else:
variable_name = key
variables[variable_name] = value
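# For example (hypothetical dicts): with
#   the_dict = {'variables': {'use_foo%': 0}} and variables = {'use_foo': 1}
# the '%' suffix marks 'use_foo' as a default, so the pre-existing value 1
# survives; without the '%', the 0 would overwrite it.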
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
build_file, the_dict_key=None):
"""Handle all variable and command expansion and conditional evaluation.
This function is the public entry point for all variable expansions and
conditional evaluations. The variables_in dictionary will not be modified
by this function.
"""
# Make a copy of the variables_in dict that can be modified during the
# loading of automatics and the loading of the variables dict.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
if 'variables' in the_dict:
# Make sure all the local variables are added to the variables
# list before we process them so that you can reference one
# variable from another. They will be fully expanded by recursion
# in ExpandVariables.
for key, value in the_dict['variables'].iteritems():
variables[key] = value
# Handle the associated variables dict first, so that any variable
# references within can be resolved prior to using them as variables.
# Pass a copy of the variables dict to avoid having it be tainted.
# Otherwise, it would have extra automatics added for everything that
# should just be an ordinary variable in this scope.
ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
variables, build_file, 'variables')
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
for key, value in the_dict.iteritems():
# Skip "variables", which was already processed if present.
if key != 'variables' and isinstance(value, str):
expanded = ExpandVariables(value, phase, variables, build_file)
if not isinstance(expanded, str) and not isinstance(expanded, int):
raise ValueError, \
'Variable expansion in this context permits str and int ' + \
'only, found ' + expanded.__class__.__name__ + ' for ' + key
the_dict[key] = expanded
# Variable expansion may have resulted in changes to automatics. Reload.
# TODO(mark): Optimization: only reload if no changes were made.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Process conditions in this dict. This is done after variable expansion
# so that conditions may take advantage of expanded variables. For example,
# if the_dict contains:
# {'type': '<(library_type)',
# 'conditions': [['_type=="static_library"', { ... }]]},
# _type, as used in the condition, will only be set to the value of
# library_type if variable expansion is performed before condition
# processing. However, condition processing should occur prior to recursion
# so that variables (both automatic and "variables" dict type) may be
# adjusted by conditions sections, merged into the_dict, and have the
# intended impact on contained dicts.
#
# This arrangement means that a "conditions" section containing a "variables"
# section will only have those variables effective in subdicts, not in
# the_dict. The workaround is to put a "conditions" section within a
# "variables" section. For example:
# {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will not result in "IS_MAC" being appended to the "defines" list in the
# current scope but would result in it being appended to the "defines" list
# within "my_subdict". By comparison:
# {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
# 'defines': ['<(define)'],
# 'my_subdict': {'defines': ['<(define)']}},
# will append "IS_MAC" to both "defines" lists.
# Evaluate conditions sections, allowing variable expansions within them
# as well as nested conditionals. This will process a 'conditions' or
# 'target_conditions' section, perform appropriate merging and recursive
# conditional and variable processing, and then remove the conditions section
# from the_dict if it is present.
ProcessConditionsInDict(the_dict, phase, variables, build_file)
# Conditional processing may have resulted in changes to automatics or the
# variables dict. Reload.
variables = variables_in.copy()
LoadAutomaticVariablesFromDict(variables, the_dict)
LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
# Recurse into child dicts, or process child lists which may result in
# further recursion into descendant dicts.
for key, value in the_dict.iteritems():
# Skip "variables" and string values, which were already processed if
# present.
if key == 'variables' or isinstance(value, str):
continue
if isinstance(value, dict):
# Pass a copy of the variables dict so that subdicts can't influence
# parents.
ProcessVariablesAndConditionsInDict(value, phase, variables,
build_file, key)
elif isinstance(value, list):
# The list itself can't influence the variables dict, and
# ProcessVariablesAndConditionsInList will make copies of the variables
# dict if it needs to pass it to something that can influence it. No
# copy is necessary here.
ProcessVariablesAndConditionsInList(value, phase, variables,
build_file)
elif not isinstance(value, int):
raise TypeError, 'Unknown type ' + value.__class__.__name__ + \
' for ' + key
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
build_file):
# Iterate using an index so that new values can be assigned into the_list.
index = 0
while index < len(the_list):
item = the_list[index]
if isinstance(item, dict):
# Make a copy of the variables dict so that it won't influence anything
# outside of its own scope.
ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
elif isinstance(item, list):
ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
elif isinstance(item, str):
expanded = ExpandVariables(item, phase, variables, build_file)
if isinstance(expanded, str) or isinstance(expanded, int):
the_list[index] = expanded
elif isinstance(expanded, list):
the_list[index:index+1] = expanded
index += len(expanded)
# index now identifies the next item to examine. Continue right now
# without falling into the index increment below.
continue
else:
        raise ValueError, \
              'Variable expansion in this context permits strings and ' + \
              'lists only, found ' + expanded.__class__.__name__ + \
              ' at index ' + str(index)
elif not isinstance(item, int):
      raise TypeError, 'Unknown type ' + item.__class__.__name__ + \
                       ' at index ' + str(index)
index = index + 1
def BuildTargetsDict(data):
"""Builds a dict mapping fully-qualified target names to their target dicts.
|data| is a dict mapping loaded build files by pathname relative to the
current directory. Values in |data| are build file contents. For each
|data| value with a "targets" key, the value of the "targets" key is taken
as a list containing target dicts. Each target's fully-qualified name is
constructed from the pathname of the build file (|data| key) and its
"target_name" property. These fully-qualified names are used as the keys
in the returned dict. These keys provide access to the target dicts,
the dicts in the "targets" lists.
"""
targets = {}
for build_file in data['target_build_files']:
for target in data[build_file].get('targets', []):
target_name = gyp.common.QualifiedTarget(build_file,
target['target_name'],
target['toolset'])
if target_name in targets:
raise GypError('Duplicate target definitions for ' + target_name)
targets[target_name] = target
return targets
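# Note: the fully-qualified keys built here come from
# gyp.common.QualifiedTarget, which renders names like
# 'path/to/file.gyp:target_name#toolset' (see gyp/common.py for the
# authoritative format).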
def QualifyDependencies(targets):
"""Make dependency links fully-qualified relative to the current directory.
|targets| is a dict mapping fully-qualified target names to their target
dicts. For each target in this dict, keys known to contain dependency
links are examined, and any dependencies referenced will be rewritten
so that they are fully-qualified and relative to the current directory.
All rewritten dependencies are suitable for use as keys to |targets| or a
similar dict.
"""
all_dependency_sections = [dep + op
for dep in dependency_sections
for op in ('', '!', '/')]
for target, target_dict in targets.iteritems():
target_build_file = gyp.common.BuildFile(target)
toolset = target_dict['toolset']
for dependency_key in all_dependency_sections:
dependencies = target_dict.get(dependency_key, [])
for index in xrange(0, len(dependencies)):
dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
target_build_file, dependencies[index], toolset)
if not multiple_toolsets:
# Ignore toolset specification in the dependency if it is specified.
dep_toolset = toolset
dependency = gyp.common.QualifiedTarget(dep_file,
dep_target,
dep_toolset)
dependencies[index] = dependency
# Make sure anything appearing in a list other than "dependencies" also
# appears in the "dependencies" list.
if dependency_key != 'dependencies' and \
dependency not in target_dict['dependencies']:
raise GypError('Found ' + dependency + ' in ' + dependency_key +
' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
"""Expands dependencies specified as build_file:*.
For each target in |targets|, examines sections containing links to other
targets. If any such section contains a link of the form build_file:*, it
is taken as a wildcard link, and is expanded to list each target in
build_file. The |data| dict provides access to build file dicts.
Any target that does not wish to be included by wildcard can provide an
optional "suppress_wildcard" key in its target dict. When present and
true, a wildcard dependency link will not include such targets.
All dependency names, including the keys to |targets| and the values in each
dependency list, must be qualified when this function is called.
"""
for target, target_dict in targets.iteritems():
toolset = target_dict['toolset']
target_build_file = gyp.common.BuildFile(target)
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
# Loop this way instead of "for dependency in" or "for index in xrange"
# because the dependencies list will be modified within the loop body.
index = 0
while index < len(dependencies):
(dependency_build_file, dependency_target, dependency_toolset) = \
gyp.common.ParseQualifiedTarget(dependencies[index])
if dependency_target != '*' and dependency_toolset != '*':
# Not a wildcard. Keep it moving.
index = index + 1
continue
if dependency_build_file == target_build_file:
# It's an error for a target to depend on all other targets in
# the same file, because a target cannot depend on itself.
raise GypError('Found wildcard in ' + dependency_key + ' of ' +
target + ' referring to same build file')
# Take the wildcard out and adjust the index so that the next
# dependency in the list will be processed the next time through the
# loop.
del dependencies[index]
index = index - 1
# Loop through the targets in the other build file, adding them to
# this target's list of dependencies in place of the removed
# wildcard.
dependency_target_dicts = data[dependency_build_file]['targets']
for dependency_target_dict in dependency_target_dicts:
if int(dependency_target_dict.get('suppress_wildcard', False)):
continue
dependency_target_name = dependency_target_dict['target_name']
if (dependency_target != '*' and
dependency_target != dependency_target_name):
continue
dependency_target_toolset = dependency_target_dict['toolset']
if (dependency_toolset != '*' and
dependency_toolset != dependency_target_toolset):
continue
dependency = gyp.common.QualifiedTarget(dependency_build_file,
dependency_target_name,
dependency_target_toolset)
index = index + 1
dependencies.insert(index, dependency)
index = index + 1
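# Sketch of the wildcard expansion above (hypothetical files): a qualified
# dependency on 'other.gyp:*#target' is deleted and replaced, in place, by
# one qualified entry per target defined in other.gyp that does not set
# 'suppress_wildcard', e.g. 'other.gyp:lib1#target' and
# 'other.gyp:lib2#target'.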
def Unify(l):
"""Removes duplicate elements from l, keeping the first element."""
seen = {}
return [seen.setdefault(e, e) for e in l if e not in seen]
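# Doctest-style example: Unify([1, 2, 1, 3, 2]) -> [1, 2, 3]; the
# setdefault bookkeeping keeps the first occurrence of each element.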
def RemoveDuplicateDependencies(targets):
"""Makes sure every dependency appears only once in all targets's dependency
lists."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
"""Removes item from l."""
res = {}
return [res.setdefault(e, e) for e in l if e != item]
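# Doctest-style example: Filter([1, 2, 1, 3], 1) -> [2, 3]. Unlike Unify,
# duplicates among the surviving elements are preserved.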
def RemoveSelfDependencies(targets):
"""Remove self dependencies from targets that have the prune_self_dependency
variable set."""
for target_name, target_dict in targets.iteritems():
for dependency_key in dependency_sections:
dependencies = target_dict.get(dependency_key, [])
if dependencies:
for t in dependencies:
if t == target_name:
if targets[t].get('variables', {}).get('prune_self_dependency', 0):
target_dict[dependency_key] = Filter(dependencies, target_name)
class DependencyGraphNode(object):
"""
Attributes:
ref: A reference to an object that this DependencyGraphNode represents.
dependencies: List of DependencyGraphNodes on which this one depends.
dependents: List of DependencyGraphNodes that depend on this one.
"""
class CircularException(GypError):
pass
def __init__(self, ref):
self.ref = ref
self.dependencies = []
self.dependents = []
def FlattenToList(self):
# flat_list is the sorted list of dependencies - actually, the list items
# are the "ref" attributes of DependencyGraphNodes. Every target will
# appear in flat_list after all of its dependencies, and before all of its
# dependents.
flat_list = []
    # in_degree_zeros is the set of DependencyGraphNodes that have no
    # dependencies not in flat_list. Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
in_degree_zeros = set(self.dependents[:])
while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so
      # they can be appended to flat_list. Take these nodes out of
      # in_degree_zeros as work progresses; since it's a set, the pop order
      # is arbitrary but safe.
node = in_degree_zeros.pop()
flat_list.append(node.ref)
# Look at dependents of the node just added to flat_list. Some of them
# may now belong in in_degree_zeros.
for node_dependent in node.dependents:
is_in_degree_zero = True
for node_dependent_dependency in node_dependent.dependencies:
if not node_dependent_dependency.ref in flat_list:
            # The dependent has one or more dependencies not in flat_list.
            # There
# will be more chances to add it to flat_list when examining
# it again as a dependent of those other dependencies, provided
# that there are no cycles.
is_in_degree_zero = False
break
if is_in_degree_zero:
# All of the dependent's dependencies are already in flat_list. Add
# it to in_degree_zeros where it will be processed in a future
# iteration of the outer loop.
in_degree_zeros.add(node_dependent)
return flat_list
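  # Illustrative ordering (hypothetical graph): if A depends on B and B
  # depends on C, FlattenToList on the root node yields ['C', 'B', 'A'];
  # every ref appears after all of its dependencies.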
def DirectDependencies(self, dependencies=None):
"""Returns a list of just direct dependencies."""
    if dependencies is None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
return dependencies
def _AddImportedDependencies(self, targets, dependencies=None):
"""Given a list of direct dependencies, adds indirect dependencies that
other dependencies have declared to export their settings.
This method does not operate on self. Rather, it operates on the list
of dependencies in the |dependencies| argument. For each dependency in
that list, if any declares that it exports the settings of one of its
own dependencies, those dependencies whose settings are "passed through"
are added to the list. As new items are added to the list, they too will
be processed, so it is possible to import settings through multiple levels
of dependencies.
    This method is not terribly useful on its own; it depends on being
"primed" with a list of direct dependencies such as one provided by
DirectDependencies. DirectAndImportedDependencies is intended to be the
public entry point.
"""
    if dependencies is None:
dependencies = []
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Add any dependencies whose settings should be imported to the list
# if not already present. Newly-added items will be checked for
# their own imports when the list iteration reaches them.
# Rather than simply appending new items, insert them after the
# dependency that exported them. This is done to more closely match
# the depth-first method used by DeepDependencies.
add_index = 1
for imported_dependency in \
dependency_dict.get('export_dependent_settings', []):
if imported_dependency not in dependencies:
dependencies.insert(index + add_index, imported_dependency)
add_index = add_index + 1
index = index + 1
return dependencies
def DirectAndImportedDependencies(self, targets, dependencies=None):
"""Returns a list of a target's direct dependencies and all indirect
dependencies that a dependency has advertised settings should be exported
through the dependency for.
"""
dependencies = self.DirectDependencies(dependencies)
return self._AddImportedDependencies(targets, dependencies)
def DeepDependencies(self, dependencies=None):
"""Returns a list of all of a target's dependencies, recursively."""
    if dependencies is None:
dependencies = []
for dependency in self.dependencies:
# Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
dependencies.append(dependency.ref)
dependency.DeepDependencies(dependencies)
return dependencies
def LinkDependencies(self, targets, dependencies=None, initial=True):
"""Returns a list of dependency targets that are linked into this target.
This function has a split personality, depending on the setting of
|initial|. Outside callers should always leave |initial| at its default
setting.
When adding a target to the list of dependencies, this function will
recurse into itself with |initial| set to False, to collect dependencies
that are linked into the linkable target for which the list is being built.
"""
    if dependencies is None:
dependencies = []
# Check for None, corresponding to the root node.
    if self.ref is None:
return dependencies
# It's kind of sucky that |targets| has to be passed into this function,
# but that's presently the easiest way to access the target dicts so that
# this function can find target types.
if 'target_name' not in targets[self.ref]:
raise GypError("Missing 'target_name' field in target.")
if 'type' not in targets[self.ref]:
raise GypError("Missing 'type' field in target %s" %
targets[self.ref]['target_name'])
target_type = targets[self.ref]['type']
is_linkable = target_type in linkable_types
if initial and not is_linkable:
# If this is the first target being examined and it's not linkable,
# return an empty list of link dependencies, because the link
# dependencies are intended to apply to the target itself (initial is
# True) and this target won't be linked.
return dependencies
# Don't traverse 'none' targets if explicitly excluded.
if (target_type == 'none' and
not targets[self.ref].get('dependencies_traverse', True)):
if self.ref not in dependencies:
dependencies.append(self.ref)
return dependencies
# Executables and loadable modules are already fully and finally linked.
    # Nothing else can be a link dependency of them; there can only be
# dependencies in the sense that a dependent target might run an
# executable or load the loadable_module.
if not initial and target_type in ('executable', 'loadable_module'):
return dependencies
# The target is linkable, add it to the list of link dependencies.
if self.ref not in dependencies:
dependencies.append(self.ref)
if initial or not is_linkable:
      # If this is a subsequent target and it's linkable, don't look any
      # further for linkable dependencies, as they'll already be linked into
      # this linkable target. Always look at dependencies of the initial
      # target, and always look at dependencies of non-linkables.
for dependency in self.dependencies:
dependency.LinkDependencies(targets, dependencies, False)
return dependencies
def BuildDependencyList(targets):
# Create a DependencyGraphNode for each target. Put it into a dict for easy
# access.
dependency_nodes = {}
for target, spec in targets.iteritems():
if target not in dependency_nodes:
dependency_nodes[target] = DependencyGraphNode(target)
# Set up the dependency links. Targets that have no dependencies are treated
# as dependent on root_node.
root_node = DependencyGraphNode(None)
for target, spec in targets.iteritems():
target_node = dependency_nodes[target]
target_build_file = gyp.common.BuildFile(target)
dependencies = spec.get('dependencies')
if not dependencies:
target_node.dependencies = [root_node]
root_node.dependents.append(target_node)
else:
for dependency in dependencies:
dependency_node = dependency_nodes.get(dependency)
if not dependency_node:
raise GypError("Dependency '%s' not found while "
"trying to load target %s" % (dependency, target))
target_node.dependencies.append(dependency_node)
dependency_node.dependents.append(target_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle). If you need to figure out what's wrong, look for elements of
# targets that are not in flat_list.
if len(flat_list) != len(targets):
raise DependencyGraphNode.CircularException(
'Some targets not reachable, cycle in dependency graph detected: ' +
' '.join(set(flat_list) ^ set(targets)))
return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
# Create a DependencyGraphNode for each gyp file containing a target. Put
# it into a dict for easy access.
dependency_nodes = {}
for target in targets.iterkeys():
build_file = gyp.common.BuildFile(target)
    if build_file not in dependency_nodes:
dependency_nodes[build_file] = DependencyGraphNode(build_file)
# Set up the dependency links.
for target, spec in targets.iteritems():
build_file = gyp.common.BuildFile(target)
build_file_node = dependency_nodes[build_file]
target_dependencies = spec.get('dependencies', [])
for dependency in target_dependencies:
try:
dependency_build_file = gyp.common.BuildFile(dependency)
except GypError, e:
gyp.common.ExceptionAppend(
e, 'while computing dependencies of .gyp file %s' % build_file)
raise
if dependency_build_file == build_file:
# A .gyp file is allowed to refer back to itself.
continue
dependency_node = dependency_nodes.get(dependency_build_file)
if not dependency_node:
raise GypError("Dependancy '%s' not found" % dependency_build_file)
if dependency_node not in build_file_node.dependencies:
build_file_node.dependencies.append(dependency_node)
dependency_node.dependents.append(build_file_node)
# Files that have no dependencies are treated as dependent on root_node.
root_node = DependencyGraphNode(None)
for build_file_node in dependency_nodes.itervalues():
if len(build_file_node.dependencies) == 0:
build_file_node.dependencies.append(root_node)
root_node.dependents.append(build_file_node)
flat_list = root_node.FlattenToList()
# If there's anything left unvisited, there must be a circular dependency
# (cycle).
if len(flat_list) != len(dependency_nodes):
bad_files = []
for file in dependency_nodes.iterkeys():
      if file not in flat_list:
bad_files.append(file)
raise DependencyGraphNode.CircularException, \
'Some files not reachable, cycle in .gyp file dependency graph ' + \
'detected involving some or all of: ' + \
' '.join(bad_files)
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
# key should be one of all_dependent_settings, direct_dependent_settings,
# or link_settings.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
if key == 'all_dependent_settings':
dependencies = dependency_nodes[target].DeepDependencies()
elif key == 'direct_dependent_settings':
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
elif key == 'link_settings':
dependencies = dependency_nodes[target].LinkDependencies(targets)
else:
raise GypError("DoDependentSettings doesn't know how to determine "
'dependencies for ' + key)
for dependency in dependencies:
dependency_dict = targets[dependency]
      if key not in dependency_dict:
continue
dependency_build_file = gyp.common.BuildFile(dependency)
MergeDicts(target_dict, dependency_dict[key],
build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
sort_dependencies):
# Recompute target "dependencies" properties. For each static library
# target, remove "dependencies" entries referring to other static libraries,
# unless the dependency has the "hard_dependency" attribute set. For each
# linkable target, add a "dependencies" entry referring to all of the
  # target's computed list of link dependencies (including static libraries)
  # if no such entry is already present.
for target in flat_list:
target_dict = targets[target]
target_type = target_dict['type']
if target_type == 'static_library':
      if 'dependencies' not in target_dict:
continue
target_dict['dependencies_original'] = target_dict.get(
'dependencies', [])[:]
# A static library should not depend on another static library unless
# the dependency relationship is "hard," which should only be done when
# a dependent relies on some side effect other than just the build
# product, like a rule or action output. Further, if a target has a
# non-hard dependency, but that dependency exports a hard dependency,
# the non-hard dependency can safely be removed, but the exported hard
# dependency must be added to the target to keep the same dependency
# ordering.
dependencies = \
dependency_nodes[target].DirectAndImportedDependencies(targets)
index = 0
while index < len(dependencies):
dependency = dependencies[index]
dependency_dict = targets[dependency]
# Remove every non-hard static library dependency and remove every
# non-static library dependency that isn't a direct dependency.
if (dependency_dict['type'] == 'static_library' and \
not dependency_dict.get('hard_dependency', False)) or \
(dependency_dict['type'] != 'static_library' and \
not dependency in target_dict['dependencies']):
# Take the dependency out of the list, and don't increment index
# because the next dependency to analyze will shift into the index
# formerly occupied by the one being removed.
del dependencies[index]
else:
index = index + 1
# Update the dependencies. If the dependencies list is empty, it's not
# needed, so unhook it.
if len(dependencies) > 0:
target_dict['dependencies'] = dependencies
else:
del target_dict['dependencies']
elif target_type in linkable_types:
# Get a list of dependency targets that should be linked into this
# target. Add them to the dependencies list if they're not already
# present.
link_dependencies = dependency_nodes[target].LinkDependencies(targets)
for dependency in link_dependencies:
if dependency == target:
continue
        if 'dependencies' not in target_dict:
target_dict['dependencies'] = []
        if dependency not in target_dict['dependencies']:
target_dict['dependencies'].append(dependency)
# Sort the dependencies list in the order from dependents to dependencies.
# e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
# Note: flat_list is already sorted in the order from dependencies to
# dependents.
if sort_dependencies and 'dependencies' in target_dict:
target_dict['dependencies'] = [dep for dep in reversed(flat_list)
if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
# If item is a relative path, it's relative to the build file dict that it's
# coming from. Fix it up to make it relative to the build file dict that
# it's going into.
# Exception: any |item| that begins with these special characters is
# returned without modification.
# / Used when a path is already absolute (shortcut optimization;
# such paths would be returned as absolute anyway)
# $ Used for build environment variables
# - Used for some build environment flags (such as -lapr-1 in a
# "libraries" section)
# < Used for our own variable and command expansions (see ExpandVariables)
# > Used for our own variable and command expansions (see ExpandVariables)
# ^ Used for our own variable and command expansions (see ExpandVariables)
  #
  # " or '  Used when a value is quoted. If either is present, then we
  #         check the second character instead.
  #
if to_file == fro_file or exception_re.match(item):
return item
else:
# TODO(dglazkov) The backslash/forward-slash replacement at the end is a
# temporary measure. This should really be addressed by keeping all paths
# in POSIX until actual project generation.
ret = os.path.normpath(os.path.join(
gyp.common.RelativePath(os.path.dirname(fro_file),
os.path.dirname(to_file)),
item)).replace('\\', '/')
if item[-1] == '/':
ret += '/'
return ret
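# Illustrative sketch (hypothetical paths, not from the original source):
# assuming a value 'src/x.c' defined in subdir/b.gyp is merged into a.gyp at
# the top level, the path is rebased relative to the destination build file,
# while items beginning with the special characters pass through untouched:
#
#   MakePathRelative('a.gyp', 'subdir/b.gyp', 'src/x.c')   # -> 'subdir/src/x.c'
#   MakePathRelative('a.gyp', 'subdir/b.gyp', '$(V)/x.c')  # -> '$(V)/x.c'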
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  # The Python documentation recommends that objects which do not support
  # hashing set __hash__ to None. Python library objects follow this rule.
is_hashable = lambda val: val.__hash__
# If x is hashable, returns whether x is in s. Else returns whether x is in l.
def is_in_set_or_list(x, s, l):
if is_hashable(x):
return x in s
return x in l
prepend_index = 0
# Make membership testing of hashables in |to| (in particular, strings)
# faster.
hashable_to_set = set(x for x in to if is_hashable(x))
for item in fro:
singleton = False
if isinstance(item, str) or isinstance(item, int):
# The cheap and easy case.
if is_paths:
to_item = MakePathRelative(to_file, fro_file, item)
else:
to_item = item
if not isinstance(item, str) or not item.startswith('-'):
# Any string that doesn't begin with a "-" is a singleton - it can
# only appear once in a list, to be enforced by the list merge append
# or prepend.
singleton = True
elif isinstance(item, dict):
# Make a copy of the dictionary, continuing to look for paths to fix.
# The other intelligent aspects of merge processing won't apply because
# item is being merged into an empty dict.
to_item = {}
MergeDicts(to_item, item, to_file, fro_file)
elif isinstance(item, list):
# Recurse, making a copy of the list. If the list contains any
# descendant dicts, path fixing will occur. Note that here, custom
# values for is_paths and append are dropped; those are only to be
# applied to |to| and |fro|, not sublists of |fro|. append shouldn't
# matter anyway because the new |to_item| list is empty.
to_item = []
MergeLists(to_item, item, to_file, fro_file)
else:
raise TypeError, \
'Attempt to merge list item of unsupported type ' + \
item.__class__.__name__
if append:
# If appending a singleton that's already in the list, don't append.
# This ensures that the earliest occurrence of the item will stay put.
if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
to.append(to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
else:
# If prepending a singleton that's already in the list, remove the
# existing instance and proceed with the prepend. This ensures that the
# item appears at the earliest possible position in the list.
while singleton and to_item in to:
to.remove(to_item)
# Don't just insert everything at index 0. That would prepend the new
# items to the list in reverse order, which would be an unwelcome
# surprise.
to.insert(prepend_index, to_item)
if is_hashable(to_item):
hashable_to_set.add(to_item)
prepend_index = prepend_index + 1
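# Illustrative sketch (hypothetical values, not from the original source) of
# the singleton rules above; flags beginning with '-' may repeat, other
# strings may not:
#
#   to = ['a', '-f']
#   MergeLists(to, ['a', '-f'], 'x.gyp', 'x.gyp')          # append
#   # to == ['a', '-f', '-f']
#   MergeLists(to, ['b'], 'x.gyp', 'x.gyp', append=False)  # prepend
#   # to == ['b', 'a', '-f', '-f']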
def MergeDicts(to, fro, to_file, fro_file):
# I wanted to name the parameter "from" but it's a Python keyword...
for k, v in fro.iteritems():
# It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
# copy semantics. Something else may want to merge from the |fro| dict
# later, and having the same dict ref pointed to twice in the tree isn't
# what anyone wants considering that the dicts may subsequently be
# modified.
if k in to:
bad_merge = False
if isinstance(v, str) or isinstance(v, int):
if not (isinstance(to[k], str) or isinstance(to[k], int)):
bad_merge = True
elif v.__class__ != to[k].__class__:
bad_merge = True
if bad_merge:
raise TypeError, \
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[k].__class__.__name__ + \
' for key ' + k
if isinstance(v, str) or isinstance(v, int):
# Overwrite the existing value, if any. Cheap and easy.
is_path = IsPathSection(k)
if is_path:
to[k] = MakePathRelative(to_file, fro_file, v)
else:
to[k] = v
elif isinstance(v, dict):
# Recurse, guaranteeing copies will be made of objects that require it.
if not k in to:
to[k] = {}
MergeDicts(to[k], v, to_file, fro_file)
elif isinstance(v, list):
# Lists in dicts can be merged with different policies, depending on
# how the key in the "from" dict (k, the from-key) is written.
#
# If the from-key has ...the to-list will have this action
# this character appended:... applied when receiving the from-list:
# = replace
# + prepend
# ? set, only if to-list does not yet exist
# (none) append
#
# This logic is list-specific, but since it relies on the associated
# dict key, it's checked in this dict-oriented function.
ext = k[-1]
append = True
if ext == '=':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '?']
to[list_base] = []
elif ext == '+':
list_base = k[:-1]
lists_incompatible = [list_base + '=', list_base + '?']
append = False
elif ext == '?':
list_base = k[:-1]
lists_incompatible = [list_base, list_base + '=', list_base + '+']
else:
list_base = k
lists_incompatible = [list_base + '=', list_base + '?']
# Some combinations of merge policies appearing together are meaningless.
# It's stupid to replace and append simultaneously, for example. Append
# and prepend are the only policies that can coexist.
for list_incompatible in lists_incompatible:
if list_incompatible in fro:
raise GypError('Incompatible list policies ' + k + ' and ' +
list_incompatible)
if list_base in to:
if ext == '?':
# If the key ends in "?", the list will only be merged if it doesn't
# already exist.
continue
if not isinstance(to[list_base], list):
# This may not have been checked above if merging in a list with an
# extension character.
raise TypeError, \
'Attempt to merge dict value of type ' + v.__class__.__name__ + \
' into incompatible type ' + to[list_base].__class__.__name__ + \
' for key ' + list_base + '(' + k + ')'
else:
to[list_base] = []
# Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although only to make
      # copies of dicts (with paths fixed); there will be no subsequent dict
      # "merging" once inside a list, because lists are always replaced,
      # appended to, or prepended to.
is_paths = IsPathSection(list_base)
MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
else:
raise TypeError, \
'Attempt to merge dict value of unsupported type ' + \
v.__class__.__name__ + ' for key ' + k
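# Illustrative sketch (hypothetical values, not from the original source) of
# the four list policies described above:
#
#   to = {'defines': ['A']}
#   MergeDicts(to, {'defines': ['B']}, 'x.gyp', 'x.gyp')   # (none): append
#   # to['defines'] == ['A', 'B']
#   MergeDicts(to, {'defines+': ['C']}, 'x.gyp', 'x.gyp')  # '+': prepend
#   # to['defines'] == ['C', 'A', 'B']
#   MergeDicts(to, {'defines=': ['D']}, 'x.gyp', 'x.gyp')  # '=': replace
#   # to['defines'] == ['D']
#   MergeDicts(to, {'defines?': ['E']}, 'x.gyp', 'x.gyp')  # '?': set if absent
#   # to['defines'] == ['D'] because the list already exists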
def MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, visited):
  # Skip if previously visited.
if configuration in visited:
return
# Look at this configuration.
configuration_dict = target_dict['configurations'][configuration]
# Merge in parents.
for parent in configuration_dict.get('inherit_from', []):
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, parent, visited + [configuration])
# Merge it into the new config.
MergeDicts(new_configuration_dict, configuration_dict,
build_file, build_file)
# Drop abstract.
if 'abstract' in new_configuration_dict:
del new_configuration_dict['abstract']
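# Illustrative sketch (hypothetical configurations, not from the original
# source): with
#
#   'Base':  {'abstract': 1, 'defines': ['COMMON']},
#   'Debug': {'inherit_from': ['Base'], 'defines': ['DEBUG']},
#
# building the 'Debug' configuration merges 'Base' first and 'Debug' second,
# yielding defines == ['COMMON', 'DEBUG'], and the 'abstract' flag is dropped
# from the merged result.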
def SetUpConfigurations(target, target_dict):
# key_suffixes is a list of key suffixes that might appear on key names.
# These suffixes are handled in conditional evaluations (for =, +, and ?)
# and rules/exclude processing (for ! and /). Keys with these suffixes
# should be treated the same as keys without.
key_suffixes = ['=', '+', '?', '!', '/']
build_file = gyp.common.BuildFile(target)
# Provide a single configuration by default if none exists.
# TODO(mark): Signal an error if default_configurations exists but
# configurations does not.
if not 'configurations' in target_dict:
target_dict['configurations'] = {'Default': {}}
if not 'default_configuration' in target_dict:
concrete = [i for i in target_dict['configurations'].iterkeys()
if not target_dict['configurations'][i].get('abstract')]
target_dict['default_configuration'] = sorted(concrete)[0]
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
# Skip abstract configurations (saves work only).
if old_configuration_dict.get('abstract'):
continue
# Configurations inherit (most) settings from the enclosing target scope.
# Get the inheritance relationship right by making a copy of the target
# dict.
new_configuration_dict = copy.deepcopy(target_dict)
# Take out the bits that don't belong in a "configurations" section.
# Since configuration setup is done before conditional, exclude, and rules
# processing, be careful with handling of the suffix characters used in
# those phases.
delete_keys = []
for key in new_configuration_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del new_configuration_dict[key]
# Merge in configuration (with all its parents first).
MergeConfigWithInheritance(new_configuration_dict, build_file,
target_dict, configuration, [])
# Put the new result back into the target dict as a configuration.
target_dict['configurations'][configuration] = new_configuration_dict
# Now drop all the abstract ones.
for configuration in target_dict['configurations'].keys():
old_configuration_dict = target_dict['configurations'][configuration]
if old_configuration_dict.get('abstract'):
del target_dict['configurations'][configuration]
# Now that all of the target's configurations have been built, go through
# the target dict's keys and remove everything that's been moved into a
# "configurations" section.
delete_keys = []
for key in target_dict:
key_ext = key[-1:]
if key_ext in key_suffixes:
key_base = key[:-1]
else:
key_base = key
if not key_base in non_configuration_keys:
delete_keys.append(key)
for key in delete_keys:
del target_dict[key]
# Check the configurations to see if they contain invalid keys.
for configuration in target_dict['configurations'].keys():
configuration_dict = target_dict['configurations'][configuration]
for key in configuration_dict.keys():
if key in invalid_configuration_keys:
raise GypError('%s not allowed in the %s configuration, found in '
'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
"""Process regular expression and exclusion-based filters on lists.
An exclusion list is in a dict key named with a trailing "!", like
"sources!". Every item in such a list is removed from the associated
  main list, which in this example would be "sources". Removed items are
placed into a "sources_excluded" list in the dict.
Regular expression (regex) filters are contained in dict keys named with a
trailing "/", such as "sources/" to operate on the "sources" list. Regex
filters in a dict take the form:
'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
['include', '_mac\\.cc$'] ],
The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
_win.cc. The second filter then includes all files ending in _mac.cc that
are now or were once in the "sources" list. Items matching an "exclude"
filter are subject to the same processing as would occur if they were listed
by name in an exclusion list (ending in "!"). Items matching an "include"
filter are brought back into the main list if previously excluded by an
exclusion list or exclusion regex filter. Subsequent matching "exclude"
patterns can still cause items to be excluded after matching an "include".
"""
# Look through the dictionary for any lists whose keys end in "!" or "/".
# These are lists that will be treated as exclude lists and regular
# expression-based exclude/include lists. Collect the lists that are
# needed first, looking for the lists that they operate on, and assemble
  # them into |lists|. This is done in a separate loop up front, because
# the _included and _excluded keys need to be added to the_dict, and that
# can't be done while iterating through it.
lists = []
del_lists = []
for key, value in the_dict.iteritems():
operation = key[-1]
if operation != '!' and operation != '/':
continue
if not isinstance(value, list):
raise ValueError, name + ' key ' + key + ' must be list, not ' + \
value.__class__.__name__
list_key = key[:-1]
if list_key not in the_dict:
# This happens when there's a list like "sources!" but no corresponding
# "sources" list. Since there's nothing for it to operate on, queue up
# the "sources!" list for deletion now.
del_lists.append(key)
continue
if not isinstance(the_dict[list_key], list):
raise ValueError, name + ' key ' + list_key + \
' must be list, not ' + \
value.__class__.__name__ + ' when applying ' + \
{'!': 'exclusion', '/': 'regex'}[operation]
if not list_key in lists:
lists.append(list_key)
# Delete the lists that are known to be unneeded at this point.
for del_list in del_lists:
del the_dict[del_list]
for list_key in lists:
the_list = the_dict[list_key]
# Initialize the list_actions list, which is parallel to the_list. Each
# item in list_actions identifies whether the corresponding item in
# the_list should be excluded, unconditionally preserved (included), or
# whether no exclusion or inclusion has been applied. Items for which
# no exclusion or inclusion has been applied (yet) have value -1, items
# excluded have value 0, and items included have value 1. Includes and
# excludes override previous actions. All items in list_actions are
# initialized to -1 because no excludes or includes have been processed
# yet.
list_actions = list((-1,) * len(the_list))
exclude_key = list_key + '!'
if exclude_key in the_dict:
for exclude_item in the_dict[exclude_key]:
for index in xrange(0, len(the_list)):
if exclude_item == the_list[index]:
# This item matches the exclude_item, so set its action to 0
# (exclude).
list_actions[index] = 0
# The "whatever!" list is no longer needed, dump it.
del the_dict[exclude_key]
regex_key = list_key + '/'
if regex_key in the_dict:
for regex_item in the_dict[regex_key]:
[action, pattern] = regex_item
pattern_re = re.compile(pattern)
if action == 'exclude':
# This item matches an exclude regex, so set its value to 0 (exclude).
action_value = 0
elif action == 'include':
# This item matches an include regex, so set its value to 1 (include).
action_value = 1
else:
# This is an action that doesn't make any sense.
raise ValueError, 'Unrecognized action ' + action + ' in ' + name + \
' key ' + regex_key
for index in xrange(0, len(the_list)):
list_item = the_list[index]
if list_actions[index] == action_value:
# Even if the regex matches, nothing will change so continue (regex
# searches are expensive).
continue
if pattern_re.search(list_item):
# Regular expression match.
list_actions[index] = action_value
# The "whatever/" list is no longer needed, dump it.
del the_dict[regex_key]
# Add excluded items to the excluded list.
#
# Note that exclude_key ("sources!") is different from excluded_key
# ("sources_excluded"). The exclude_key list is input and it was already
# processed and deleted; the excluded_key list is output and it's about
# to be created.
excluded_key = list_key + '_excluded'
if excluded_key in the_dict:
raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     'to applying exclusion/regex filters for ' + list_key)
excluded_list = []
# Go backwards through the list_actions list so that as items are deleted,
# the indices of items that haven't been seen yet don't shift. That means
# that things need to be prepended to excluded_list to maintain them in the
# same order that they existed in the_list.
for index in xrange(len(list_actions) - 1, -1, -1):
if list_actions[index] == 0:
# Dump anything with action 0 (exclude). Keep anything with action 1
# (include) or -1 (no include or exclude seen for the item).
excluded_list.insert(0, the_list[index])
del the_list[index]
# If anything was excluded, put the excluded list into the_dict at
# excluded_key.
if len(excluded_list) > 0:
the_dict[excluded_key] = excluded_list
# Now recurse into subdicts and lists that may contain dicts.
for key, value in the_dict.iteritems():
if isinstance(value, dict):
ProcessListFiltersInDict(key, value)
elif isinstance(value, list):
ProcessListFiltersInList(key, value)
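# Illustrative sketch (hypothetical values, not from the original source):
#
#   d = {'sources': ['a_linux.cc', 'a_mac.cc', 'a_win.cc'],
#        'sources!': ['a_win.cc'],
#        'sources/': [['exclude', '_(linux|mac)\\.cc$'],
#                     ['include', '_mac\\.cc$']]}
#   ProcessListFiltersInDict('target', d)
#   # d['sources'] == ['a_mac.cc']
#   # d['sources_excluded'] == ['a_linux.cc', 'a_win.cc']
#   # The 'sources!' and 'sources/' keys have been consumed and deleted.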
def ProcessListFiltersInList(name, the_list):
for item in the_list:
if isinstance(item, dict):
ProcessListFiltersInDict(name, item)
elif isinstance(item, list):
ProcessListFiltersInList(name, item)
def ValidateTargetType(target, target_dict):
"""Ensures the 'type' field on the target is one of the known types.
Arguments:
target: string, name of target.
target_dict: dict, target spec.
Raises an exception on error.
"""
VALID_TARGET_TYPES = ('executable', 'loadable_module',
'static_library', 'shared_library',
'none')
target_type = target_dict.get('type', None)
if target_type not in VALID_TARGET_TYPES:
raise GypError("Target %s has an invalid target type '%s'. "
"Must be one of %s." %
(target, target_type, '/'.join(VALID_TARGET_TYPES)))
if (target_dict.get('standalone_static_library', 0) and
not target_type == 'static_library'):
raise GypError('Target %s has type %s but standalone_static_library flag is'
' only valid for static_library type.' % (target,
target_type))
def ValidateSourcesInTarget(target, target_dict, build_file):
# TODO: Check if MSVC allows this for loadable_module targets.
if target_dict.get('type', None) not in ('static_library', 'shared_library'):
return
sources = target_dict.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
target + error + 'Some build systems, e.g. MSVC08, '
'cannot handle that.')
raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
"""Ensures that the rules sections in target_dict are valid and consistent,
and determines which sources they apply to.
Arguments:
target: string, name of target.
target_dict: dict, target spec containing "rules" and "sources" lists.
extra_sources_for_rules: a list of keys to scan for rule matches in
addition to 'sources'.
"""
# Dicts to map between values found in rules' 'rule_name' and 'extension'
# keys and the rule dicts themselves.
rule_names = {}
rule_extensions = {}
rules = target_dict.get('rules', [])
for rule in rules:
# Make sure that there's no conflict among rule names and extensions.
rule_name = rule['rule_name']
if rule_name in rule_names:
raise GypError('rule %s exists in duplicate, target %s' %
(rule_name, target))
rule_names[rule_name] = rule
rule_extension = rule['extension']
if rule_extension in rule_extensions:
raise GypError(('extension %s associated with multiple rules, ' +
'target %s rules %s and %s') %
(rule_extension, target,
rule_extensions[rule_extension]['rule_name'],
rule_name))
rule_extensions[rule_extension] = rule
# Make sure rule_sources isn't already there. It's going to be
# created below if needed.
if 'rule_sources' in rule:
raise GypError(
'rule_sources must not exist in input, target %s rule %s' %
(target, rule_name))
extension = rule['extension']
rule_sources = []
source_keys = ['sources']
source_keys.extend(extra_sources_for_rules)
for source_key in source_keys:
for source in target_dict.get(source_key, []):
(source_root, source_extension) = os.path.splitext(source)
if source_extension.startswith('.'):
source_extension = source_extension[1:]
if source_extension == extension:
rule_sources.append(source)
if len(rule_sources) > 0:
rule['rule_sources'] = rule_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
target_name = target_dict.get('target_name')
run_as = target_dict.get('run_as')
if not run_as:
return
if not isinstance(run_as, dict):
raise GypError("The 'run_as' in target %s from file %s should be a "
"dictionary." %
(target_name, build_file))
action = run_as.get('action')
if not action:
raise GypError("The 'run_as' in target %s from file %s must have an "
"'action' section." %
(target_name, build_file))
if not isinstance(action, list):
raise GypError("The 'action' for 'run_as' in target %s from file %s "
"must be a list." %
(target_name, build_file))
working_directory = run_as.get('working_directory')
if working_directory and not isinstance(working_directory, str):
raise GypError("The 'working_directory' for 'run_as' in target %s "
"in file %s should be a string." %
(target_name, build_file))
environment = run_as.get('environment')
if environment and not isinstance(environment, dict):
raise GypError("The 'environment' for 'run_as' in target %s "
"in file %s should be a dictionary." %
(target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
'''Validates the inputs to the actions in a target.'''
target_name = target_dict.get('target_name')
actions = target_dict.get('actions', [])
for action in actions:
action_name = action.get('action_name')
if not action_name:
raise GypError("Anonymous action in target %s. "
"An action must have an 'action_name' field." %
target_name)
inputs = action.get('inputs', None)
if inputs is None:
raise GypError('Action in target %s has no inputs.' % target_name)
action_command = action.get('action')
if action_command and not action_command[0]:
raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
"""Given dict the_dict, recursively converts all integers into strings.
"""
# Use items instead of iteritems because there's no need to try to look at
# reinserted keys and their associated values.
for k, v in the_dict.items():
if isinstance(v, int):
v = str(v)
the_dict[k] = v
elif isinstance(v, dict):
TurnIntIntoStrInDict(v)
elif isinstance(v, list):
TurnIntIntoStrInList(v)
if isinstance(k, int):
the_dict[str(k)] = v
del the_dict[k]
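# Illustrative sketch (hypothetical values, not from the original source):
#
#   d = {'copies': 2, 3: ['x', 4]}
#   TurnIntIntoStrInDict(d)
#   # d == {'copies': '2', '3': ['x', '4']}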
def TurnIntIntoStrInList(the_list):
"""Given list the_list, recursively converts all integers into strings.
"""
for index in xrange(0, len(the_list)):
item = the_list[index]
if isinstance(item, int):
the_list[index] = str(item)
elif isinstance(item, dict):
TurnIntIntoStrInDict(item)
elif isinstance(item, list):
TurnIntIntoStrInList(item)
def VerifyNoCollidingTargets(targets):
"""Verify that no two targets in the same directory share the same name.
Arguments:
targets: A list of targets in the form 'path/to/file.gyp:target_name'.
"""
# Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
used = {}
for target in targets:
# Separate out 'path/to/file.gyp, 'target_name' from
# 'path/to/file.gyp:target_name'.
path, name = target.rsplit(':', 1)
# Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
subdir, gyp = os.path.split(path)
# Use '.' for the current directory '', so that the error messages make
# more sense.
if not subdir:
subdir = '.'
# Prepare a key like 'path/to:target_name'.
key = subdir + ':' + name
if key in used:
# Complain if this target is already used.
raise GypError('Duplicate target name "%s" in directory "%s" used both '
'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
used[key] = gyp
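# Illustrative sketch (hypothetical targets, not from the original source):
# these two targets collide because they share a directory and a name, even
# though they are defined in different .gyp files:
#
#   VerifyNoCollidingTargets(['foo/a.gyp:lib', 'foo/b.gyp:lib'])  # GypError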
def Load(build_files, variables, includes, depth, generator_input_info, check,
circular_check, parallel):
# Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
global path_sections
path_sections = base_path_sections[:]
path_sections.extend(generator_input_info['path_sections'])
global non_configuration_keys
non_configuration_keys = base_non_configuration_keys[:]
non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
# TODO(mark) handle variants if the generator doesn't want them directly.
generator_handles_variants = \
generator_input_info['generator_handles_variants']
global absolute_build_file_paths
absolute_build_file_paths = \
generator_input_info['generator_wants_absolute_build_file_paths']
global multiple_toolsets
multiple_toolsets = generator_input_info[
'generator_supports_multiple_toolsets']
# A generator can have other lists (in addition to sources) be processed
# for rules.
extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
# Load build files. This loads every target-containing build file into
# the |data| dictionary such that the keys to |data| are build file names,
# and the values are the entire build file contents after "early" or "pre"
# processing has been done and includes have been resolved.
# NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
# well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
# track of the keys corresponding to "target" files.
data = {'target_build_files': set()}
aux_data = {}
for build_file in build_files:
# Normalize paths everywhere. This is important because paths will be
# used as keys to the data dict and for references between input files.
build_file = os.path.normpath(build_file)
try:
if parallel:
print >>sys.stderr, 'Using parallel processing.'
LoadTargetBuildFileParallel(build_file, data, aux_data,
variables, includes, depth, check)
else:
LoadTargetBuildFile(build_file, data, aux_data,
variables, includes, depth, check, True)
except Exception, e:
gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
raise
# Build a dict to access each target's subdict by qualified name.
targets = BuildTargetsDict(data)
# Fully qualify all dependency links.
QualifyDependencies(targets)
# Remove self-dependencies from targets that have 'prune_self_dependencies'
# set to 1.
RemoveSelfDependencies(targets)
# Expand dependencies specified as build_file:*.
ExpandWildcardDependencies(targets, data)
# Apply exclude (!) and regex (/) list filters only for dependency_sections.
for target_name, target_dict in targets.iteritems():
tmp_dict = {}
for key_base in dependency_sections:
for op in ('', '!', '/'):
key = key_base + op
if key in target_dict:
tmp_dict[key] = target_dict[key]
del target_dict[key]
ProcessListFiltersInDict(target_name, tmp_dict)
# Write the results back to |target_dict|.
for key in tmp_dict:
target_dict[key] = tmp_dict[key]
# Make sure every dependency appears at most once.
RemoveDuplicateDependencies(targets)
if circular_check:
# Make sure that any targets in a.gyp don't contain dependencies in other
# .gyp files that further depend on a.gyp.
VerifyNoGYPFileCircularDependencies(targets)
[dependency_nodes, flat_list] = BuildDependencyList(targets)
# Check that no two targets in the same directory have the same name.
VerifyNoCollidingTargets(flat_list)
# Handle dependent settings of various types.
for settings_type in ['all_dependent_settings',
'direct_dependent_settings',
'link_settings']:
DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
# Take out the dependent settings now that they've been published to all
# of the targets that require them.
for target in flat_list:
if settings_type in targets[target]:
del targets[target][settings_type]
# Make sure static libraries don't declare dependencies on other static
# libraries, but that linkables depend on all unlinked static libraries
# that they need so that their link steps will be correct.
gii = generator_input_info
if gii['generator_wants_static_library_dependencies_adjusted']:
AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
gii['generator_wants_sorted_dependencies'])
# Apply "post"/"late"/"target" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATE, variables, build_file)
# Move everything that can go into a "configurations" section into one.
for target in flat_list:
target_dict = targets[target]
SetUpConfigurations(target, target_dict)
# Apply exclude (!) and regex (/) list filters.
for target in flat_list:
target_dict = targets[target]
ProcessListFiltersInDict(target, target_dict)
# Apply "latelate" variable expansions and condition evaluations.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ProcessVariablesAndConditionsInDict(
target_dict, PHASE_LATELATE, variables, build_file)
# Make sure that the rules make sense, and build up rule_sources lists as
# needed. Not all generators will need to use the rule_sources lists, but
# some may, and it seems best to build the list in a common spot.
# Also validate actions and run_as elements in targets.
for target in flat_list:
target_dict = targets[target]
build_file = gyp.common.BuildFile(target)
ValidateTargetType(target, target_dict)
# TODO(thakis): Get vpx_scale/arm/scalesystemdependent.c to be renamed to
# scalesystemdependent_arm_additions.c or similar.
if 'arm' not in variables.get('target_arch', ''):
ValidateSourcesInTarget(target, target_dict, build_file)
ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
ValidateRunAsInTarget(target, target_dict, build_file)
ValidateActionsInTarget(target, target_dict, build_file)
# Generators might not expect ints. Turn them into strs.
TurnIntIntoStrInDict(data)
# TODO(mark): Return |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
return [flat_list, targets, data]
|
xuecai/OpenCC
|
refs/heads/master
|
deps/gtest-1.7.0/scripts/gen_gtest_pred_impl.py
|
2538
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
"""Returns the preamble for the header file.
Args:
n: the maximum arity of the predicate macros to be generated.
"""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
'n' : n
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
"""Returns the English name of the given arity."""
if n < 0:
return None
elif n <= 3:
return ['nullary', 'unary', 'binary', 'ternary'][n]
else:
return '%s-ary' % n
def Title(word):
"""Returns the given word in title case. The difference between
this and string's title() method is that Title('4-ary') is '4-ary'
while '4-ary'.title() is '4-Ary'."""
return word[0].upper() + word[1:]
def OneTo(n):
"""Returns the list [1, 2, 3, ..., n]."""
return range(1, n + 1)
def Iter(n, format, sep=''):
"""Given a positive integer n, a format string that contains 0 or
more '%s' format specs, and optionally a separator string, returns
the join of n strings, each formatted with the format string on an
iterator ranged from 1 to n.
Example:
Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
"""
# How many '%s' specs are in format?
spec_count = len(format.split('%s')) - 1
return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
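# Illustrative sketch (not from the original docstring): with two '%s' specs
# the same iteration index is substituted into every slot:
#
#   Iter(2, 'T%s v%s', sep=', ')  # -> 'T1 v1, T2 v2'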
def ImplementationForArity(n):
"""Returns the implementation of n-ary predicate assertions."""
  # A map that defines the values used in the implementation template.
DEFS = {
'n' : str(n),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'arity' : Arity(n),
'Arity' : Title(Arity(n))
}
impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS
impl += Iter(n, """,
typename T%s""")
impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
impl += Iter(n, """,
const char* e%s""")
impl += """,
Pred pred"""
impl += Iter(n, """,
const T%s& v%s""")
impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
impl += ' return AssertionFailure() << pred_text << "("'
impl += Iter(n, """
<< e%s""", sep=' << ", "')
impl += ' << ") evaluates to false, where"'
impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")
impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
impl += Iter(n, """, \\
#v%s""")
impl += """, \\
pred"""
impl += Iter(n, """, \\
v%s""")
impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
return impl
def HeaderPostamble():
"""Returns the postamble for the header file."""
return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
"""Given a file path and a content string, overwrites it with the
given content."""
print 'Updating file %s . . .' % path
f = file(path, 'w+')
print >>f, content,
f.close()
print 'File %s has been updated.' % path
def GenerateHeader(n):
"""Given the maximum arity n, updates the header file that implements
the predicate assertions."""
GenerateFile(HEADER,
HeaderPreamble(n)
+ ''.join([ImplementationForArity(i) for i in OneTo(n)])
+ HeaderPostamble())
def UnitTestPreamble():
"""Returns the preamble for the unit test file."""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
"""Returns the tests for n-ary predicate assertions."""
# A map that defines the values used in the template for the tests.
DEFS = {
'n' : n,
'es' : Iter(n, 'e%s', sep=', '),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'tvs' : Iter(n, 'T%s v%s', sep=', '),
'int_vs' : Iter(n, 'int v%s', sep=', '),
'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
'types' : Iter(n, 'typename T%s', sep=', '),
'v_sum' : Iter(n, 'v%s', sep=' + '),
'arity' : Arity(n),
'Arity' : Title(Arity(n)),
}
tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problems with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)
tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
tests += Iter(n, 'const T%s& v%s', sep=""",
""")
tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS
tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
tests += Iter(n, 'e%s', sep=' << " + " << ')
tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS
tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS
tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS
tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""
tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""
tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])
tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
      FAIL() << "The predicate assertion unexpectedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS
tests += Iter(n, """
static int n%s_;""")
tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
def GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type):
"""Returns the test for a predicate assertion macro.
Args:
use_format: true iff the assertion is a *_PRED_FORMAT*.
use_assert: true iff the assertion is a ASSERT_*.
expect_failure: true iff the assertion is expected to fail.
use_functor: true iff the first argument of the assertion is
a functor (as opposed to a function)
use_user_type: true iff the predicate functor/function takes
argument(s) of a user-defined type.
Example:
GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
of a successful EXPECT_PRED_FORMATn() that takes a functor
whose arguments have built-in types."""
if use_assert:
assrt = 'ASSERT' # 'assert' is reserved, so we cannot use
# that identifier here.
else:
assrt = 'EXPECT'
assertion = assrt + '_PRED'
if use_format:
pred_format = 'PredFormat'
assertion += '_FORMAT'
else:
pred_format = 'Pred'
assertion += '%(n)s' % DEFS
if use_functor:
pred_format_type = 'functor'
pred_format += 'Functor%(n)s()'
else:
pred_format_type = 'function'
pred_format += 'Function%(n)s'
if not use_format:
if use_user_type:
pred_format += 'Bool'
else:
pred_format += 'Int'
test_name = pred_format_type.title()
if use_user_type:
arg_type = 'user-defined type (Bool)'
test_name += 'OnUserType'
if expect_failure:
arg = 'Bool(n%s_++)'
else:
arg = 'Bool(++n%s_)'
else:
arg_type = 'built-in type (int)'
test_name += 'OnBuiltInType'
if expect_failure:
arg = 'n%s_++'
else:
arg = '++n%s_'
if expect_failure:
successful_or_failed = 'failed'
expected_or_not = 'expected.'
test_name += 'Failure'
else:
successful_or_failed = 'successful'
expected_or_not = 'UNEXPECTED!'
test_name += 'Success'
# A map that defines the values used in the test template.
defs = DEFS.copy()
defs.update({
'assert' : assrt,
'assertion' : assertion,
'test_name' : test_name,
'pf_type' : pred_format_type,
'pf' : pred_format,
'arg_type' : arg_type,
'arg' : arg,
'successful' : successful_or_failed,
'expected' : expected_or_not,
})
test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
indent = (len(assertion) + 3)*' '
extra_indent = ''
if expect_failure:
extra_indent = ' '
if use_assert:
test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
else:
test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
test = test % defs
test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
test += ');\n' + extra_indent + ' finished_ = true;\n'
if expect_failure:
test += ' }, "");\n'
test += '}\n'
return test
# Generates tests for all 2**6 = 64 combinations.
tests += ''.join([GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type)
for use_format in [0, 1]
for use_assert in [0, 1]
for expect_failure in [0, 1]
for use_functor in [0, 1]
for use_user_type in [0, 1]
])
return tests
def UnitTestPostamble():
"""Returns the postamble for the tests."""
return ''
def GenerateUnitTest(n):
  """Generates the tests for up-to n-ary predicate assertions."""
GenerateFile(UNIT_TEST,
UnitTestPreamble()
+ ''.join([TestsForArity(i) for i in OneTo(n)])
+ UnitTestPostamble())
def _Main():
"""The entry point of the script. Generates the header file and its
unit test."""
if len(sys.argv) != 2:
print __doc__
print 'Author: ' + __author__
sys.exit(1)
n = int(sys.argv[1])
GenerateHeader(n)
GenerateUnitTest(n)
if __name__ == '__main__':
_Main()
|
isandlaTech/cohorte-devtools
|
refs/heads/master
|
org.cohorte.eclipse.runner.basic/files/jython/Lib/_abcoll.py
|
17
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
]
### ONE-TRICK PONIES ###
def _hasattr(C, attr):
try:
return any(attr in B.__dict__ for B in C.__mro__)
except AttributeError:
# Old-style class
return hasattr(C, attr)
class Hashable:
__metaclass__ = ABCMeta
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
try:
for B in C.__mro__:
if "__hash__" in B.__dict__:
if B.__dict__["__hash__"]:
return True
break
except AttributeError:
# Old-style class
if getattr(C, "__hash__", None):
return True
return NotImplemented
class Iterable:
__metaclass__ = ABCMeta
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
if _hasattr(C, "__iter__"):
return True
return NotImplemented
Iterable.register(str)
class Iterator(Iterable):
@abstractmethod
def next(self):
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
if _hasattr(C, "next") and _hasattr(C, "__iter__"):
return True
return NotImplemented
class Sized:
__metaclass__ = ABCMeta
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
if _hasattr(C, "__len__"):
return True
return NotImplemented
class Container:
__metaclass__ = ABCMeta
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
if _hasattr(C, "__contains__"):
return True
return NotImplemented
class Callable:
__metaclass__ = ABCMeta
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
if _hasattr(C, "__call__"):
return True
return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other < self
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other <= self
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
def isdisjoint(self, other):
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
# Sets are not hashable by default, but subclasses can change this
__hash__ = None
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
        This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxint
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
Set.register(frozenset)
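# Illustrative sketch (an assumption, not part of this module): a hashable
# Set implementation can reuse _hash, and per the docstring above its result
# agrees with the built-in frozenset hash:
#
#   class _FrozenSet(Set):
#       def __init__(self, it): self._items = frozenset(it)
#       def __iter__(self): return iter(self._items)
#       def __contains__(self, x): return x in self._items
#       def __len__(self): return len(self._items)
#       __hash__ = Set._hash
#
#   assert hash(_FrozenSet([1, 2])) == hash(frozenset([1, 2]))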
class MutableSet(Set):
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def iterkeys(self):
return iter(self)
def itervalues(self):
for key in self:
yield self[key]
def iteritems(self):
for key in self:
yield (key, self[key])
def keys(self):
return list(self)
def items(self):
return [(key, self[key]) for key in self]
def values(self):
return [self[key] for key in self]
# Mappings are not hashable by default, but subclasses can change this
__hash__ = None
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
def __ne__(self, other):
return not (self == other)
class MappingView(Sized):
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
@classmethod
    def _from_iterable(cls, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
for key in self._mapping:
yield key
class ItemsView(MappingView, Set):
@classmethod
    def _from_iterable(cls, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
class ValuesView(MappingView):
def __contains__(self, value):
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
class MutableMapping(Mapping):
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
other = args[1] if len(args) >= 2 else ()
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
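    # Illustrative note: update() above accepts a Mapping, an object that
    # merely exposes keys(), or an iterable of (key, value) pairs, plus
    # keyword arguments; for a hypothetical concrete instance m:
    #   m.update({'a': 1}); m.update([('b', 2)], c=3)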
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value):
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
return sum(1 for v in self if v == value)
Sequence.register(tuple)
Sequence.register(basestring)
Sequence.register(buffer)
Sequence.register(xrange)
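# Illustrative sketch (not part of the original module): a read-only
# Sequence only needs __init__, __getitem__ and __len__; iteration,
# containment, __reversed__, index() and count() come from the mixins above.
class _ExampleRange(Sequence):
    def __init__(self, stop):
        self._stop = stop
    def __getitem__(self, index):
        if not 0 <= index < self._stop:
            raise IndexError(index)
        return index
    def __len__(self):
        return self._stop
# e.g. list(_ExampleRange(3)) == [0, 1, 2]; _ExampleRange(3).index(2) == 2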
class MutableSequence(Sequence):
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
raise IndexError
def append(self, value):
self.insert(len(self), value)
def reverse(self):
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
for v in values:
self.append(v)
def pop(self, index=-1):
v = self[index]
del self[index]
return v
def remove(self, value):
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
if sys.platform.startswith("java"):
from org.python.core import PyFastSequenceIter
# the only name conflict is with Set, but be consistent
from java.util import List as JList, Map as JMap, Set as JSet
MutableSequence.register(JList)
MutableMapping.register(JMap)
MutableSet.register(JSet)
Iterator.register(PyFastSequenceIter)
del PyFastSequenceIter
del JList
del JMap
del JSet
|
alsrgv/tensorflow
|
refs/heads/master
|
tensorflow/python/distribute/numpy_dataset_test.py
|
32
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for numpy_dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.eager import test
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variable_scope
class InitVarFromNumpyTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_creating_var_with_numpy_arrays(self):
with self.cached_session() as session:
x = np.asarray(np.random.random((64, 3)), dtype=np.float32)
initial = np.zeros_like(x)
var_x = variable_scope.variable(initial)
numpy_dataset.init_var_from_numpy(var_x, x, session)
val = self.evaluate(var_x.value())
# Verify that the numpy value is copied to the variable.
self.assertAllEqual(x, val)
if __name__ == '__main__':
test.main()
|
ftomassetti/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/module/after/src/p1/p2/__init__.py
|
12133432
| |
runekaagaard/django-contrib-locking
|
refs/heads/master
|
tests/migrations/test_migrations/__init__.py
|
12133432
| |
Viderl/test-gae
|
refs/heads/master
|
appengine-try-python-django/hello/__init__.py
|
12133432
| |
fidomason/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Tools/demo/rpythond.py
|
107
|
#!/usr/bin/env python3
"""
Remote python server.
Execute Python commands remotely and send output back.
WARNING: This version has a gaping security hole -- it accepts requests
from any host on the Internet!
"""
import sys
from socket import socket, AF_INET, SOCK_STREAM
import io
import traceback
PORT = 4127
BUFSIZE = 1024
def main():
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = PORT
s = socket(AF_INET, SOCK_STREAM)
s.bind(('', port))
s.listen(1)
while True:
conn, (remotehost, remoteport) = s.accept()
print('connection from', remotehost, remoteport)
request = b''
while 1:
data = conn.recv(BUFSIZE)
if not data:
break
request += data
reply = execute(request.decode())
conn.send(reply.encode())
conn.close()
def execute(request):
stdout = sys.stdout
stderr = sys.stderr
sys.stdout = sys.stderr = fakefile = io.StringIO()
try:
try:
exec(request, {}, {})
except:
print()
traceback.print_exc(100)
finally:
sys.stderr = stderr
sys.stdout = stdout
return fakefile.getvalue()
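# Illustrative client sketch (assumes the server above is running locally);
# the client must shut down its write side so the server sees end-of-request:
#   from socket import socket, AF_INET, SOCK_STREAM
#   c = socket(AF_INET, SOCK_STREAM)
#   c.connect(('localhost', PORT))
#   c.send(b"print('hello, remote')")
#   c.shutdown(1)
#   print(c.recv(BUFSIZE).decode())
#   c.close()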
try:
main()
except KeyboardInterrupt:
pass
|
marco-lancini/Showcase
|
refs/heads/master
|
djangoappengine/tests/transactions.py
|
36
|
from django.db.models import F
from django.test import TestCase
from .testmodels import EmailModel
class TransactionTest(TestCase):
emails = ['app-engine@scholardocs.com', 'sharingan@uchias.com',
'rinnengan@sage.de', 'rasengan@naruto.com']
def setUp(self):
EmailModel(email=self.emails[0], number=1).save()
EmailModel(email=self.emails[0], number=2).save()
EmailModel(email=self.emails[1], number=3).save()
def test_update(self):
self.assertEqual(2, len(EmailModel.objects.all().filter(
email=self.emails[0])))
self.assertEqual(1, len(EmailModel.objects.all().filter(
email=self.emails[1])))
EmailModel.objects.all().filter(email=self.emails[0]).update(
email=self.emails[1])
self.assertEqual(0, len(EmailModel.objects.all().filter(
email=self.emails[0])))
self.assertEqual(3, len(EmailModel.objects.all().filter(
email=self.emails[1])))
def test_f_object_updates(self):
self.assertEqual(1, len(EmailModel.objects.all().filter(
number=1)))
self.assertEqual(1, len(EmailModel.objects.all().filter(
number=2)))
# Test add.
EmailModel.objects.all().filter(email=self.emails[0]).update(
number=F('number') + F('number'))
self.assertEqual(1, len(EmailModel.objects.all().filter(
number=2)))
self.assertEqual(1, len(EmailModel.objects.all().filter(
number=4)))
EmailModel.objects.all().filter(email=self.emails[1]).update(
number=F('number') + 10, email=self.emails[0])
self.assertEqual(1, len(EmailModel.objects.all().filter(number=13)))
self.assertEqual(self.emails[0],
EmailModel.objects.all().get(number=13).email)
# Complex expression test.
EmailModel.objects.all().filter(number=13).update(
number=F('number') * (F('number') + 10) - 5, email=self.emails[0])
self.assertEqual(1, len(EmailModel.objects.all().filter(number=294)))
        # TODO: Tests for: sub, mult, div, mod, ...
|
Pablo126/SSBW
|
refs/heads/master
|
Tarea4/tarea4/lib/python3.5/site-packages/setuptools/__init__.py
|
130
|
"""Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
from distutils.util import convert_path
from fnmatch import fnmatchcase
from six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature
from setuptools.depends import Require
from . import monkey
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages',
]
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder(object):
"""
Generate a list of all Python packages found within a directory
"""
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
"""
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
@classmethod
def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
# Should this package be included?
if include(package) and not exclude(package):
yield package
# Keep searching subdirectories, as there may be more packages
# down there, even if the parent was excluded.
dirs.append(dir)
@staticmethod
def _looks_like_package(path):
"""Does a directory look like a package?"""
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
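# Usage sketch (hypothetical package layout): honoring the wildcard filters
# documented in PackageFinder.find above,
#   find_packages(exclude=('tests', 'tests.*'))
# might return e.g. ['mypkg', 'mypkg.subpkg'] while skipping the test tree.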
setup = distutils.core.setup
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
monkey.patch_all()
|
mlyundin/scrapy
|
refs/heads/master
|
tests/mocks/__init__.py
|
12133432
| |
campbe13/openhatch
|
refs/heads/master
|
vendor/packages/Django/django/conf/locale/sk/__init__.py
|
12133432
| |
ahmadiga/min_edx
|
refs/heads/master
|
common/test/acceptance/tests/lms/test_lms_cohorted_courseware_search.py
|
36
|
"""
Test courseware search
"""
import os
import json
import uuid
from ..helpers import remove_file
from ...pages.common.logout import LogoutPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.courseware_search import CoursewareSearchPage
from ...pages.lms.staff_view import StaffPage
from ...fixtures.course import XBlockFixtureDesc
from nose.plugins.attrib import attr
from ..studio.base_studio_test import ContainerBase
from ...pages.studio.settings_group_configurations import GroupConfigurationsPage
from ...pages.studio.auto_auth import AutoAuthPage as StudioAutoAuthPage
from ...fixtures import LMS_BASE_URL
from ...pages.studio.component_editor import ComponentVisibilityEditorView
from ...pages.lms.instructor_dashboard import InstructorDashboardPage
from bok_choy.promise import EmptyPromise
@attr('shard_1')
class CoursewareSearchCohortTest(ContainerBase):
"""
Test courseware search.
"""
TEST_INDEX_FILENAME = "test_root/index_file.dat"
def setUp(self, is_staff=True):
"""
Create search page and course content to search
"""
# create test file in which index for this test will live
with open(self.TEST_INDEX_FILENAME, "w+") as index_file:
json.dump({}, index_file)
self.addCleanup(remove_file, self.TEST_INDEX_FILENAME)
super(CoursewareSearchCohortTest, self).setUp(is_staff=is_staff)
self.staff_user = self.user
self.course_outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.content_group_a = "Content Group A"
self.content_group_b = "Content Group B"
# Create a student who will be in "Cohort A"
self.cohort_a_student_username = "cohort_a_" + str(uuid.uuid4().hex)[:12]
self.cohort_a_student_email = self.cohort_a_student_username + "@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_a_student_username, email=self.cohort_a_student_email, no_login=True
).visit()
# Create a student who will be in "Cohort B"
self.cohort_b_student_username = "cohort_b_" + str(uuid.uuid4().hex)[:12]
self.cohort_b_student_email = self.cohort_b_student_username + "@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_b_student_username, email=self.cohort_b_student_email, no_login=True
).visit()
# Create a student who will end up in the default cohort group
self.cohort_default_student_username = "cohort_default_student"
self.cohort_default_student_email = "cohort_default_student@example.com"
StudioAutoAuthPage(
self.browser, username=self.cohort_default_student_username,
email=self.cohort_default_student_email, no_login=True
).visit()
self.courseware_search_page = CoursewareSearchPage(self.browser, self.course_id)
# Enable Cohorting and assign cohorts and content groups
self._auto_auth(self.staff_user["username"], self.staff_user["email"], True)
self.enable_cohorting(self.course_fixture)
self.create_content_groups()
self.link_html_to_content_groups_and_publish()
self.create_cohorts_and_assign_students()
self._studio_reindex()
def _auto_auth(self, username, email, staff):
"""
Logout and login with given credentials.
"""
LogoutPage(self.browser).visit()
StudioAutoAuthPage(self.browser, username=username, email=email,
course_id=self.course_id, staff=staff).visit()
def _studio_reindex(self):
"""
Reindex course content on studio course page
"""
self._auto_auth(self.staff_user["username"], self.staff_user["email"], True)
self.course_outline.visit()
self.course_outline.start_reindex()
self.course_outline.wait_for_ajax()
def _goto_staff_page(self):
"""
Open staff page with assertion
"""
self.courseware_search_page.visit()
staff_page = StaffPage(self.browser, self.course_id)
self.assertEqual(staff_page.staff_view_mode, 'Staff')
return staff_page
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
self.group_a_html = 'GROUPACONTENT'
self.group_b_html = 'GROUPBCONTENT'
self.group_a_and_b_html = 'GROUPAANDBCONTENT'
self.visible_to_all_html = 'VISIBLETOALLCONTENT'
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', self.group_a_html, data='<html>GROUPACONTENT</html>'),
XBlockFixtureDesc('html', self.group_b_html, data='<html>GROUPBCONTENT</html>'),
XBlockFixtureDesc('html', self.group_a_and_b_html, data='<html>GROUPAANDBCONTENT</html>'),
XBlockFixtureDesc('html', self.visible_to_all_html, data='<html>VISIBLETOALLCONTENT</html>')
)
)
)
)
def enable_cohorting(self, course_fixture):
"""
Enables cohorting for the current course.
"""
url = LMS_BASE_URL + "/courses/" + course_fixture._course_key + '/cohorts/settings' # pylint: disable=protected-access
data = json.dumps({'is_cohorted': True})
response = course_fixture.session.patch(url, data=data, headers=course_fixture.headers)
self.assertTrue(response.ok, "Failed to enable cohorts")
def create_content_groups(self):
"""
Creates two content groups in Studio Group Configurations Settings.
"""
group_configurations_page = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
group_configurations_page.visit()
group_configurations_page.create_first_content_group()
config = group_configurations_page.content_groups[0]
config.name = self.content_group_a
config.save()
group_configurations_page.add_content_group()
config = group_configurations_page.content_groups[1]
config.name = self.content_group_b
config.save()
def link_html_to_content_groups_and_publish(self):
"""
Updates 3 of the 4 existing html to limit their visibility by content group.
Publishes the modified units.
"""
container_page = self.go_to_unit_page()
def set_visibility(html_block_index, content_group, second_content_group=None):
"""
Set visibility on html blocks to specified groups.
"""
html_block = container_page.xblocks[html_block_index]
html_block.edit_visibility()
if second_content_group:
ComponentVisibilityEditorView(self.browser, html_block.locator).select_option(
second_content_group, save=False
)
ComponentVisibilityEditorView(self.browser, html_block.locator).select_option(content_group)
set_visibility(1, self.content_group_a)
set_visibility(2, self.content_group_b)
set_visibility(3, self.content_group_a, self.content_group_b)
set_visibility(4, 'All Students and Staff') # Does not work without this
container_page.publish_action.click()
def create_cohorts_and_assign_students(self):
"""
Adds 2 manual cohorts, linked to content groups, to the course.
Each cohort is assigned one student.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
cohort_management_page = instructor_dashboard_page.select_cohort_management()
def add_cohort_with_student(cohort_name, content_group, student):
"""
Create cohort and assign student to it.
"""
cohort_management_page.add_cohort(cohort_name, content_group=content_group)
# After adding the cohort, it should automatically be selected
EmptyPromise(
lambda: cohort_name == cohort_management_page.get_selected_cohort(), "Waiting for new cohort"
).fulfill()
cohort_management_page.add_students_to_selected_cohort([student])
add_cohort_with_student("Cohort A", self.content_group_a, self.cohort_a_student_username)
add_cohort_with_student("Cohort B", self.content_group_b, self.cohort_b_student_username)
cohort_management_page.wait_for_ajax()
def test_page_existence(self):
"""
Make sure that the page is accessible.
"""
self._auto_auth(self.cohort_default_student_username, self.cohort_default_student_email, False)
self.courseware_search_page.visit()
def test_cohorted_search_user_a_a_content(self):
"""
        Test that a user can search content restricted to their own cohort.
"""
self._auto_auth(self.cohort_a_student_username, self.cohort_a_student_email, False)
self.courseware_search_page.visit()
self.courseware_search_page.search_for_term(self.group_a_html)
assert self.group_a_html in self.courseware_search_page.search_results.html[0]
def test_cohorted_search_user_b_a_content(self):
"""
        Test that a user cannot search content restricted to a cohort they do not belong to.
"""
self._auto_auth(self.cohort_b_student_username, self.cohort_b_student_email, False)
self.courseware_search_page.visit()
self.courseware_search_page.search_for_term(self.group_a_html)
assert self.group_a_html not in self.courseware_search_page.search_results.html[0]
def test_cohorted_search_user_default_ab_content(self):
"""
        Test that a user not enrolled in any cohort can't see any restricted content.
"""
self._auto_auth(self.cohort_default_student_username, self.cohort_default_student_email, False)
self.courseware_search_page.visit()
self.courseware_search_page.search_for_term(self.group_a_and_b_html)
assert self.group_a_and_b_html not in self.courseware_search_page.search_results.html[0]
def test_cohorted_search_user_default_all_content(self):
"""
        Test that a user can search public content when cohorts are used on the course.
"""
self._auto_auth(self.cohort_default_student_username, self.cohort_default_student_email, False)
self.courseware_search_page.visit()
self.courseware_search_page.search_for_term(self.visible_to_all_html)
assert self.visible_to_all_html in self.courseware_search_page.search_results.html[0]
def test_cohorted_search_user_staff_all_content(self):
"""
        Test that a staff user can search all content when cohorts are used on the course.
"""
self._auto_auth(self.staff_user["username"], self.staff_user["email"], False)
self._goto_staff_page().set_staff_view_mode('Staff')
self.courseware_search_page.search_for_term(self.visible_to_all_html)
assert self.visible_to_all_html in self.courseware_search_page.search_results.html[0]
self.courseware_search_page.clear_search()
self.courseware_search_page.search_for_term(self.group_a_and_b_html)
assert self.group_a_and_b_html in self.courseware_search_page.search_results.html[0]
self.courseware_search_page.clear_search()
self.courseware_search_page.search_for_term(self.group_a_html)
assert self.group_a_html in self.courseware_search_page.search_results.html[0]
self.courseware_search_page.clear_search()
self.courseware_search_page.search_for_term(self.group_b_html)
assert self.group_b_html in self.courseware_search_page.search_results.html[0]
def test_cohorted_search_user_staff_masquerade_student_content(self):
"""
        Test that a staff user masquerading as a student can search only public content.
"""
self._auto_auth(self.staff_user["username"], self.staff_user["email"], False)
self._goto_staff_page().set_staff_view_mode('Student')
self.courseware_search_page.search_for_term(self.visible_to_all_html)
assert self.visible_to_all_html in self.courseware_search_page.search_results.html[0]
self.courseware_search_page.clear_search()
self.courseware_search_page.search_for_term(self.group_a_and_b_html)
assert self.group_a_and_b_html not in self.courseware_search_page.search_results.html[0]
self.courseware_search_page.clear_search()
self.courseware_search_page.search_for_term(self.group_a_html)
assert self.group_a_html not in self.courseware_search_page.search_results.html[0]
self.courseware_search_page.clear_search()
self.courseware_search_page.search_for_term(self.group_b_html)
assert self.group_b_html not in self.courseware_search_page.search_results.html[0]
def test_cohorted_search_user_staff_masquerade_cohort_content(self):
"""
        Test that a staff user masquerading as a cohort member can search that cohort's content as well as public content.
"""
self._auto_auth(self.staff_user["username"], self.staff_user["email"], False)
self._goto_staff_page().set_staff_view_mode('Student in ' + self.content_group_a)
self.courseware_search_page.search_for_term(self.visible_to_all_html)
assert self.visible_to_all_html in self.courseware_search_page.search_results.html[0]
self.courseware_search_page.clear_search()
self.courseware_search_page.search_for_term(self.group_a_and_b_html)
assert self.group_a_and_b_html in self.courseware_search_page.search_results.html[0]
self.courseware_search_page.clear_search()
self.courseware_search_page.search_for_term(self.group_a_html)
assert self.group_a_html in self.courseware_search_page.search_results.html[0]
self.courseware_search_page.clear_search()
self.courseware_search_page.search_for_term(self.group_b_html)
assert self.group_b_html not in self.courseware_search_page.search_results.html[0]
|
crate/crate
|
refs/heads/master
|
blackbox/test_jmx.py
|
1
|
# -*- coding: utf-8; -*-
#
# Licensed to Crate.io GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
import os
import re
import unittest
from crate.client import connect
from testutils.ports import bind_port
from testutils.paths import crate_path
from cr8.run_crate import CrateNode
from subprocess import PIPE, Popen
from urllib.request import urlretrieve
JMX_PORT = bind_port()
JMX_OPTS = '''
-Dcom.sun.management.jmxremote
-Dcom.sun.management.jmxremote.port={}
-Dcom.sun.management.jmxremote.ssl=false
-Dcom.sun.management.jmxremote.authenticate=false
-Dio.netty.leakDetection.level=paranoid
'''
env = os.environ.copy()
env['CRATE_JAVA_OPTS'] = JMX_OPTS.format(JMX_PORT)
env['CRATE_HEAP_SIZE'] = '256M'
enterprise_crate = CrateNode(
crate_dir=crate_path(),
settings={
'transport.tcp.port': 0,
'psql.port': 0,
'node.name': 'crate-enterprise',
},
env=env,
version=(4, 0, 0)
)
class JmxClient:
SJK_JAR_URL = "https://repository.sonatype.org/service/local/artifact/maven/redirect?r=central-proxy&g=org.gridkit.jvmtool&a=sjk&v=LATEST"
CACHE_DIR = os.environ.get(
'XDG_CACHE_HOME',
os.path.join(os.path.expanduser('~'), '.cache', 'crate-tests')
)
def __init__(self, jmx_port):
self.jmx_port = jmx_port
self.jmx_path = self._get_jmx()
def _get_jmx(self):
jar_name = 'sjk.jar'
jmx_path = os.path.join(JmxClient.CACHE_DIR, 'jmx')
jar_path = os.path.join(jmx_path, jar_name)
if not os.path.exists(jar_path):
os.makedirs(jmx_path, exist_ok=True)
urlretrieve(JmxClient.SJK_JAR_URL, jar_path)
return jar_path
def query_jmx(self, bean, attribute):
env = os.environ.copy()
env.setdefault('JAVA_HOME', '/usr/lib/jvm/java-11-openjdk')
with Popen(
[
'java',
'-jar', self.jmx_path,
'mx',
'-s', f'localhost:{self.jmx_port}',
'-mg',
'-b', bean,
'-f', attribute
],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
universal_newlines=True
) as p:
stdout, stderr = p.communicate()
restart_msg = 'Restarting java with unlocked package access\n'
if stderr.startswith(restart_msg):
stderr = stderr[len(restart_msg):]
# Bean name is printed in the first line. Remove it
return (stdout[len(bean) + 1:], stderr)
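# Equivalent CLI sketch (mirrors the sjk arguments assembled above):
#   java -jar sjk.jar mx -s localhost:<jmx_port> -mg -b <bean> -f <attribute>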
class JmxIntegrationTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
enterprise_crate.start()
@classmethod
def tearDownClass(cls):
enterprise_crate.stop()
def test_mbean_select_total_count(self):
jmx_client = JmxClient(JMX_PORT)
with connect(enterprise_crate.http_url) as conn:
c = conn.cursor()
c.execute("select 1")
stdout, stderr = jmx_client.query_jmx(
'io.crate.monitoring:type=QueryStats',
'SelectQueryTotalCount'
)
self.assertEqual(stderr, '')
self.assertGreater(int(stdout), 0)
def test_mbean_select_ready(self):
jmx_client = JmxClient(JMX_PORT)
stdout, stderr = jmx_client.query_jmx(
'io.crate.monitoring:type=NodeStatus',
'Ready'
)
self.assertEqual(stderr, '')
self.assertEqual(stdout.rstrip(), 'true')
def test_mbean_node_name(self):
jmx_client = JmxClient(JMX_PORT)
stdout, stderr = jmx_client.query_jmx(
'io.crate.monitoring:type=NodeInfo',
'NodeName'
)
self.assertEqual(stderr, '')
self.assertEqual(stdout.rstrip(), 'crate-enterprise')
def test_mbean_node_id(self):
jmx_client = JmxClient(JMX_PORT)
stdout, stderr = jmx_client.query_jmx(
'io.crate.monitoring:type=NodeInfo',
'NodeId'
)
self.assertEqual(stderr, '')
self.assertNotEqual(stdout.rstrip(), '', 'node id must not be empty')
def test_mbean_shards(self):
jmx_client = JmxClient(JMX_PORT)
with connect(enterprise_crate.http_url) as conn:
c = conn.cursor()
c.execute('''create table test(id integer) clustered into 1 shards with (number_of_replicas=0)''')
stdout, stderr = jmx_client.query_jmx(
'io.crate.monitoring:type=NodeInfo',
'ShardStats'
)
result = [line.strip() for line in stdout.split('\n') if line.strip()]
result.sort()
self.assertEqual(result[0], 'primaries: 1')
self.assertEqual(result[1], 'replicas: 0')
self.assertEqual(result[2], 'total: 1')
self.assertEqual(result[3], 'unassigned: 0')
self.assertEqual(stderr, '')
stdout, stderr = jmx_client.query_jmx(
'io.crate.monitoring:type=NodeInfo',
'ShardInfo'
)
self.assertNotEqual(stdout.rstrip(), '', 'ShardInfo must not be empty')
self.assertEqual(stderr, '')
c.execute('''drop table test''')
def test_mbean_cluster_state_version(self):
jmx_client = JmxClient(JMX_PORT)
stdout, stderr = jmx_client.query_jmx(
'io.crate.monitoring:type=NodeInfo', 'ClusterStateVersion')
self.assertGreater(int(stdout), 0)
self.assertEqual(stderr, '')
def test_number_of_open_connections(self):
jmx_client = JmxClient(JMX_PORT)
with connect(enterprise_crate.http_url) as _:
stdout, stderr = jmx_client.query_jmx(
'io.crate.monitoring:type=Connections', 'HttpOpen')
self.assertGreater(int(stdout), 0)
self.assertEqual(stderr, '')
def test_search_pool(self):
jmx_client = JmxClient(JMX_PORT)
stdout, stderr = jmx_client.query_jmx(
'io.crate.monitoring:type=ThreadPools', 'Search')
self.assertEqual(
'\n'.join((line.strip() for line in stdout.split('\n'))),
'''\
active: 0
completed: 1
largestPoolSize: 1
name: search
poolSize: 1
queueSize: 0
rejected: 0
''')
self.assertEqual(stderr, '')
def test_parent_breaker(self):
jmx_client = JmxClient(JMX_PORT)
stdout, stderr = jmx_client.query_jmx(
'io.crate.monitoring:type=CircuitBreakers', 'Parent')
self.assert_valid_circuit_breaker_jmx_output('parent', stdout)
self.assertEqual(stderr, '')
stdout, stderr = jmx_client.query_jmx(
'io.crate.monitoring:type=CircuitBreakers', 'Query')
self.assert_valid_circuit_breaker_jmx_output('query', stdout)
self.assertEqual(stderr, '')
def assert_valid_circuit_breaker_jmx_output(self, cb_name, output):
limit = re.search(r'limit:\s+([0-9]+)', output)
self.assertGreater(int(limit.group(1)), 0)
self.assertRegex(output, rf'name:\s+{cb_name}')
self.assertRegex(output, r'overhead:\s+(\d+\.?\d+)')
self.assertRegex(output, r'trippedCount:\s+(\d+)')
self.assertRegex(output, r'used:\s+(\d+)')
|
theguardian/KodiDB
|
refs/heads/master
|
cherrypy/_cpnative_server.py
|
41
|
"""Native adapter for serving CherryPy via its builtin server."""
import logging
import sys
import cherrypy
from cherrypy._cpcompat import BytesIO
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
from cherrypy import wsgiserver
class NativeGateway(wsgiserver.Gateway):
recursive = False
def respond(self):
req = self.req
try:
# Obtain a Request object from CherryPy
local = req.server.bind_addr
local = httputil.Host(local[0], local[1], "")
remote = req.conn.remote_addr, req.conn.remote_port
remote = httputil.Host(remote[0], remote[1], "")
scheme = req.scheme
sn = cherrypy.tree.script_name(req.uri or "/")
if sn is None:
self.send_response('404 Not Found', [], [''])
else:
app = cherrypy.tree.apps[sn]
method = req.method
path = req.path
qs = req.qs or ""
headers = req.inheaders.items()
rfile = req.rfile
prev = None
try:
redirections = []
while True:
request, response = app.get_serving(
local, remote, scheme, "HTTP/1.1")
request.multithread = True
request.multiprocess = False
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the response
try:
request.run(method, path, qs, req.request_protocol, headers, rfile)
break
except cherrypy.InternalRedirect:
ir = sys.exc_info()[1]
app.release_serving()
prev = request
if not self.recursive:
if ir.path in redirections:
raise RuntimeError("InternalRedirector visited the "
"same URL twice: %r" % ir.path)
else:
# Add the *previous* path_info + qs to redirections.
if qs:
qs = "?" + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = "GET"
path = ir.path
qs = ir.query_string
rfile = BytesIO()
self.send_response(
response.output_status, response.header_list,
response.body)
finally:
app.release_serving()
except:
tb = format_exc()
#print tb
cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR)
s, h, b = bare_error()
self.send_response(s, h, b)
def send_response(self, status, headers, body):
req = self.req
# Set response status
req.status = str(status or "500 Server Error")
# Set response headers
for header, value in headers:
req.outheaders.append((header, value))
if (req.ready and not req.sent_headers):
req.sent_headers = True
req.send_headers()
# Set response body
for seg in body:
req.write(seg)
class CPHTTPServer(wsgiserver.HTTPServer):
"""Wrapper for wsgiserver.HTTPServer.
wsgiserver has been designed to not reference CherryPy in any way,
so that it can be used in other frameworks and applications.
Therefore, we wrap it here, so we can apply some attributes
from config -> cherrypy.server -> HTTPServer.
"""
def __init__(self, server_adapter=cherrypy.server):
self.server_adapter = server_adapter
server_name = (self.server_adapter.socket_host or
self.server_adapter.socket_file or
None)
wsgiserver.HTTPServer.__init__(
self, server_adapter.bind_addr, NativeGateway,
minthreads=server_adapter.thread_pool,
maxthreads=server_adapter.thread_pool_max,
server_name=server_name)
self.max_request_header_size = self.server_adapter.max_request_header_size or 0
self.max_request_body_size = self.server_adapter.max_request_body_size or 0
self.request_queue_size = self.server_adapter.socket_queue_size
self.timeout = self.server_adapter.socket_timeout
self.shutdown_timeout = self.server_adapter.shutdown_timeout
self.protocol = self.server_adapter.protocol_version
self.nodelay = self.server_adapter.nodelay
ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
if self.server_adapter.ssl_context:
adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
self.ssl_adapter.context = self.server_adapter.ssl_context
elif self.server_adapter.ssl_certificate:
adapter_class = wsgiserver.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain)
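# Usage sketch (illustrative; Root is a hypothetical application class):
# swap the native server in for the default WSGI server before starting.
#   import cherrypy
#   from cherrypy._cpnative_server import CPHTTPServer
#   cherrypy.server.httpserver = CPHTTPServer(cherrypy.server)
#   cherrypy.quickstart(Root())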
|
laufercenter/meld
|
refs/heads/master
|
meld/test/functional/comm/broadcast_states.py
|
1
|
#
# Copyright 2015 by Justin MacCallum, Alberto Perez, Ken Dill
# All rights reserved
#
import numpy as np
from meld import comm
from meld.system.state import SystemState
N_ATOMS = 500
N_REPLICAS = 4
def generate_state(index):
coords = index * np.ones((N_ATOMS, 3))
vels = index * np.ones((N_ATOMS, 3))
alpha = float(index) / 10.
energy = float(index)
return SystemState(coords, vels, alpha, energy)
def check_state(state, index):
assert state.positions[0, 0] == index
assert state.velocities[0, 0] == index
assert state.alpha == index / 10.
assert state.energy == index
def main():
c = comm.MPICommunicator(N_ATOMS, N_REPLICAS)
c.initialize()
if c.is_master():
states = [generate_state(index) for index in range(4)]
my_state = c.broadcast_states_to_slaves(states)
check_state(my_state, 0)
else:
my_state = c.receive_state_from_master()
check_state(my_state, c.rank)
if __name__ == '__main__':
main()
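# Run sketch (assumes an MPI launcher is available); one process per replica:
#   mpirun -np 4 python broadcast_states.py
# Rank 0 acts as master and broadcasts one SystemState to each slave rank.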
|
bosstb/YGY60W
|
refs/heads/master
|
Test.py
|
2
|
#coding=utf-8
from datetime import datetime
import random
# Generate a sequence l of 100 random floats between 0 and 1
l=0.1
l = random.randint(1, 100)
l=float(l)/100
print datetime.today()
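# (Sketch) To actually build the 100-float sequence the comment above
# describes, one option is:
#   seq = [random.random() for _ in range(100)]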
|
Vegasvikk/django-cms
|
refs/heads/develop
|
cms/tests/plugins.py
|
7
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import base64
import datetime
import json
import os
from django import http
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.core import urlresolvers
from django.core.cache import cache
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management import call_command
from django.forms.widgets import Media
from django.test.testcases import TestCase
from django.utils import timezone
from cms import api
from cms.constants import PLUGIN_MOVE_ACTION, PLUGIN_COPY_ACTION
from cms.exceptions import PluginAlreadyRegistered, PluginNotRegistered, DontUsePageAttributeWarning
from cms.models import Page, Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.sitemaps.cms_sitemap import CMSSitemap
from cms.test_utils.project.pluginapp.plugins.manytomany_rel.models import (
Article, Section, ArticlePluginModel)
from cms.test_utils.project.pluginapp.plugins.meta.cms_plugins import (
TestPlugin, TestPlugin2, TestPlugin3, TestPlugin4, TestPlugin5)
from cms.test_utils.project.pluginapp.plugins.validation.cms_plugins import (
NonExisitngRenderTemplate, NoRender, NoRenderButChildren, DynTemplate)
from cms.test_utils.testcases import (
CMSTestCase, URL_CMS_PAGE, URL_CMS_PLUGIN_MOVE, URL_CMS_PAGE_ADD,
URL_CMS_PLUGIN_ADD, URL_CMS_PLUGIN_EDIT, URL_CMS_PAGE_CHANGE,
URL_CMS_PLUGIN_REMOVE, URL_CMS_PAGE_PUBLISH)
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.conf import get_cms_setting
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.plugins import get_plugins_for_page, get_plugins
from djangocms_googlemap.models import GoogleMap
from djangocms_inherit.cms_plugins import InheritPagePlaceholderPlugin
from djangocms_file.models import File
from djangocms_inherit.models import InheritPagePlaceholder
from djangocms_link.forms import LinkForm
from djangocms_link.models import Link
from djangocms_picture.models import Picture
from djangocms_text_ckeditor.models import Text
from djangocms_text_ckeditor.utils import plugin_tags_to_id_list, plugin_to_tag
class DumbFixturePlugin(CMSPluginBase):
model = CMSPlugin
name = "Dumb Test Plugin. It does nothing."
render_template = ""
admin_preview = False
render_plugin = False
def render(self, context, instance, placeholder):
return context
class DumbFixturePluginWithUrls(DumbFixturePlugin):
name = DumbFixturePlugin.name + " With custom URLs."
render_plugin = False
def _test_view(self, request):
return http.HttpResponse("It works")
def get_plugin_urls(self):
return [
url(r'^testview/$', admin.site.admin_view(self._test_view), name='dumbfixtureplugin'),
]
plugin_pool.register_plugin(DumbFixturePluginWithUrls)
class PluginsTestBaseCase(CMSTestCase):
def setUp(self):
self.super_user = self._create_user("test", True, True)
self.slave = self._create_user("slave", True)
self.FIRST_LANG = settings.LANGUAGES[0][0]
self.SECOND_LANG = settings.LANGUAGES[1][0]
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
def tearDown(self):
self._login_context.__exit__(None, None, None)
def approve_page(self, page):
response = self.client.get(URL_CMS_PAGE + "%d/approve/" % page.pk)
self.assertRedirects(response, URL_CMS_PAGE)
# reload page
return self.reload_page(page)
def get_request(self, *args, **kwargs):
request = super(PluginsTestBaseCase, self).get_request(*args, **kwargs)
request.placeholder_media = Media()
request.toolbar = CMSToolbar(request)
return request
def get_response_pk(self, response):
return int(response.content.decode('utf8').split("/edit-plugin/")[1].split("/")[0])
class PluginsTestCase(PluginsTestBaseCase):
def _create_text_plugin_on_page(self, page):
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(CMSPlugin.objects.count(), 1)
self.assertEqual(response.status_code, 200)
created_plugin_id = self.get_response_pk(response)
self.assertEqual(created_plugin_id, CMSPlugin.objects.all()[0].pk)
return created_plugin_id
def _edit_text_plugin(self, plugin_id, text):
edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
data = {
"body": text
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
txt = Text.objects.get(pk=plugin_id)
return txt
def test_add_edit_plugin(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
created_plugin_id = self._create_text_plugin_on_page(page)
# now edit the plugin
txt = self._edit_text_plugin(created_plugin_id, "Hello World")
self.assertEqual("Hello World", txt.body)
# edit body, but click cancel button
data = {
"body": "Hello World!!",
"_cancel": True,
}
edit_url = '%s%d/' % (URL_CMS_PLUGIN_EDIT, created_plugin_id)
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEqual("Hello World", txt.body)
def test_plugin_edit_marks_page_dirty(self):
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
self.assertEqual(response.status_code, 302)
page = Page.objects.all()[0]
response = self.client.post(URL_CMS_PAGE_PUBLISH % (page.pk, 'en'))
self.assertEqual(response.status_code, 302)
created_plugin_id = self._create_text_plugin_on_page(page)
page = Page.objects.all()[0]
self.assertEqual(page.is_dirty('en'), True)
response = self.client.post(URL_CMS_PAGE_PUBLISH % (page.pk, 'en'))
self.assertEqual(response.status_code, 302)
page = Page.objects.all()[0]
self.assertEqual(page.is_dirty('en'), False)
self._edit_text_plugin(created_plugin_id, "Hello World")
page = Page.objects.all()[0]
self.assertEqual(page.is_dirty('en'), True)
def test_plugin_order(self):
"""
Test that plugin position is saved after creation
"""
page_en = api.create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=True, in_navigation=True)
ph_en = page_en.placeholders.get(slot="col_left")
# We check created objects and objects from the DB to be sure the position value
# has been saved correctly
text_plugin_1 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the first")
text_plugin_2 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm the second")
db_plugin_1 = CMSPlugin.objects.get(pk=text_plugin_1.pk)
db_plugin_2 = CMSPlugin.objects.get(pk=text_plugin_2.pk)
with self.settings(CMS_PERMISSION=False):
self.assertEqual(text_plugin_1.position, 0)
self.assertEqual(db_plugin_1.position, 0)
self.assertEqual(text_plugin_2.position, 1)
self.assertEqual(db_plugin_2.position, 1)
## Finally we render the placeholder to test the actual content
rendered_placeholder = ph_en.render(self.get_context(page_en.get_absolute_url(), page=page_en), None)
self.assertEqual(rendered_placeholder, "I'm the firstI'm the second")
def test_plugin_order_alt(self):
"""
Test that plugin position is saved after creation
"""
draft_page = api.create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=False, in_navigation=True)
placeholder = draft_page.placeholders.get(slot="col_left")
# We check created objects and objects from the DB to be sure the position value
# has been saved correctly
text_plugin_2 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the second")
text_plugin_3 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the third")
# Publish to create a 'live' version
draft_page.publish('en')
draft_page = draft_page.reload()
placeholder = draft_page.placeholders.get(slot="col_left")
# Add a plugin and move it to the first position
text_plugin_1 = api.add_plugin(placeholder, "TextPlugin", "en", body="I'm the first")
data = {
'placeholder_id': placeholder.id,
'plugin_id': text_plugin_1.id,
'plugin_parent': '',
'plugin_language': 'en',
'plugin_order[]': [text_plugin_1.id, text_plugin_2.id, text_plugin_3.id],
}
self.client.post(URL_CMS_PLUGIN_MOVE, data)
draft_page.publish('en')
draft_page = draft_page.reload()
live_page = draft_page.get_public_object()
placeholder = draft_page.placeholders.get(slot="col_left")
live_placeholder = live_page.placeholders.get(slot="col_left")
with self.settings(CMS_PERMISSION=False):
self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_1.pk).position, 0)
self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_2.pk).position, 1)
self.assertEqual(CMSPlugin.objects.get(pk=text_plugin_3.pk).position, 2)
## Finally we render the placeholder to test the actual content
rendered_placeholder = placeholder.render(self.get_context(draft_page.get_absolute_url(), page=draft_page), None)
self.assertEqual(rendered_placeholder, "I'm the firstI'm the secondI'm the third")
rendered_live_placeholder = live_placeholder.render(self.get_context(live_page.get_absolute_url(), page=live_page), None)
self.assertEqual(rendered_live_placeholder, "I'm the firstI'm the secondI'm the third")
def test_plugin_breadcrumbs(self):
"""
Test the plugin breadcrumbs order
"""
draft_page = api.create_page("home", "col_two.html", "en",
slug="page1", published=False, in_navigation=True)
placeholder = draft_page.placeholders.get(slot="col_left")
columns = api.add_plugin(placeholder, "MultiColumnPlugin", "en")
column = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='10%')
text_plugin = api.add_plugin(placeholder, "TextPlugin", "en", target=column, body="I'm the second")
text_breadcrumbs = text_plugin.get_breadcrumb()
self.assertEqual(len(columns.get_breadcrumb()), 1)
self.assertEqual(len(column.get_breadcrumb()), 2)
self.assertEqual(len(text_breadcrumbs), 3)
self.assertTrue(text_breadcrumbs[0]['title'], columns.get_plugin_class().name)
self.assertTrue(text_breadcrumbs[1]['title'], column.get_plugin_class().name)
self.assertTrue(text_breadcrumbs[2]['title'], text_plugin.get_plugin_class().name)
self.assertTrue('/edit-plugin/%s/'% columns.pk in text_breadcrumbs[0]['url'])
self.assertTrue('/edit-plugin/%s/'% column.pk, text_breadcrumbs[1]['url'])
self.assertTrue('/edit-plugin/%s/'% text_plugin.pk, text_breadcrumbs[2]['url'])
def test_add_cancel_plugin(self):
"""
Test that you can cancel a new plugin before editing and
that the plugin is removed.
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
pk = CMSPlugin.objects.all()[0].pk
expected = {
"url": "/en/admin/cms/page/edit-plugin/%s/" % pk,
"breadcrumb": [
{
"url": "/en/admin/cms/page/edit-plugin/%s/" % pk,
"title": "Text"
}
],
'delete': '/en/admin/cms/page/delete-plugin/%s/' % pk
}
output = json.loads(response.content.decode('utf8'))
self.assertEqual(output, expected)
# now click cancel instead of editing
response = self.client.get(output['url'])
self.assertEqual(response.status_code, 200)
data = {
"body": "Hello World",
"_cancel": True,
}
response = self.client.post(output['url'], data)
self.assertEqual(response.status_code, 200)
self.assertEqual(0, Text.objects.count())
def test_extract_images_from_text(self):
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + "%s/" % CMSPlugin.objects.all()[0].pk
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
img_path = os.path.join(os.path.dirname(__file__), 'data', 'image.jpg')
with open(img_path, 'rb') as fobj:
img_data = base64.b64encode(fobj.read()).decode('utf-8')
body = """<p>
<img alt='' src='data:image/jpeg;base64,{data}' />
</p>""".format(data=img_data)
data = {
"body": body
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertTrue('id="plugin_obj_%s"' % (txt.pk + 1) in txt.body)
def test_add_text_plugin_empty_tag(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + "%s/" % CMSPlugin.objects.all()[0].pk
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
data = {
"body": '<div class="someclass"></div><p>foo</p>'
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEqual('<div class="someclass"></div><p>foo</p>', txt.body)
def test_add_text_plugin_html_sanitizer(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + "%s/" % CMSPlugin.objects.all()[0].pk
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
data = {
"body": '<script>var bar="hacked"</script>'
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEqual('<script>var bar="hacked"</script>', txt.body)
def test_copy_plugins_method(self):
"""
Test that CMSPlugin copy does not have side effects
"""
# create some objects
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_en = page_en.placeholders.get(slot="body")
ph_de = page_de.placeholders.get(slot="body")
# add the text plugin
text_plugin_en = api.add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
self.assertEqual(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
# add a *nested* link plugin
link_plugin_en = api.add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
name="A Link", url="https://www.django-cms.org")
#
text_plugin_en.body += plugin_to_tag(link_plugin_en)
text_plugin_en.save()
        # adding a child above means the parent plugin must be reloaded here.
text_plugin_en = self.reload(text_plugin_en)
# setup the plugins to copy
plugins = [text_plugin_en, link_plugin_en]
# save the old ids for check
old_ids = [plugin.pk for plugin in plugins]
new_plugins = []
plugins_ziplist = []
old_parent_cache = {}
        # This is a stripped-down version of cms.copy_plugins.copy_plugins_to
        # for low-level testing of the copy process
for plugin in plugins:
new_plugins.append(plugin.copy_plugin(ph_de, 'de', old_parent_cache))
plugins_ziplist.append((new_plugins[-1], plugin))
for idx, plugin in enumerate(plugins):
inst, _ = new_plugins[idx].get_plugin_instance()
new_plugins[idx] = inst
new_plugins[idx].post_copy(plugin, plugins_ziplist)
for idx, plugin in enumerate(plugins):
# original plugin instance reference should stay unmodified
self.assertEqual(old_ids[idx], plugin.pk)
# new plugin instance should be different from the original
self.assertNotEqual(new_plugins[idx], plugin.pk)
# text plugins (both old and new) should contain a reference
# to the link plugins
if plugin.plugin_type == 'TextPlugin':
self.assertTrue('link.png' in plugin.body)
self.assertTrue('plugin_obj_%s' % plugin.get_children()[0].pk in plugin.body)
self.assertTrue('link.png' in new_plugins[idx].body)
self.assertTrue('plugin_obj_%s' % new_plugins[idx].get_children()[0].pk in new_plugins[idx].body)
def test_plugin_position(self):
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
placeholder = page_en.placeholders.get(slot="body")
placeholder_right = page_en.placeholders.get(slot="right-column")
columns = api.add_plugin(placeholder, "MultiColumnPlugin", "en")
column_1 = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='10%')
column_2 = api.add_plugin(placeholder, "ColumnPlugin", "en", target=columns, width='30%')
api.add_plugin(placeholder, "TextPlugin", "en", target=column_1, body="I'm the first")
text_plugin = api.add_plugin(placeholder, "TextPlugin", "en", target=column_1, body="I'm the second")
returned_1 = copy_plugins_to([text_plugin], placeholder, 'en', column_1.pk)
returned_2 = copy_plugins_to([text_plugin], placeholder_right, 'en')
returned_3 = copy_plugins_to([text_plugin], placeholder, 'en', column_2.pk)
# Second plugin in the plugin branch
self.assertEqual(text_plugin.position, 1)
# Added as third plugin in the same branch as the above
self.assertEqual(returned_1[0][0].position, 2)
# First plugin in a placeholder
self.assertEqual(returned_2[0][0].position, 0)
# First plugin nested in a plugin
self.assertEqual(returned_3[0][0].position, 0)
def test_copy_plugins(self):
"""
Test that copying plugins works as expected.
"""
# create some objects
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_en = page_en.placeholders.get(slot="body")
ph_de = page_de.placeholders.get(slot="body")
# add the text plugin
text_plugin_en = api.add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
self.assertEqual(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
# add a *nested* link plugin
link_plugin_en = api.add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
name="A Link", url="https://www.django-cms.org")
        # adding the child above requires reloading the text plugin here
text_plugin_en = self.reload(text_plugin_en)
# check the relations
self.assertEqual(text_plugin_en.get_children().count(), 1)
self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
# just sanity check that so far everything went well
self.assertEqual(CMSPlugin.objects.count(), 2)
# copy the plugins to the german placeholder
copy_plugins_to(ph_en.get_plugins(), ph_de, 'de')
self.assertEqual(ph_de.cmsplugin_set.filter(parent=None).count(), 1)
text_plugin_de = ph_de.cmsplugin_set.get(parent=None).get_plugin_instance()[0]
self.assertEqual(text_plugin_de.get_children().count(), 1)
link_plugin_de = text_plugin_de.get_children().get().get_plugin_instance()[0]
# check we have twice as many plugins as before
self.assertEqual(CMSPlugin.objects.count(), 4)
# check language plugins
self.assertEqual(CMSPlugin.objects.filter(language='de').count(), 2)
self.assertEqual(CMSPlugin.objects.filter(language='en').count(), 2)
text_plugin_en = self.reload(text_plugin_en)
link_plugin_en = self.reload(link_plugin_en)
# check the relations in english didn't change
self.assertEqual(text_plugin_en.get_children().count(), 1)
self.assertEqual(link_plugin_en.parent.pk, text_plugin_en.pk)
self.assertEqual(link_plugin_de.name, link_plugin_en.name)
self.assertEqual(link_plugin_de.url, link_plugin_en.url)
self.assertEqual(text_plugin_de.body, text_plugin_en.body)
# test subplugin copy
copy_plugins_to([link_plugin_en], ph_de, 'de')
def test_deep_copy_plugins(self):
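        """
        Copying a subtree (a column plugin plus a link plugin) into another
        placeholder should recreate the nested structure under the new parent.
        """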
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_en = page_en.placeholders.get(slot="body")
ph_de = page_de.placeholders.get(slot="body")
# add the text plugin
mcol1 = api.add_plugin(ph_en, "MultiColumnPlugin", "en", position="first-child")
mcol2 = api.add_plugin(ph_en, "MultiColumnPlugin", "en", position="first-child")
mcol1 = self.reload(mcol1)
col1 = api.add_plugin(ph_en, "ColumnPlugin", "en", position="first-child", target=mcol1)
mcol1 = self.reload(mcol1)
col2 = api.add_plugin(ph_en, "ColumnPlugin", "en", position="first-child", target=mcol1)
mcol2 = self.reload(mcol2)
col3 = api.add_plugin(ph_en, "ColumnPlugin", "en", position="first-child", target=mcol2)
mcol2 = self.reload(mcol2)
api.add_plugin(ph_en, "ColumnPlugin", "en", position="first-child", target=mcol2)
mcol1 = api.add_plugin(ph_de, "MultiColumnPlugin", "de", position="first-child")
# add a *nested* link plugin
mcol1 = self.reload(mcol1)
mcol2 = self.reload(mcol2)
col3 = self.reload(col3)
col2 = self.reload(col2)
col1 = self.reload(col1)
link_plugin_en = api.add_plugin(ph_en, "LinkPlugin", "en", target=col2,
name="A Link", url="https://www.django-cms.org")
mcol1 = self.reload(mcol1)
mcol2 = self.reload(mcol2)
col3 = self.reload(col3)
col2 = self.reload(col2)
col1 = self.reload(col1)
copy_plugins_to([col2, link_plugin_en], ph_de, 'de', mcol1.pk)
mcol1 = self.reload(mcol1)
mcol2 = self.reload(mcol2)
self.reload(col3)
self.reload(col2)
self.reload(col1)
self.reload(link_plugin_en)
mcol1 = self.reload(mcol1)
self.assertEqual(mcol1.get_descendants().count(), 2)
with self.assertNumQueries(FuzzyInt(0, 207)):
page_en.publish('en')
def test_plugin_validation(self):
self.assertRaises(ImproperlyConfigured, plugin_pool.register_plugin, NonExisitngRenderTemplate)
self.assertRaises(ImproperlyConfigured, plugin_pool.register_plugin, NoRender)
self.assertRaises(ImproperlyConfigured, plugin_pool.register_plugin, NoRenderButChildren)
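        # by contrast, a plugin that resolves its template dynamically
        # should register without raising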
plugin_pool.register_plugin(DynTemplate)
def test_remove_plugin_before_published(self):
"""
When removing a draft plugin we would expect the public copy of the plugin to also be removed
"""
# add a page
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
# add a plugin
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.get_response_pk(response), CMSPlugin.objects.all()[0].pk)
# there should be only 1 plugin
self.assertEqual(CMSPlugin.objects.all().count(), 1)
# delete the plugin
plugin_data = {
'plugin_id': self.get_response_pk(response)
}
remove_url = URL_CMS_PLUGIN_REMOVE + "%s/" % self.get_response_pk(response)
response = self.client.post(remove_url, plugin_data)
self.assertEqual(response.status_code, 302)
# there should be no plugins
self.assertEqual(0, CMSPlugin.objects.all().count())
def test_remove_plugin_after_published(self):
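        """
        Deleting a draft plugin after the page was published should leave
        the public copy in place.
        """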
# add a page
api.create_page("home", "nav_playground.html", "en")
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
# add a plugin
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
plugin_id = self.get_response_pk(response)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.get_response_pk(response), CMSPlugin.objects.all()[0].pk)
# there should be only 1 plugin
self.assertEqual(CMSPlugin.objects.all().count(), 1)
self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count(), 1)
# publish page
response = self.client.post(URL_CMS_PAGE + "%d/en/publish/" % page.pk, {1: 1})
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 3)
# there should now be two plugins - 1 draft, 1 public
self.assertEqual(CMSPlugin.objects.all().count(), 2)
# delete the plugin
plugin_data = {
'plugin_id': plugin_id
}
remove_url = URL_CMS_PLUGIN_REMOVE + "%s/" % plugin_id
response = self.client.post(remove_url, plugin_data)
self.assertEqual(response.status_code, 302)
# there should be no plugins
self.assertEqual(CMSPlugin.objects.all().count(), 1)
self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=False).count(), 1)
def test_remove_plugin_not_associated_to_page(self):
"""
Test case for PlaceholderField
"""
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
# add a plugin
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.get_response_pk(response), CMSPlugin.objects.all()[0].pk)
# there should be only 1 plugin
self.assertEqual(CMSPlugin.objects.all().count(), 1)
ph = Placeholder(slot="subplugin")
ph.save()
plugin_data = {
'plugin_type': "TextPlugin",
'language': settings.LANGUAGES[0][0],
'placeholder': ph.pk,
'parent': self.get_response_pk(response)
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# no longer allowed for security reasons
self.assertEqual(response.status_code, 404)
def test_register_plugin_twice_should_raise(self):
number_of_plugins_before = len(plugin_pool.get_all_plugins())
        # The first time we register the plugin it should work
        plugin_pool.register_plugin(DumbFixturePlugin)
        # Let's add it a second time; we should catch an exception
raised = False
try:
plugin_pool.register_plugin(DumbFixturePlugin)
except PluginAlreadyRegistered:
raised = True
self.assertTrue(raised)
# Let's also unregister the plugin now, and assert it's not in the
# pool anymore
plugin_pool.unregister_plugin(DumbFixturePlugin)
# Let's make sure we have the same number of plugins as before:
number_of_plugins_after = len(plugin_pool.get_all_plugins())
self.assertEqual(number_of_plugins_before, number_of_plugins_after)
def test_unregister_non_existing_plugin_should_raise(self):
number_of_plugins_before = len(plugin_pool.get_all_plugins())
raised = False
try:
            # No such plugin should be registered, provided the other
            # tests don't leak plugins
plugin_pool.unregister_plugin(DumbFixturePlugin)
except PluginNotRegistered:
raised = True
self.assertTrue(raised)
# Let's count, to make sure we didn't remove a plugin accidentally.
number_of_plugins_after = len(plugin_pool.get_all_plugins())
self.assertEqual(number_of_plugins_before, number_of_plugins_after)
def test_inheritplugin_media(self):
"""
Test case for InheritPagePlaceholder
"""
inheritfrompage = api.create_page('page to inherit from',
'nav_playground.html',
'en')
body = inheritfrompage.placeholders.get(slot="body")
plugin = GoogleMap(
plugin_type='GoogleMapPlugin',
placeholder=body,
position=1,
language=settings.LANGUAGE_CODE,
address="Riedtlistrasse 16",
zipcode="8006",
city="Zurich",
)
plugin.add_root(instance=plugin)
inheritfrompage.publish('en')
page = api.create_page('inherit from page',
'nav_playground.html',
'en',
published=True)
inherited_body = page.placeholders.get(slot="body")
inherit_plugin = InheritPagePlaceholder(
plugin_type='InheritPagePlaceholderPlugin',
placeholder=inherited_body,
position=1,
language=settings.LANGUAGE_CODE,
from_page=inheritfrompage,
from_language=settings.LANGUAGE_CODE)
inherit_plugin.add_root(instance=inherit_plugin)
page.publish('en')
self.client.logout()
cache.clear()
response = self.client.get(page.get_absolute_url())
self.assertTrue(
            'https://maps-api-ssl.google.com/maps/api/js?v=3&sensor=true' in response.content.decode('utf8').replace("&amp;", "&"))
def test_inherit_plugin_with_empty_plugin(self):
inheritfrompage = api.create_page('page to inherit from',
'nav_playground.html',
'en', published=True)
body = inheritfrompage.placeholders.get(slot="body")
empty_plugin = CMSPlugin(
plugin_type='TextPlugin', # create an empty plugin
placeholder=body,
position=1,
language='en',
)
empty_plugin.add_root(instance=empty_plugin)
other_page = api.create_page('other page', 'nav_playground.html', 'en', published=True)
inherited_body = other_page.placeholders.get(slot="body")
api.add_plugin(inherited_body, InheritPagePlaceholderPlugin, 'en', position='last-child',
from_page=inheritfrompage, from_language='en')
api.add_plugin(inherited_body, "TextPlugin", "en", body="foobar")
        # this should not fail, even if there is an empty plugin
rendered = inherited_body.render(context=self.get_context(other_page.get_absolute_url(), page=other_page), width=200)
self.assertIn("foobar", rendered)
def test_render_textplugin(self):
# Setup
page = api.create_page("render test", "nav_playground.html", "en")
ph = page.placeholders.get(slot="body")
text_plugin = api.add_plugin(ph, "TextPlugin", "en", body="Hello World")
link_plugins = []
for i in range(0, 10):
text_plugin = Text.objects.get(pk=text_plugin.pk)
link_plugins.append(api.add_plugin(ph, "LinkPlugin", "en",
target=text_plugin,
name="A Link %d" % i,
url="http://django-cms.org"))
text_plugin.text.body += '<img src="/static/cms/img/icons/plugins/link.png" alt="Link - %s" id="plugin_obj_%d" title="Link - %s" />' % (
link_plugins[-1].name,
link_plugins[-1].pk,
link_plugins[-1].name,
)
text_plugin.save()
txt = text_plugin.text
ph = Placeholder.objects.get(pk=ph.pk)
txt.body = '\n'.join(['<img id="plugin_obj_%d" src=""/>' % l.cmsplugin_ptr_id for l in link_plugins])
txt.save()
text_plugin = self.reload(text_plugin)
with self.assertNumQueries(2):
rendered = text_plugin.render_plugin(placeholder=ph)
for i in range(0, 10):
self.assertTrue('A Link %d' % i in rendered)
def test_copy_textplugin(self):
"""
Test that copying of textplugins replaces references to copied plugins
"""
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin_base = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=0,
language=self.FIRST_LANG)
plugin_base = plugin_base.add_root(instance=plugin_base)
plugin = Text(body='')
plugin_base.set_base_attr(plugin)
plugin.save()
plugin_ref_1_base = CMSPlugin(
plugin_type='EmptyPlugin',
placeholder=placeholder,
position=0,
language=self.FIRST_LANG)
plugin_ref_1_base = plugin_base.add_child(instance=plugin_ref_1_base)
plugin_ref_2_base = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin_ref_2_base = plugin_base.add_child(instance=plugin_ref_2_base)
plugin_ref_2 = Text(body='')
plugin_ref_2_base.set_base_attr(plugin_ref_2)
plugin_ref_2.save()
plugin.body = ' <img id="plugin_obj_%s" src=""/><img id="plugin_obj_%s" src=""/>' % (
str(plugin_ref_1_base.pk), str(plugin_ref_2.pk))
plugin.save()
page_data = self.get_new_page_data()
        # create 2nd language page
page_data.update({
'language': self.SECOND_LANG,
'title': "%s %s" % (page.get_title(), self.SECOND_LANG),
})
response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + "?language=%s" % self.SECOND_LANG, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)
self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)
self.assertEqual(CMSPlugin.objects.count(), 3)
self.assertEqual(Page.objects.all().count(), 1)
copy_data = {
'source_placeholder_id': placeholder.pk,
'target_placeholder_id': placeholder.pk,
'target_language': self.SECOND_LANG,
'source_language': self.FIRST_LANG,
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode('utf8').count('"position":'), 3)
# assert copy success
self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)
self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 3)
self.assertEqual(CMSPlugin.objects.count(), 6)
plugins = list(CMSPlugin.objects.all())
new_plugin = plugins[3].get_plugin_instance()[0]
idlist = sorted(plugin_tags_to_id_list(new_plugin.body))
expected = sorted([plugins[4].pk, plugins[5].pk])
self.assertEqual(idlist, expected)
def test_search_pages(self):
"""
Test search for pages
"""
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
text = Text(body="hello", language="en", placeholder=placeholder, plugin_type="TextPlugin", position=1)
text.save()
page.publish('en')
pages = Page.objects.search("hi")
self.assertEqual(pages.count(), 0)
        self.assertEqual(Page.objects.search("hello").count(), 1)
def test_empty_plugin_is_not_ignored(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin.add_root(instance=plugin)
# this should not raise any errors, but just ignore the empty plugin
out = placeholder.render(self.get_context(), width=300)
self.assertFalse(len(out))
self.assertTrue(len(placeholder._plugins_cache))
    def test_defer_pickle(self):
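        """
        Plugin instances loaded with deferred fields must stay picklable.
        """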
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
api.add_plugin(placeholder, "TextPlugin", 'en', body="Hello World")
plugins = Text.objects.all().defer('path')
import pickle
import io
a = io.BytesIO()
pickle.dump(plugins[0], a)
def test_empty_plugin_description(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
a = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG
)
self.assertEqual(a.get_short_description(), "<Empty>")
def test_page_attribute_warns(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
a = CMSPlugin(
plugin_type='TextPlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG
)
a.save()
def get_page(plugin):
return plugin.page
self.assertWarns(
DontUsePageAttributeWarning,
"Don't use the page attribute on CMSPlugins! CMSPlugins are not guaranteed to have a page associated with them!",
get_page, a
)
def test_set_translatable_content(self):
a = Text(body="hello")
self.assertTrue(a.set_translatable_content({'body': 'world'}))
b = Link(name="hello")
self.assertTrue(b.set_translatable_content({'name': 'world'}))
def test_editing_plugin_changes_page_modification_time_in_sitemap(self):
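        """
        The sitemap's last-modification time for a page should follow the
        most recent plugin edit, not the page's creation date.
        """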
now = timezone.now()
one_day_ago = now - datetime.timedelta(days=1)
page = api.create_page("page", "nav_playground.html", "en", published=True)
title = page.get_title_obj('en')
page.creation_date = one_day_ago
page.changed_date = one_day_ago
plugin_id = self._create_text_plugin_on_page(page)
plugin = self._edit_text_plugin(plugin_id, "fnord")
actual_last_modification_time = CMSSitemap().lastmod(title)
actual_last_modification_time -= datetime.timedelta(microseconds=actual_last_modification_time.microsecond)
self.assertEqual(plugin.changed_date.date(), actual_last_modification_time.date())
def test_moving_plugin_to_different_placeholder(self):
plugin_pool.register_plugin(DumbFixturePlugin)
page = api.create_page("page", "nav_playground.html", "en", published=True)
plugin_data = {
'plugin_type': 'DumbFixturePlugin',
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot='body').pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
plugin_data['plugin_parent'] = self.get_response_pk(response)
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
post = {
'plugin_id': self.get_response_pk(response),
'placeholder_id': page.placeholders.get(slot='right-column').pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
from cms.utils.plugins import build_plugin_tree
build_plugin_tree(page.placeholders.get(slot='right-column').get_plugins_list())
plugin_pool.unregister_plugin(DumbFixturePlugin)
def test_get_plugins_for_page(self):
page_en = api.create_page("PluginOrderPage", "col_two.html", "en",
slug="page1", published=True, in_navigation=True)
ph_en = page_en.placeholders.get(slot="col_left")
text_plugin_1 = api.add_plugin(ph_en, "TextPlugin", "en", body="I'm inside an existing placeholder.")
# This placeholder is not in the template.
ph_en_not_used = page_en.placeholders.create(slot="not_used")
text_plugin_2 = api.add_plugin(ph_en_not_used, "TextPlugin", "en", body="I'm inside a non-existent placeholder.")
page_plugins = get_plugins_for_page(None, page_en, page_en.get_title_obj_attribute('language'))
db_text_plugin_1 = page_plugins.get(pk=text_plugin_1.pk)
self.assertRaises(CMSPlugin.DoesNotExist, page_plugins.get, pk=text_plugin_2.pk)
self.assertEqual(db_text_plugin_1.pk, text_plugin_1.pk)
def test_plugin_move_with_reload(self):
action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': True
},
PLUGIN_COPY_ACTION: {
'requires_reload': True
},
}
non_reload_action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': False
},
PLUGIN_COPY_ACTION: {
'requires_reload': False
},
}
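        # define two throwaway plugin classes, one per reload behaviour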
ReloadDrivenPlugin = type('ReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=action_options, render_plugin=False))
NonReloadDrivenPlugin = type('NonReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=non_reload_action_options, render_plugin=False))
plugin_pool.register_plugin(ReloadDrivenPlugin)
plugin_pool.register_plugin(NonReloadDrivenPlugin)
page = api.create_page("page", "nav_playground.html", "en", published=True)
source_placeholder = page.placeholders.get(slot='body')
target_placeholder = page.placeholders.get(slot='right-column')
reload_expected = {'reload': True}
no_reload_expected = {'reload': False}
plugin_1 = api.add_plugin(source_placeholder, ReloadDrivenPlugin, settings.LANGUAGES[0][0])
plugin_2 = api.add_plugin(source_placeholder, NonReloadDrivenPlugin, settings.LANGUAGES[0][0])
# Test Plugin reload == True on Move
post = {
'plugin_id': plugin_1.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), reload_expected)
# Test Plugin reload == False on Move
post = {
'plugin_id': plugin_2.pk,
'placeholder_id': target_placeholder.pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_MOVE, post)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), no_reload_expected)
plugin_pool.unregister_plugin(ReloadDrivenPlugin)
plugin_pool.unregister_plugin(NonReloadDrivenPlugin)
def test_plugin_copy_with_reload(self):
action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': True
},
PLUGIN_COPY_ACTION: {
'requires_reload': True
},
}
non_reload_action_options = {
PLUGIN_MOVE_ACTION: {
'requires_reload': False
},
PLUGIN_COPY_ACTION: {
'requires_reload': False
},
}
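        # as above: one throwaway plugin class per reload behaviour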
ReloadDrivenPlugin = type('ReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=action_options, render_plugin=False))
NonReloadDrivenPlugin = type('NonReloadDrivenPlugin', (CMSPluginBase,), dict(action_options=non_reload_action_options, render_plugin=False))
plugin_pool.register_plugin(ReloadDrivenPlugin)
plugin_pool.register_plugin(NonReloadDrivenPlugin)
page = api.create_page("page", "nav_playground.html", "en", published=True)
source_placeholder = page.placeholders.get(slot='body')
target_placeholder = page.placeholders.get(slot='right-column')
api.add_plugin(source_placeholder, ReloadDrivenPlugin, settings.LANGUAGES[0][0])
plugin_2 = api.add_plugin(source_placeholder, NonReloadDrivenPlugin, settings.LANGUAGES[0][0])
# Test Plugin reload == True on Copy
copy_data = {
'source_placeholder_id': source_placeholder.pk,
'target_placeholder_id': target_placeholder.pk,
'target_language': settings.LANGUAGES[0][0],
'source_language': settings.LANGUAGES[0][0],
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.content.decode('utf8'))
self.assertEqual(json_response['reload'], True)
# Test Plugin reload == False on Copy
copy_data = {
'source_placeholder_id': source_placeholder.pk,
'source_plugin_id': plugin_2.pk,
'target_placeholder_id': target_placeholder.pk,
'target_language': settings.LANGUAGES[0][0],
'source_language': settings.LANGUAGES[0][0],
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEqual(response.status_code, 200)
json_response = json.loads(response.content.decode('utf8'))
self.assertEqual(json_response['reload'], False)
plugin_pool.unregister_plugin(ReloadDrivenPlugin)
plugin_pool.unregister_plugin(NonReloadDrivenPlugin)
def test_custom_plugin_urls(self):
plugin_url = urlresolvers.reverse('admin:dumbfixtureplugin')
response = self.client.get(plugin_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"It works")
def test_plugin_require_parent(self):
"""
Assert that a plugin marked as 'require_parent' is not listed
in the plugin pool when a placeholder is specified
"""
ParentRequiredPlugin = type('ParentRequiredPlugin', (CMSPluginBase,),
dict(require_parent=True, render_plugin=False))
plugin_pool.register_plugin(ParentRequiredPlugin)
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
plugin_list = plugin_pool.get_all_plugins(placeholder=placeholder, page=page)
self.assertFalse(ParentRequiredPlugin in plugin_list)
plugin_pool.unregister_plugin(ParentRequiredPlugin)
def test_plugin_parent_classes(self):
"""
Assert that a plugin with a list of parent classes only appears in the
toolbar plugin struct for those given parent Plugins
"""
ParentClassesPlugin = type('ParentClassesPlugin', (CMSPluginBase,),
dict(parent_classes=['GenericParentPlugin'], render_plugin=False))
GenericParentPlugin = type('GenericParentPlugin', (CMSPluginBase,), {'render_plugin':False})
KidnapperPlugin = type('KidnapperPlugin', (CMSPluginBase,), {'render_plugin':False})
expected_struct = {'module': u'Generic',
'name': u'Parent Classes Plugin',
'value': 'ParentClassesPlugin'}
for plugin in [ParentClassesPlugin, GenericParentPlugin, KidnapperPlugin]:
plugin_pool.register_plugin(plugin)
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
from cms.utils.placeholder import get_toolbar_plugin_struct
toolbar_struct = get_toolbar_plugin_struct([ParentClassesPlugin],
placeholder.slot,
page,
parent=GenericParentPlugin)
self.assertTrue(expected_struct in toolbar_struct)
toolbar_struct = get_toolbar_plugin_struct([ParentClassesPlugin],
placeholder.slot,
page,
parent=KidnapperPlugin)
self.assertFalse(expected_struct in toolbar_struct)
toolbar_struct = get_toolbar_plugin_struct([ParentClassesPlugin, GenericParentPlugin],
placeholder.slot,
page)
expected_struct = {'module': u'Generic',
'name': u'Generic Parent Plugin',
'value': 'GenericParentPlugin'}
self.assertTrue(expected_struct in toolbar_struct)
for plugin in [ParentClassesPlugin, GenericParentPlugin, KidnapperPlugin]:
plugin_pool.unregister_plugin(plugin)
def test_plugin_child_classes_from_settings(self):
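        """
        CMS_PLACEHOLDER_CONF can override a plugin's child_classes for a
        given placeholder slot.
        """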
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ChildClassesPlugin = type('ChildClassesPlugin', (CMSPluginBase,),
dict(child_classes=['TextPlugin'], render_template='allow_children_plugin.html'))
plugin_pool.register_plugin(ChildClassesPlugin)
plugin = api.add_plugin(placeholder, ChildClassesPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
        # assert baseline
self.assertEqual(['TextPlugin'], plugin.get_child_classes(placeholder.slot, page))
CMS_PLACEHOLDER_CONF = {
'body': {
'child_classes': {
'ChildClassesPlugin': ['LinkPlugin', 'PicturePlugin'],
}
}
}
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
self.assertEqual(['LinkPlugin', 'PicturePlugin'],
plugin.get_child_classes(placeholder.slot, page))
plugin_pool.unregister_plugin(ChildClassesPlugin)
def test_plugin_parent_classes_from_settings(self):
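        """
        CMS_PLACEHOLDER_CONF can likewise override a plugin's
        parent_classes per placeholder slot.
        """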
page = api.create_page("page", "nav_playground.html", "en", published=True)
placeholder = page.placeholders.get(slot='body')
ParentClassesPlugin = type('ParentClassesPlugin', (CMSPluginBase,),
dict(parent_classes=['TextPlugin'], render_plugin=False))
plugin_pool.register_plugin(ParentClassesPlugin)
plugin = api.add_plugin(placeholder, ParentClassesPlugin, settings.LANGUAGES[0][0])
plugin = plugin.get_plugin_class_instance()
        # assert baseline
self.assertEqual(['TextPlugin'], plugin.get_parent_classes(placeholder.slot, page))
CMS_PLACEHOLDER_CONF = {
'body': {
'parent_classes': {
'ParentClassesPlugin': ['TestPlugin'],
}
}
}
with self.settings(CMS_PLACEHOLDER_CONF=CMS_PLACEHOLDER_CONF):
self.assertEqual(['TestPlugin'],
plugin.get_parent_classes(placeholder.slot, page))
plugin_pool.unregister_plugin(ParentClassesPlugin)
def test_plugin_translatable_content_getter_setter(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
created_plugin_id = self._create_text_plugin_on_page(page)
# now edit the plugin
plugin = self._edit_text_plugin(created_plugin_id, "Hello World")
self.assertEqual("Hello World", plugin.body)
# see if the getter works
self.assertEqual({'body': "Hello World"}, plugin.get_translatable_content())
# change the content
self.assertEqual(True, plugin.set_translatable_content({'body': "It works!"}))
# check if it changed
self.assertEqual("It works!", plugin.body)
# double check through the getter
self.assertEqual({'body': "It works!"}, plugin.get_translatable_content())
def test_plugin_pool_register_returns_plugin_class(self):
@plugin_pool.register_plugin
class DecoratorTestPlugin(CMSPluginBase):
render_plugin = False
name = "Test Plugin"
self.assertIsNotNone(DecoratorTestPlugin)
class FileSystemPluginTests(PluginsTestBaseCase):
def setUp(self):
super(FileSystemPluginTests, self).setUp()
call_command('collectstatic', interactive=False, verbosity=0, link=True)
def tearDown(self):
for directory in [settings.STATIC_ROOT, settings.MEDIA_ROOT]:
for root, dirs, files in os.walk(directory, topdown=False):
                # We need to walk() the directory tree since rmdir() cannot
                # remove non-empty directories
for name in files:
# Start by killing all files we walked
os.remove(os.path.join(root, name))
for name in dirs:
# Now all directories we walked...
os.rmdir(os.path.join(root, name))
super(FileSystemPluginTests, self).tearDown()
def test_fileplugin_icon_uppercase(self):
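        """
        The icon lookup should recognise the file type regardless of the
        case of the file extension.
        """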
page = api.create_page('testpage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot="body")
plugin = File(
plugin_type='FilePlugin',
placeholder=body,
position=1,
language=settings.LANGUAGE_CODE,
)
plugin.file.save("UPPERCASE.JPG", SimpleUploadedFile("UPPERCASE.jpg", b"content"), False)
plugin.add_root(instance=plugin)
        self.assertNotEqual(plugin.get_icon_url().find('jpg'), -1)
class PluginManyToManyTestCase(PluginsTestBaseCase):
def setUp(self):
self.super_user = self._create_user("test", True, True)
self.slave = self._create_user("slave", True)
self._login_context = self.login_user_context(self.super_user)
self._login_context.__enter__()
# create 3 sections
self.sections = []
self.section_pks = []
for i in range(3):
section = Section.objects.create(name="section %s" % i)
self.sections.append(section)
self.section_pks.append(section.pk)
self.section_count = len(self.sections)
# create 10 articles by section
for section in self.sections:
for j in range(10):
Article.objects.create(
title="article %s" % j,
section=section
)
self.FIRST_LANG = settings.LANGUAGES[0][0]
self.SECOND_LANG = settings.LANGUAGES[1][0]
def test_dynamic_plugin_template(self):
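        """
        get_render_template may pick a template per instance; both the
        default and the custom template paths are exercised below.
        """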
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
ph_en = page_en.placeholders.get(slot="body")
api.add_plugin(ph_en, "ArticleDynamicTemplatePlugin", "en", title="a title")
api.add_plugin(ph_en, "ArticleDynamicTemplatePlugin", "en", title="custom template")
request = self.get_request(path=page_en.get_absolute_url())
plugins = get_plugins(request, ph_en, page_en.template)
for plugin in plugins:
if plugin.title == 'custom template':
self.assertEqual(plugin.get_plugin_class_instance().get_render_template({}, plugin, ph_en), 'articles_custom.html')
self.assertTrue('Articles Custom template' in plugin.render_plugin({}, ph_en))
else:
self.assertEqual(plugin.get_plugin_class_instance().get_render_template({}, plugin, ph_en), 'articles.html')
self.assertFalse('Articles Custom template' in plugin.render_plugin({}, ph_en))
def test_add_plugin_with_m2m(self):
# add a new text plugin
self.assertEqual(ArticlePluginModel.objects.count(), 0)
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
page.publish('en')
placeholder = page.placeholders.get(slot="body")
plugin_data = {
'plugin_type': "ArticlePlugin",
'plugin_language': self.FIRST_LANG,
'plugin_parent': '',
'placeholder_id': placeholder.pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
pk = CMSPlugin.objects.all()[0].pk
expected = {
"url": "/en/admin/cms/page/edit-plugin/%s/" % pk,
"breadcrumb": [
{
"url": "/en/admin/cms/page/edit-plugin/%s/" % pk,
"title": "Articles"
}
],
'delete': '/en/admin/cms/page/delete-plugin/%s/' % pk
}
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + str(CMSPlugin.objects.all()[0].pk) + "/"
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
data = {
'title': "Articles Plugin 1",
"sections": self.section_pks
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(ArticlePluginModel.objects.count(), 1)
plugin = ArticlePluginModel.objects.all()[0]
self.assertEqual(self.section_count, plugin.sections.count())
response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
self.assertEqual(plugin.sections.through._meta.db_table, 'manytomany_rel_articlepluginmodel_sections')
def test_add_plugin_with_m2m_and_publisher(self):
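        """
        Publishing a page must copy the plugin's many-to-many relations
        over to the public plugin as well.
        """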
self.assertEqual(ArticlePluginModel.objects.count(), 0)
page_data = self.get_new_page_data()
response = self.client.post(URL_CMS_PAGE_ADD, page_data)
self.assertEqual(response.status_code, 302)
page = Page.objects.all()[0]
placeholder = page.placeholders.get(slot="body")
# add a plugin
plugin_data = {
'plugin_type': "ArticlePlugin",
'plugin_language': self.FIRST_LANG,
'plugin_parent': '',
'placeholder_id': placeholder.pk,
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
pk = CMSPlugin.objects.all()[0].pk
expected = {
"url": "/en/admin/cms/page/edit-plugin/%s/" % pk,
"breadcrumb": [
{
"url": "/en/admin/cms/page/edit-plugin/%s/" % pk,
"title": "Articles"
}
],
'delete': '/en/admin/cms/page/delete-plugin/%s/' % pk
}
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
# there should be only 1 plugin
self.assertEqual(1, CMSPlugin.objects.all().count())
articles_plugin_pk = CMSPlugin.objects.all()[0].pk
self.assertEqual(articles_plugin_pk, CMSPlugin.objects.all()[0].pk)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + str(CMSPlugin.objects.all()[0].pk) + "/"
data = {
'title': "Articles Plugin 1",
'sections': self.section_pks
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(1, ArticlePluginModel.objects.count())
articles_plugin = ArticlePluginModel.objects.all()[0]
self.assertEqual(u'Articles Plugin 1', articles_plugin.title)
self.assertEqual(self.section_count, articles_plugin.sections.count())
# check publish box
page = api.publish_page(page, self.super_user, 'en')
# there should now be two plugins - 1 draft, 1 public
self.assertEqual(2, CMSPlugin.objects.all().count())
self.assertEqual(2, ArticlePluginModel.objects.all().count())
db_counts = [plugin.sections.count() for plugin in ArticlePluginModel.objects.all()]
        expected = [self.section_count for _ in range(len(db_counts))]
self.assertEqual(expected, db_counts)
def test_copy_plugin_with_m2m(self):
page = api.create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
plugin = ArticlePluginModel(
plugin_type='ArticlePlugin',
placeholder=placeholder,
position=1,
language=self.FIRST_LANG)
plugin.add_root(instance=plugin)
edit_url = URL_CMS_PLUGIN_EDIT + str(plugin.pk) + "/"
data = {
'title': "Articles Plugin 1",
"sections": self.section_pks
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(ArticlePluginModel.objects.count(), 1)
self.assertEqual(ArticlePluginModel.objects.all()[0].sections.count(), self.section_count)
page_data = self.get_new_page_data()
        # create 2nd language page
page_data.update({
'language': self.SECOND_LANG,
'title': "%s %s" % (page.get_title(), self.SECOND_LANG),
})
response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + "?language=%s" % self.SECOND_LANG, page_data)
self.assertRedirects(response, URL_CMS_PAGE)
self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)
self.assertEqual(CMSPlugin.objects.count(), 1)
self.assertEqual(Page.objects.all().count(), 1)
copy_data = {
'source_placeholder_id': placeholder.pk,
'target_placeholder_id': placeholder.pk,
'target_language': self.SECOND_LANG,
'source_language': self.FIRST_LANG,
}
response = self.client.post(URL_CMS_PAGE + "copy-plugins/", copy_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode('utf8').count('"position":'), 1)
# assert copy success
self.assertEqual(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 1)
self.assertEqual(CMSPlugin.objects.count(), 2)
db_counts = [plgn.sections.count() for plgn in ArticlePluginModel.objects.all()]
expected = [self.section_count for _ in range(len(db_counts))]
self.assertEqual(expected, db_counts)
class PluginsMetaOptionsTests(TestCase):
''' TestCase set for ensuring that bugs like #992 are caught '''
    # these plugins are inlined because, due to the nature of the #992
    # ticket, we cannot import a single file containing all the plugin
    # variants, because importing calls __new__, at which point the error
    # occurs.
def test_meta_options_as_defaults(self):
''' handling when a CMSPlugin meta options are computed defaults '''
# this plugin relies on the base CMSPlugin and Model classes to
# decide what the app_label and db_table should be
plugin = TestPlugin.model
self.assertEqual(plugin._meta.db_table, 'meta_testpluginmodel')
self.assertEqual(plugin._meta.app_label, 'meta')
def test_meta_options_as_declared_defaults(self):
''' handling when a CMSPlugin meta options are declared as per defaults '''
# here, we declare the db_table and app_label explicitly, but to the same
# values as would be computed, thus making sure it's not a problem to
# supply options.
plugin = TestPlugin2.model
self.assertEqual(plugin._meta.db_table, 'meta_testpluginmodel2')
self.assertEqual(plugin._meta.app_label, 'meta')
def test_meta_options_custom_app_label(self):
''' make sure customised meta options on CMSPlugins don't break things '''
plugin = TestPlugin3.model
self.assertEqual(plugin._meta.db_table, 'one_thing_testpluginmodel3')
self.assertEqual(plugin._meta.app_label, 'one_thing')
def test_meta_options_custom_db_table(self):
''' make sure custom database table names are OK. '''
plugin = TestPlugin4.model
self.assertEqual(plugin._meta.db_table, 'or_another_4')
self.assertEqual(plugin._meta.app_label, 'meta')
def test_meta_options_custom_both(self):
''' We should be able to customise app_label and db_table together '''
plugin = TestPlugin5.model
self.assertEqual(plugin._meta.db_table, 'or_another_5')
self.assertEqual(plugin._meta.app_label, 'one_thing')
class LinkPluginTestCase(PluginsTestBaseCase):
    def test_does_not_verify_existence_of_url(self):
form = LinkForm(
{'name': 'Linkname', 'url': 'http://www.nonexistant.test'})
self.assertTrue(form.is_valid())
def test_opens_in_same_window_by_default(self):
"""Could not figure out how to render this plugin
Checking only for the values in the model"""
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test'})
link = form.save()
self.assertEqual(link.target, '')
def test_open_in_blank_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_blank'})
link = form.save()
self.assertEqual(link.target, '_blank')
def test_open_in_parent_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_parent'})
link = form.save()
self.assertEqual(link.target, '_parent')
def test_open_in_top_window(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': '_top'})
link = form.save()
self.assertEqual(link.target, '_top')
def test_open_in_nothing_else(self):
form = LinkForm({'name': 'Linkname',
'url': 'http://www.nonexistant.test', 'target': 'artificial'})
self.assertFalse(form.is_valid())
class NoDatabasePluginTests(TestCase):
def test_render_meta_is_unique(self):
text = Text()
link = Link()
self.assertNotEqual(id(text._render_meta), id(link._render_meta))
def test_render_meta_does_not_leak(self):
text = Text()
link = Link()
text._render_meta.text_enabled = False
link._render_meta.text_enabled = False
self.assertFalse(text._render_meta.text_enabled)
self.assertFalse(link._render_meta.text_enabled)
link._render_meta.text_enabled = True
self.assertFalse(text._render_meta.text_enabled)
self.assertTrue(link._render_meta.text_enabled)
def test_db_table_hack(self):
        # Plugin models have been moved because of the Django 1.7 AppConfig
from cms.test_utils.project.bunch_of_plugins.models import TestPlugin1
self.assertEqual(TestPlugin1._meta.db_table, 'bunch_of_plugins_testplugin1')
def test_db_table_hack_with_mixin(self):
        # Plugin models have been moved because of the Django 1.7 AppConfig
from cms.test_utils.project.bunch_of_plugins.models import TestPlugin2
self.assertEqual(TestPlugin2._meta.db_table, 'bunch_of_plugins_testplugin2')
def test_pickle(self):
text = Text()
text.__reduce__()
class PicturePluginTests(PluginsTestBaseCase):
def test_link_or_page(self):
"""Test a validator: you can enter a url or a page_link, but not both."""
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
picture = Picture(url="test")
# Note: don't call full_clean as it will check ALL fields - including
# the image, which we haven't defined. Call clean() instead which
# just validates the url and page_link fields.
picture.clean()
picture.page_link = page
picture.url = None
picture.clean()
picture.url = "test"
self.assertRaises(ValidationError, picture.clean)
class SimplePluginTests(TestCase):
def test_simple_naming(self):
class MyPlugin(CMSPluginBase):
render_template = 'base.html'
self.assertEqual(MyPlugin.name, 'My Plugin')
def test_simple_context(self):
class MyPlugin(CMSPluginBase):
render_template = 'base.html'
plugin = MyPlugin(ArticlePluginModel, admin.site)
context = {}
out_context = plugin.render(context, 1, 2)
self.assertEqual(out_context['instance'], 1)
self.assertEqual(out_context['placeholder'], 2)
self.assertIs(out_context, context)
class BrokenPluginTests(TestCase):
def test_import_broken_plugin(self):
"""
If there is an import error in the actual cms_plugin file it should
raise the ImportError rather than silently swallowing it -
in opposition to the ImportError if the file 'cms_plugins.py' doesn't
exist.
"""
new_apps = ['cms.test_utils.project.brokenpluginapp']
with self.settings(INSTALLED_APPS=new_apps):
plugin_pool.discovered = False
self.assertRaises(ImportError, plugin_pool.discover_plugins)
class MTIPluginsTestCase(PluginsTestBaseCase):
def test_add_edit_plugin(self):
        """
        Test that we can instantiate and use an MTI plugin
        """
        from cms.test_utils.project.mti_pluginapp.models import TestPluginBetaModel
# Create a page
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
# Add the MTI plugin
plugin_data = {
'plugin_type': "TestPluginBeta",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
plugin_id = self.get_response_pk(response)
self.assertEqual(plugin_id, CMSPlugin.objects.all()[0].pk)
# Test we can open the change form for the MTI plugin
edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
# Edit the MTI plugin
data = {
"alpha": "ALPHA",
"beta": "BETA"
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
# Test that the change was properly stored in the DB
plugin_model = TestPluginBetaModel.objects.all()[0]
self.assertEqual("ALPHA", plugin_model.alpha)
self.assertEqual("BETA", plugin_model.beta)
|
rmcgibbo/msmbuilder3
|
refs/heads/master
|
tests/test_pca.py
|
1
|
import numpy as np
from msmbuilder3 import PCA
import sklearn.decomposition
def test_pca():
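    # fit scikit-learn's PCA and our PCA on the same random data; the
    # recovered components should agree once normalized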
    X = np.random.randn(10, 10)
skpca = sklearn.decomposition.PCA(n_components=4)
skpca.fit(X)
pca = PCA(n_components=4)
pca.fit(X)
reference = skpca.components_
result = pca.components_
    # PCA components are only defined up to sign and scale, so normalize
    # each one to unit sum before comparing
for i in range(4):
reference[i] = reference[i] / np.sum(reference[i])
result[i] = result[i] / np.sum(result[i])
np.testing.assert_array_almost_equal(result, reference)
def test_pca_fit_update():
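    # an incremental fit (fit on X, then fit_update with Y) should match
    # a single batch fit on the stacked data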
X = np.random.randn(100, 10)
Y = np.random.randn(200, 10)
skpca = sklearn.decomposition.PCA(n_components=4)
skpca.fit(np.vstack((X, Y)))
pca = PCA(n_components=4)
pca.fit(X)
pca.fit_update(Y)
reference = skpca.components_
result = pca.components_
    # normalize as above so sign and scale differences don't matter
for i in range(4):
reference[i] = reference[i] / np.sum(reference[i])
result[i] = result[i] / np.sum(result[i])
np.testing.assert_array_almost_equal(result, reference)
|
lbogdan/czl-scrape
|
refs/heads/master
|
sanatate/scrapy_proj/pipelines/__init__.py
|
5
|
# -*- coding: utf-8 -*-
from scrapy_proj.pipelines.extrameta import *
from scrapy_proj.pipelines.post import *
|