repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
40223211/cadpbtest-0420 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/xml/dom/minicompat.py | 781 | """Python version compatibility support for minidom."""
# This module should only be imported using "import *".
#
# The following names are defined:
#
# NodeList -- lightest possible NodeList implementation
#
# EmptyNodeList -- lightest possible NodeList that is guaranteed to
# remain empty (immutable)
#
# StringTypes -- tuple of defined string types
#
# defproperty -- function used in conjunction with GetattrMagic;
# using these together is needed to make them work
# as efficiently as possible in both Python 2.2+
# and older versions. For example:
#
# class MyClass(GetattrMagic):
# def _get_myattr(self):
# return something
#
# defproperty(MyClass, "myattr",
# "return some value")
#
# For Python 2.2 and newer, this will construct a
# property object on the class, which avoids
# needing to override __getattr__(). It will only
# work for read-only attributes.
#
# For older versions of Python, inheriting from
# GetattrMagic will use the traditional
# __getattr__() hackery to achieve the same effect,
# but less efficiently.
#
# defproperty() should be used for each version of
# the relevant _get_<property>() function.
# Public API of this compatibility module.
__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]

import xml.dom

# Python 3 has a single text type; kept as a tuple for API compatibility
# with code that iterates over the supported string types.
StringTypes = (str,)
class NodeList(list):
    """A DOM NodeList backed directly by a Python list."""

    __slots__ = ()

    def item(self, index):
        """Return the node at *index*, or None when out of range."""
        if index < 0 or index >= len(self):
            return None
        return self[index]

    def _get_length(self):
        """Read accessor backing the DOM ``length`` attribute."""
        return len(self)

    def _set_length(self, value):
        """Reject writes: the DOM defines ``length`` as read-only."""
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")

    def __getstate__(self):
        # Pickle as a plain list; __slots__ leaves no instance dict to save.
        return list(self)

    def __setstate__(self, state):
        # Restore by replacing our contents in place.
        self[:] = state
class EmptyNodeList(tuple):
    """An immutable NodeList that is guaranteed to stay empty."""

    __slots__ = ()

    def __add__(self, other):
        """Concatenation promotes the result to a mutable NodeList."""
        result = NodeList()
        result.extend(other)
        return result

    def __radd__(self, other):
        """Right-hand concatenation also yields a mutable NodeList."""
        result = NodeList()
        result.extend(other)
        return result

    def item(self, index):
        """There are no nodes, so every index misses."""
        return None

    def _get_length(self):
        """The list is always empty."""
        return 0

    def _set_length(self, value):
        """Reject writes: the DOM defines ``length`` as read-only."""
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute 'length'")

    length = property(_get_length, _set_length,
                      doc="The number of nodes in the NodeList.")
def defproperty(klass, name, doc):
    """Install a read-only property *name* on *klass*.

    The getter is the class's existing ``_get_<name>`` method; any
    attempt to assign raises ``xml.dom.NoModificationAllowedErr``.
    """
    getter = getattr(klass, "_get_" + name)

    def setter(self, value, name=name):
        # name is bound as a default so the message survives the closure.
        raise xml.dom.NoModificationAllowedErr(
            "attempt to modify read-only attribute " + repr(name))

    assert not hasattr(klass, "_set_" + name), \
        "expected not to find _set_" + name
    setattr(klass, name, property(getter, setter, doc=doc))
|
w1ll1am23/home-assistant | refs/heads/dev | homeassistant/components/baidu/__init__.py | 36 | """Support for Baidu integration."""
|
moorescloud/holideck | refs/heads/master | iotas/devices/moorescloud/light/__init__.py | 3 | print "Hello light"
|
openstax/openstax-cms | refs/heads/master | snippets/migrations/0004_customizationformcontent.py | 2 | # Generated by Django 3.0.4 on 2020-09-30 16:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration adding the CustomizationFormContent
    # model. Dependency order and field order are significant to the
    # migration framework, so the declarations below must not be reordered.

    dependencies = [
        ('snippets', '0003_delete_givetoday'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomizationFormContent',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('heading', models.CharField(max_length=255)),
                ('subheading', models.CharField(max_length=255)),
                ('disclaimer', models.TextField()),
                ('next_steps', models.TextField()),
            ],
        ),
    ]
|
ChrisBeaumont/luigi | refs/heads/master | examples/wordcount_hadoop.py | 66 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
import luigi.contrib.hadoop
import luigi.contrib.hdfs
# To make this run, you probably want to edit /etc/luigi/client.cfg and add something like:
#
# [hadoop]
# jar: /usr/lib/hadoop-xyz/hadoop-streaming-xyz-123.jar
class InputText(luigi.ExternalTask):
    """
    External dependency: a per-date text file in HDFS that something
    outside of Luigi is expected to have produced. Being a
    :py:class:`luigi.task.ExternalTask`, this task never runs itself;
    it only declares the target it stands for.
    """

    date = luigi.DateParameter()

    def output(self):
        """
        Return the target output for this task — a file expected to be
        present in HDFS.

        :return: the target output for this task.
        :rtype: object (:py:class:`luigi.target.Target`)
        """
        hdfs_path = self.date.strftime('/tmp/text/%Y-%m-%d.txt')
        return luigi.contrib.hdfs.HdfsTarget(hdfs_path)
class WordCount(luigi.contrib.hadoop.JobTask):
    """
    Hadoop streaming job that counts words across the
    :py:class:`~.InputText` files of a date interval and writes the
    totals to its :py:meth:`~.WordCount.output` target in HDFS.
    Execution is handled by :py:meth:`luigi.contrib.hadoop.JobTask.run`.
    """

    date_interval = luigi.DateIntervalParameter()

    def requires(self):
        """
        One :py:class:`~.InputText` dependency per day in the interval.

        :return: list of object (:py:class:`luigi.task.Task`)
        """
        return [InputText(day) for day in self.date_interval.dates()]

    def output(self):
        """
        HDFS target the word counts are written to on success.

        :return: the target output for this task.
        :rtype: object (:py:class:`luigi.target.Target`)
        """
        return luigi.contrib.hdfs.HdfsTarget('/tmp/text-count/%s' % self.date_interval)

    def mapper(self, line):
        # Emit (word, 1) for every whitespace-separated token.
        for token in line.strip().split():
            yield token, 1

    def reducer(self, key, values):
        # Sum the per-word partial counts.
        yield key, sum(values)
if __name__ == '__main__':
    # Entry point: hand control to the Luigi command-line client.
    luigi.run()
|
alshedivat/tensorflow | refs/heads/master | tensorflow/python/ops/distributions/uniform.py | 6 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Uniform distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("distributions.Uniform")
class Uniform(distribution.Distribution):
  """Uniform distribution with `low` and `high` parameters.

  #### Mathematical Details

  The probability density function (pdf) is,

  ```none
  pdf(x; a, b) = I[a <= x < b] / Z
  Z = b - a
  ```

  where

  - `low = a`,
  - `high = b`,
  - `Z` is the normalizing constant, and
  - `I[predicate]` is the [indicator function](
    https://en.wikipedia.org/wiki/Indicator_function) for `predicate`.

  The parameters `low` and `high` must be shaped in a way that supports
  broadcasting (e.g., `high - low` is a valid operation).

  #### Examples

  ```python
  # Without broadcasting:
  u1 = Uniform(low=3.0, high=4.0)  # a single uniform distribution [3, 4]
  u2 = Uniform(low=[1.0, 2.0],
               high=[3.0, 4.0])  # 2 distributions [1, 3], [2, 4]
  u3 = Uniform(low=[[1.0, 2.0],
                    [3.0, 4.0]],
               high=[[1.5, 2.5],
                     [3.5, 4.5]])  # 4 distributions
  ```

  ```python
  # With broadcasting:
  u1 = Uniform(low=3.0, high=[5.0, 6.0, 7.0])  # 3 distributions
  ```
  """

  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self,
               low=0.,
               high=1.,
               validate_args=False,
               allow_nan_stats=True,
               name="Uniform"):
    """Initialize a batch of Uniform distributions.

    Args:
      low: Floating point tensor, lower boundary of the output interval. Must
        have `low < high`.
      high: Floating point tensor, upper boundary of the output interval. Must
        have `low < high`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      InvalidArgumentError: if `low >= high` and `validate_args=False`.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[low, high]) as name:
      # The identity ops anchor low/high in the graph; the validity
      # assertion is wired in first only when validate_args is requested.
      with ops.control_dependencies([
          check_ops.assert_less(
              low, high, message="uniform not defined when low >= high.")
      ] if validate_args else []):
        self._low = array_ops.identity(low, name="low")
        self._high = array_ops.identity(high, name="high")
        check_ops.assert_same_float_dtype([self._low, self._high])
    super(Uniform, self).__init__(
        dtype=self._low.dtype,
        reparameterization_type=distribution.FULLY_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._low,
                       self._high],
        name=name)

  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters share the same (sample) shape.
    return dict(
        zip(("low", "high"),
            ([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def low(self):
    """Lower boundary of the output interval."""
    return self._low

  @property
  def high(self):
    """Upper boundary of the output interval."""
    return self._high

  def range(self, name="range"):
    """`high - low`."""
    with self._name_scope(name):
      return self.high - self.low

  def _batch_shape_tensor(self):
    # Batch shape is the broadcast of the two parameter shapes.
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.low),
        array_ops.shape(self.high))

  def _batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.low.get_shape(),
        self.high.get_shape())

  def _event_shape_tensor(self):
    # Scalar event: an empty shape vector.
    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    # Draw standard uniforms, then affinely rescale into [low, high).
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    samples = random_ops.random_uniform(shape=shape,
                                        dtype=self.dtype,
                                        seed=seed)
    return self.low + self.range() * samples

  def _prob(self, x):
    broadcasted_x = x * array_ops.ones(
        self.batch_shape_tensor(), dtype=x.dtype)
    # NaN inputs propagate; density is 1/Z inside [low, high), else 0.
    return array_ops.where(
        math_ops.is_nan(broadcasted_x),
        broadcasted_x,
        array_ops.where(
            math_ops.logical_or(broadcasted_x < self.low,
                                broadcasted_x >= self.high),
            array_ops.zeros_like(broadcasted_x),
            array_ops.ones_like(broadcasted_x) / self.range()))

  def _cdf(self, x):
    broadcast_shape = array_ops.broadcast_dynamic_shape(
        array_ops.shape(x), self.batch_shape_tensor())
    zeros = array_ops.zeros(broadcast_shape, dtype=self.dtype)
    ones = array_ops.ones(broadcast_shape, dtype=self.dtype)
    broadcasted_x = x * ones
    # CDF: 0 below `low`, a linear ramp on [low, high], 1 above `high`.
    result_if_not_big = array_ops.where(
        x < self.low, zeros, (broadcasted_x - self.low) / self.range())
    return array_ops.where(x >= self.high, ones, result_if_not_big)

  def _entropy(self):
    return math_ops.log(self.range())

  def _mean(self):
    return (self.low + self.high) / 2.

  def _variance(self):
    return math_ops.square(self.range()) / 12.

  def _stddev(self):
    return self.range() / math.sqrt(12.)
|
plumgrid/plumgrid-nova | refs/heads/master | nova/virt/libvirt/imagebackend.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import contextlib
import os
from oslo.config import cfg
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
# Configuration options for selecting the image backend ('raw', 'qcow2',
# 'lvm' or 'default') and for tuning LVM-backed images.
__imagebackend_opts = [
    cfg.StrOpt('libvirt_images_type',
               default='default',
               help='VM Images format. Acceptable values are: raw, qcow2, lvm,'
                    ' default. If default is specified,'
                    ' then use_cow_images flag is used instead of this one.'),
    cfg.StrOpt('libvirt_images_volume_group',
               help='LVM Volume Group that is used for VM images, when you'
                    ' specify libvirt_images_type=lvm.'),
    cfg.BoolOpt('libvirt_sparse_logical_volumes',
                default=False,
                help='Create sparse logical volumes (with virtualsize)'
                     ' if this flag is set to True.'),
    cfg.IntOpt('libvirt_lvm_snapshot_size',
               default=1000,
               help='The amount of storage (in megabytes) to allocate for LVM'
                    ' snapshot copy-on-write blocks.'),
]

CONF = cfg.CONF
CONF.register_opts(__imagebackend_opts)
# Options declared in other modules that this one also reads.
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
CONF.import_opt('preallocate_images', 'nova.virt.driver')

LOG = logging.getLogger(__name__)
class Image(object):
    """Abstract base class for instance disk images.

    Subclasses implement a concrete storage backend (raw file, qcow2
    file, LVM volume) and must provide :meth:`create_image`.
    """

    __metaclass__ = abc.ABCMeta

    def __init__(self, source_type, driver_format, is_block_dev=False):
        """Image initialization.

        :source_type: block or file
        :driver_format: raw or qcow2
        :is_block_dev: whether the backing storage is a block device
        """
        self.source_type = source_type
        self.driver_format = driver_format
        self.is_block_dev = is_block_dev
        # Subclasses opt in to preallocation (fallocate) support.
        self.preallocate = False

        # NOTE(mikal): We need a lock directory which is shared along with
        # instance files, to cover the scenario where multiple compute nodes
        # are trying to create a base file at the same time
        self.lock_path = os.path.join(CONF.instances_path, 'locks')

    @abc.abstractmethod
    def create_image(self, prepare_template, base, size, *args, **kwargs):
        """Create image from template.

        Contains specific behavior for each image type.

        :prepare_template: function, that creates template.
            Should accept `target` argument.
        :base: Template name
        :size: Size of created image in bytes
        """
        pass

    def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
                     extra_specs, hypervisor_version):
        """Get `LibvirtConfigGuestDisk` filled for this image.

        :disk_dev: Disk bus device name
        :disk_bus: Disk bus type
        :device_type: Device type for this image.
        :cache_mode: Caching mode for this image
        :extra_specs: Instance type extra specs dict.
        :hypervisor_version: used to pick a compatible disk driver name
        """
        info = vconfig.LibvirtConfigGuestDisk()
        info.source_type = self.source_type
        info.source_device = device_type
        info.target_bus = disk_bus
        info.target_dev = disk_dev
        info.driver_cache = cache_mode
        info.driver_format = self.driver_format
        driver_name = libvirt_utils.pick_disk_driver_name(hypervisor_version,
                                                          self.is_block_dev)
        info.driver_name = driver_name
        info.source_path = self.path
        tune_items = ['disk_read_bytes_sec', 'disk_read_iops_sec',
                      'disk_write_bytes_sec', 'disk_write_iops_sec',
                      'disk_total_bytes_sec', 'disk_total_iops_sec']
        # Note(yaguang): Currently, the only tuning available is Block I/O
        # throttling for qemu.
        if self.source_type in ['file', 'block']:
            # extra_specs keys look like 'quota:disk_read_bytes_sec'.
            for key, value in extra_specs.iteritems():
                scope = key.split(':')
                if len(scope) > 1 and scope[0] == 'quota':
                    if scope[1] in tune_items:
                        setattr(info, scope[1], value)
        return info

    def cache(self, fetch_func, filename, size=None, *args, **kwargs):
        """Creates image from template.

        Ensures that template and image not already exists.
        Ensures that base directory exists.
        Synchronizes on template fetching.

        :fetch_func: Function that creates the base image
            Should accept `target` argument.
        :filename: Name of the file in the image directory
        :size: Size of created image in bytes (optional)
        """
        @utils.synchronized(filename, external=True, lock_path=self.lock_path)
        def call_if_not_exists(target, *args, **kwargs):
            if not os.path.exists(target):
                fetch_func(target=target, *args, **kwargs)
            elif CONF.libvirt_images_type == "lvm" and \
                    'ephemeral_size' in kwargs:
                # LVM ephemeral disks are regenerated even when present.
                fetch_func(target=target, *args, **kwargs)

        base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
        if not os.path.exists(base_dir):
            fileutils.ensure_tree(base_dir)
        base = os.path.join(base_dir, filename)

        if not os.path.exists(self.path) or not os.path.exists(base):
            self.create_image(call_if_not_exists, base, size,
                              *args, **kwargs)

        if size and self.preallocate and self._can_fallocate():
            # Reserve the blocks up front without changing the file length.
            utils.execute('fallocate', '-n', '-l', size, self.path)

    def _can_fallocate(self):
        """Check once per class, whether fallocate(1) is available,
        and that the instances directory supports fallocate(2).
        """
        can_fallocate = getattr(self.__class__, 'can_fallocate', None)
        if can_fallocate is None:
            # Probe with a throwaway file next to the real image path.
            _out, err = utils.trycmd('fallocate', '-n', '-l', '1',
                                     self.path + '.fallocate_test')
            fileutils.delete_if_exists(self.path + '.fallocate_test')
            can_fallocate = not err
            # Cache the result on the class so the probe runs only once.
            self.__class__.can_fallocate = can_fallocate
        if not can_fallocate:
            LOG.error('Unable to preallocate_images=%s at path: %s' %
                      (CONF.preallocate_images, self.path))
        return can_fallocate

    def snapshot_create(self):
        # Backend-specific; optional capability.
        raise NotImplementedError()

    def snapshot_extract(self, target, out_format):
        # Backend-specific; optional capability.
        raise NotImplementedError()

    def snapshot_delete(self):
        # Backend-specific; optional capability.
        raise NotImplementedError()
class Raw(Image):
    """Image backend storing each instance disk as a raw file."""

    def __init__(self, instance=None, disk_name=None, path=None,
                 snapshot_name=None):
        super(Raw, self).__init__("file", "raw", is_block_dev=False)

        self.path = (path or
                     os.path.join(libvirt_utils.get_instance_path(instance),
                                  disk_name))
        self.snapshot_name = snapshot_name
        self.preallocate = CONF.preallocate_images != 'none'
        self.correct_format()

    def correct_format(self):
        # The file on disk may actually be qcow2 (e.g. a downloaded
        # image); trust qemu-img's detection over our 'raw' default.
        if os.path.exists(self.path):
            data = images.qemu_img_info(self.path)
            self.driver_format = data.file_format or 'raw'

    def create_image(self, prepare_template, base, size, *args, **kwargs):
        @utils.synchronized(base, external=True, lock_path=self.lock_path)
        def copy_raw_image(base, target, size):
            # Copy the template, then grow it to the requested size.
            libvirt_utils.copy_image(base, target)
            if size:
                disk.extend(target, size)

        # No image_id means the template is generated (e.g. ephemeral
        # or swap disk) rather than fetched from the image service.
        generating = 'image_id' not in kwargs
        if generating:
            #Generating image in place
            prepare_template(target=self.path, *args, **kwargs)
        else:
            prepare_template(target=base, *args, **kwargs)
            if not os.path.exists(self.path):
                with fileutils.remove_path_on_error(self.path):
                    copy_raw_image(base, self.path, size)
        self.correct_format()

    def snapshot_create(self):
        # Raw files need no backend-side snapshot preparation.
        pass

    def snapshot_extract(self, target, out_format):
        images.convert_image(self.path, target, out_format)

    def snapshot_delete(self):
        # Nothing was created in snapshot_create, so nothing to remove.
        pass
class Qcow2(Image):
    """Image backend storing each disk as a qcow2 copy-on-write file
    layered on a shared, cached base image.
    """

    def __init__(self, instance=None, disk_name=None, path=None,
                 snapshot_name=None):
        super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)

        self.path = (path or
                     os.path.join(libvirt_utils.get_instance_path(instance),
                                  disk_name))
        self.snapshot_name = snapshot_name
        self.preallocate = CONF.preallocate_images != 'none'

    def create_image(self, prepare_template, base, size, *args, **kwargs):
        """Create a COW disk on top of `base`, fetching `base` if needed.

        :prepare_template: callable that produces the base image; must
            accept a `target` keyword argument
        :base: path of the (shared) base image
        :size: requested disk size in bytes, or None/0 to keep the
            base image's own size

        Raises InstanceTypeDiskTooSmall when `size` is smaller than the
        base image's virtual size.
        """
        @utils.synchronized(base, external=True, lock_path=self.lock_path)
        def copy_qcow2_image(base, target, size):
            # TODO(pbrady): Consider copying the cow image here
            # with preallocation=metadata set for performance reasons.
            # This would be keyed on a 'preallocate_images' setting.
            libvirt_utils.create_cow_image(base, target)
            if size:
                disk.extend(target, size)

        # Download the unmodified base image unless we already have a copy.
        if not os.path.exists(base):
            prepare_template(target=base, *args, **kwargs)

        legacy_backing_size = None
        legacy_base = base

        # Determine whether an existing qcow2 disk uses a legacy backing by
        # actually looking at the image itself and parsing the output of the
        # backing file it expects to be using. Legacy backings are named
        # '<base>_<size-in-GB>'.
        if os.path.exists(self.path):
            backing_path = libvirt_utils.get_disk_backing_file(self.path)
            if backing_path is not None:
                backing_file = os.path.basename(backing_path)
                backing_parts = backing_file.rpartition('_')
                if backing_file != backing_parts[-1] and \
                        backing_parts[-1].isdigit():
                    legacy_backing_size = int(backing_parts[-1])
                    legacy_base += '_%d' % legacy_backing_size
                    legacy_backing_size *= 1024 * 1024 * 1024

        # Create the legacy backing file if necessary.
        if legacy_backing_size:
            if not os.path.exists(legacy_base):
                with fileutils.remove_path_on_error(legacy_base):
                    libvirt_utils.copy_image(base, legacy_base)
                    disk.extend(legacy_base, legacy_backing_size)

        # NOTE(cfb): Having a flavor that sets the root size to 0 and having
        #            nova effectively ignore that size and use the size of the
        #            image is considered a feature at this time, not a bug.
        disk_size = disk.get_disk_size(base)
        if size and size < disk_size:
            # Fixed: the two literals previously concatenated without a
            # space, logging e.g. "...virtual size 10larger than...".
            msg = _('%(base)s virtual size %(disk_size)s '
                    'larger than flavor root disk size %(size)s')
            LOG.error(msg % {'base': base,
                             'disk_size': disk_size,
                             'size': size})
            raise exception.InstanceTypeDiskTooSmall()
        if not os.path.exists(self.path):
            with fileutils.remove_path_on_error(self.path):
                copy_qcow2_image(base, self.path, size)

    def snapshot_create(self):
        libvirt_utils.create_snapshot(self.path, self.snapshot_name)

    def snapshot_extract(self, target, out_format):
        libvirt_utils.extract_snapshot(self.path, 'qcow2',
                                       self.snapshot_name, target,
                                       out_format)

    def snapshot_delete(self):
        libvirt_utils.delete_snapshot(self.path, self.snapshot_name)
class Lvm(Image):
    """Image backend storing each disk as an LVM logical volume."""

    @staticmethod
    def escape(filename):
        # '_' separates instance name from disk name in the LV name,
        # so literal underscores in either component are doubled.
        return filename.replace('_', '__')

    def __init__(self, instance=None, disk_name=None, path=None,
                 snapshot_name=None):
        super(Lvm, self).__init__("block", "raw", is_block_dev=True)

        if path:
            # Existing volume: derive VG/LV from the device path.
            info = libvirt_utils.logical_volume_info(path)
            self.vg = info['VG']
            self.lv = info['LV']
            self.path = path
        else:
            if not CONF.libvirt_images_volume_group:
                raise RuntimeError(_('You should specify'
                                     ' libvirt_images_volume_group'
                                     ' flag to use LVM images.'))
            self.vg = CONF.libvirt_images_volume_group
            self.lv = '%s_%s' % (self.escape(instance['name']),
                                 self.escape(disk_name))
            self.path = os.path.join('/dev', self.vg, self.lv)

        # TODO(pbrady): possibly deprecate libvirt_sparse_logical_volumes
        # for the more general preallocate_images
        self.sparse = CONF.libvirt_sparse_logical_volumes
        self.preallocate = not self.sparse

        if snapshot_name:
            self.snapshot_name = snapshot_name
            self.snapshot_path = os.path.join('/dev', self.vg,
                                              self.snapshot_name)

    def _can_fallocate(self):
        # fallocate applies to files, not block devices.
        return False

    def create_image(self, prepare_template, base, size, *args, **kwargs):
        @utils.synchronized(base, external=True, lock_path=self.lock_path)
        def create_lvm_image(base, size):
            # Volume is at least as large as the base image; grow the
            # filesystem afterwards when a larger size was requested.
            base_size = disk.get_disk_size(base)
            resize = size > base_size
            size = size if resize else base_size
            libvirt_utils.create_lvm_image(self.vg, self.lv,
                                           size, sparse=self.sparse)
            images.convert_image(base, self.path, 'raw', run_as_root=True)
            if resize:
                disk.resize2fs(self.path, run_as_root=True)

        generated = 'ephemeral_size' in kwargs

        #Generate images with specified size right on volume
        if generated and size:
            libvirt_utils.create_lvm_image(self.vg, self.lv,
                                           size, sparse=self.sparse)
            with self.remove_volume_on_error(self.path):
                prepare_template(target=self.path, *args, **kwargs)
        else:
            prepare_template(target=base, *args, **kwargs)
            with self.remove_volume_on_error(self.path):
                create_lvm_image(base, size)

    @contextlib.contextmanager
    def remove_volume_on_error(self, path):
        # Roll back the half-built volume if image creation fails,
        # re-raising the original exception.
        try:
            yield
        except Exception:
            with excutils.save_and_reraise_exception():
                libvirt_utils.remove_logical_volumes(path)

    def snapshot_create(self):
        size = CONF.libvirt_lvm_snapshot_size
        cmd = ('lvcreate', '-L', size, '-s', '--name', self.snapshot_name,
               self.path)
        libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)

    def snapshot_extract(self, target, out_format):
        images.convert_image(self.snapshot_path, target, out_format,
                             run_as_root=True)

    def snapshot_delete(self):
        # NOTE (rmk): Snapshot volumes are automatically zeroed by LVM
        cmd = ('lvremove', '-f', self.snapshot_path)
        libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)
class Backend(object):
    """Maps image-type names to their Image subclasses."""

    def __init__(self, use_cow):
        # 'default' honours the legacy use_cow_images flag.
        self.BACKEND = {
            'raw': Raw,
            'qcow2': Qcow2,
            'lvm': Lvm,
            'default': Qcow2 if use_cow else Raw
        }

    def backend(self, image_type=None):
        """Return the backend class for *image_type*.

        Falls back to CONF.libvirt_images_type when no type is given;
        raises RuntimeError for an unknown type.
        """
        if not image_type:
            image_type = CONF.libvirt_images_type
        cls = self.BACKEND.get(image_type)
        if not cls:
            raise RuntimeError(_('Unknown image_type=%s') % image_type)
        return cls

    def image(self, instance, disk_name, image_type=None):
        """Constructs image for selected backend

        :instance: Instance name.
        :name: Image name.
        :image_type: Image type.
            Optional, is CONF.libvirt_images_type by default.
        """
        cls = self.backend(image_type)
        return cls(instance=instance, disk_name=disk_name)

    def snapshot(self, disk_path, snapshot_name, image_type=None):
        """Returns snapshot for given image

        :path: path to image
        :snapshot_name: snapshot name
        :image_type: type of image
        """
        cls = self.backend(image_type)
        return cls(path=disk_path, snapshot_name=snapshot_name)
|
seanthegeek/crits | refs/heads/master | crits/objects/__init__.py | 12133432 | |
SimonSapin/cssselect | refs/heads/master | tests/__init__.py | 12133432 | |
Aptitudetech/ERPNext | refs/heads/master | erpnext/hr/doctype/vehicle/__init__.py | 12133432 | |
sblancov/hello_world | refs/heads/master | python/flask/api/__init__.py | 12133432 | |
Curso-OpenShift/Formulario | refs/heads/master | OverFlow/ProjectFormulario/env/lib/python2.7/site-packages/django/core/checks/compatibility/__init__.py | 12133432 | |
ZeroXn/mezzanine | refs/heads/master | mezzanine/generic/migrations/__init__.py | 12133432 | |
ENjOyAbLE1991/scrapy | refs/heads/master | scrapy/extensions/httpcache.py | 102 | from __future__ import print_function
import os
import gzip
from six.moves import cPickle as pickle
from importlib import import_module
from time import time
from weakref import WeakKeyDictionary
from email.utils import mktime_tz, parsedate_tz
from w3lib.http import headers_raw_to_dict, headers_dict_to_raw
from scrapy.http import Headers, Response
from scrapy.responsetypes import responsetypes
from scrapy.utils.request import request_fingerprint
from scrapy.utils.project import data_path
from scrapy.utils.httpobj import urlparse_cached
class DummyPolicy(object):
    """Cache policy that caches everything except configured schemes and
    HTTP status codes, and never considers a cached entry stale."""

    def __init__(self, settings):
        self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES')
        self.ignore_http_codes = [
            int(code)
            for code in settings.getlist('HTTPCACHE_IGNORE_HTTP_CODES')
        ]

    def should_cache_request(self, request):
        # Skip schemes the user explicitly excluded from caching.
        scheme = urlparse_cached(request).scheme
        return scheme not in self.ignore_schemes

    def should_cache_response(self, response, request):
        # Skip status codes the user explicitly excluded.
        return response.status not in self.ignore_http_codes

    def is_cached_response_fresh(self, response, request):
        # Dummy policy: cached entries never expire.
        return True

    def is_cached_response_valid(self, cachedresponse, response, request):
        # Revalidation always succeeds under the dummy policy.
        return True
class RFC2616Policy(object):
MAXAGE = 3600 * 24 * 365 # one year
def __init__(self, settings):
self.always_store = settings.getbool('HTTPCACHE_ALWAYS_STORE')
self.ignore_schemes = settings.getlist('HTTPCACHE_IGNORE_SCHEMES')
self.ignore_response_cache_controls = settings.getlist('HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS')
self._cc_parsed = WeakKeyDictionary()
def _parse_cachecontrol(self, r):
if r not in self._cc_parsed:
cch = r.headers.get('Cache-Control', '')
parsed = parse_cachecontrol(cch)
if isinstance(r, Response):
for key in self.ignore_response_cache_controls:
parsed.pop(key, None)
self._cc_parsed[r] = parsed
return self._cc_parsed[r]
def should_cache_request(self, request):
if urlparse_cached(request).scheme in self.ignore_schemes:
return False
cc = self._parse_cachecontrol(request)
# obey user-agent directive "Cache-Control: no-store"
if 'no-store' in cc:
return False
# Any other is eligible for caching
return True
def should_cache_response(self, response, request):
# What is cacheable - http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec14.9.1
# Response cacheability - http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.4
# Status code 206 is not included because cache can not deal with partial contents
cc = self._parse_cachecontrol(response)
# obey directive "Cache-Control: no-store"
if 'no-store' in cc:
return False
# Never cache 304 (Not Modified) responses
elif response.status == 304:
return False
# Cache unconditionally if configured to do so
elif self.always_store:
return True
# Any hint on response expiration is good
elif 'max-age' in cc or 'Expires' in response.headers:
return True
# Firefox fallbacks this statuses to one year expiration if none is set
elif response.status in (300, 301, 308):
return True
# Other statuses without expiration requires at least one validator
elif response.status in (200, 203, 401):
return 'Last-Modified' in response.headers or 'ETag' in response.headers
# Any other is probably not eligible for caching
# Makes no sense to cache responses that does not contain expiration
# info and can not be revalidated
else:
return False
def is_cached_response_fresh(self, cachedresponse, request):
cc = self._parse_cachecontrol(cachedresponse)
ccreq = self._parse_cachecontrol(request)
if 'no-cache' in cc or 'no-cache' in ccreq:
return False
now = time()
freshnesslifetime = self._compute_freshness_lifetime(cachedresponse, request, now)
currentage = self._compute_current_age(cachedresponse, request, now)
reqmaxage = self._get_max_age(ccreq)
if reqmaxage is not None:
freshnesslifetime = min(freshnesslifetime, reqmaxage)
if currentage < freshnesslifetime:
return True
if 'max-stale' in ccreq and 'must-revalidate' not in cc:
# From RFC2616: "Indicates that the client is willing to
# accept a response that has exceeded its expiration time.
# If max-stale is assigned a value, then the client is
# willing to accept a response that has exceeded its
# expiration time by no more than the specified number of
# seconds. If no value is assigned to max-stale, then the
# client is willing to accept a stale response of any age."
staleage = ccreq['max-stale']
if staleage is None:
return True
try:
if currentage < freshnesslifetime + max(0, int(staleage)):
return True
except ValueError:
pass
# Cached response is stale, try to set validators if any
self._set_conditional_validators(request, cachedresponse)
return False
def is_cached_response_valid(self, cachedresponse, response, request):
# Use the cached response if the new response is a server error,
# as long as the old response didn't specify must-revalidate.
if response.status >= 500:
cc = self._parse_cachecontrol(cachedresponse)
if 'must-revalidate' not in cc:
return True
# Use the cached response if the server says it hasn't changed.
return response.status == 304
def _set_conditional_validators(self, request, cachedresponse):
if 'Last-Modified' in cachedresponse.headers:
request.headers['If-Modified-Since'] = cachedresponse.headers['Last-Modified']
if 'ETag' in cachedresponse.headers:
request.headers['If-None-Match'] = cachedresponse.headers['ETag']
def _get_max_age(self, cc):
try:
return max(0, int(cc['max-age']))
except (KeyError, ValueError):
return None
    def _compute_freshness_lifetime(self, response, request, now):
        """Return how long (seconds) *response* may be considered fresh.

        Precedence: Cache-Control max-age, then Expires, then a heuristic
        based on Last-Modified, then MAXAGE for permanently-cacheable
        status codes, else 0.
        """
        # Reference nsHttpResponseHead::ComputeFreshnessLifetime
        # http://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#410
        cc = self._parse_cachecontrol(response)
        maxage = self._get_max_age(cc)
        if maxage is not None:
            return maxage
        # Parse date header or synthesize it if none exists
        date = rfc1123_to_epoch(response.headers.get('Date')) or now
        # Try HTTP/1.0 Expires header
        if 'Expires' in response.headers:
            expires = rfc1123_to_epoch(response.headers['Expires'])
            # When parsing Expires header fails RFC 2616 section 14.21 says we
            # should treat this as an expiration time in the past.
            return max(0, expires - date) if expires else 0
        # Fallback to heuristic using last-modified header
        # This is not in RFC but on Firefox caching implementation
        # (lifetime = 10% of the time elapsed since last modification).
        lastmodified = rfc1123_to_epoch(response.headers.get('Last-Modified'))
        if lastmodified and lastmodified <= date:
            return (date - lastmodified) / 10
        # This request can be cached indefinitely
        if response.status in (300, 301, 308):
            return self.MAXAGE
        # Insufficient information to compute freshness lifetime
        return 0
def _compute_current_age(self, response, request, now):
# Reference nsHttpResponseHead::ComputeCurrentAge
# http://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#366
currentage = 0
# If Date header is not set we assume it is a fast connection, and
# clock is in sync with the server
date = rfc1123_to_epoch(response.headers.get('Date')) or now
if now > date:
currentage = now - date
if 'Age' in response.headers:
try:
age = int(response.headers['Age'])
currentage = max(currentage, age)
except ValueError:
pass
return currentage
class DbmCacheStorage(object):
    """HTTP cache storage backed by an anydbm-style database per spider."""
    def __init__(self, settings):
        self.cachedir = data_path(settings['HTTPCACHE_DIR'], createdir=True)
        self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
        # HTTPCACHE_DBM_MODULE names the dbm implementation module to use.
        self.dbmodule = import_module(settings['HTTPCACHE_DBM_MODULE'])
        self.db = None
    def open_spider(self, spider):
        # One database file per spider, created on demand ('c' flag).
        dbpath = os.path.join(self.cachedir, '%s.db' % spider.name)
        self.db = self.dbmodule.open(dbpath, 'c')
    def close_spider(self, spider):
        self.db.close()
    def retrieve_response(self, spider, request):
        """Rebuild and return the cached Response, or None when not cached."""
        data = self._read_data(spider, request)
        if data is None:
            return  # not cached
        url = data['url']
        status = data['status']
        headers = Headers(data['headers'])
        body = data['body']
        respcls = responsetypes.from_args(headers=headers, url=url)
        response = respcls(url=url, headers=headers, status=status, body=body)
        return response
    def store_response(self, spider, request, response):
        """Serialize *response* under '<key>_data' with a '<key>_time' stamp."""
        key = self._request_key(request)
        data = {
            'status': response.status,
            'url': response.url,
            'headers': dict(response.headers),
            'body': response.body,
        }
        self.db['%s_data' % key] = pickle.dumps(data, protocol=2)
        self.db['%s_time' % key] = str(time())
    def _read_data(self, spider, request):
        """Return the unpickled payload dict, or None when missing/expired."""
        key = self._request_key(request)
        db = self.db
        tkey = '%s_time' % key
        if tkey not in db:
            return  # not found
        ts = db[tkey]
        # expiration_secs == 0 means entries never expire.
        if 0 < self.expiration_secs < time() - float(ts):
            return  # expired
        return pickle.loads(db['%s_data' % key])
    def _request_key(self, request):
        # Cache key is the canonical request fingerprint.
        return request_fingerprint(request)
class FilesystemCacheStorage(object):
    """HTTP cache storage using one directory per request fingerprint."""
    def __init__(self, settings):
        self.cachedir = data_path(settings['HTTPCACHE_DIR'])
        self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
        self.use_gzip = settings.getbool('HTTPCACHE_GZIP')
        # Transparently gzip-compress cache files when HTTPCACHE_GZIP is on.
        self._open = gzip.open if self.use_gzip else open
    def open_spider(self, spider):
        pass
    def close_spider(self, spider):
        pass
    def retrieve_response(self, spider, request):
        """Return response if present in cache, or None otherwise."""
        metadata = self._read_meta(spider, request)
        if metadata is None:
            return  # not cached
        rpath = self._get_request_path(spider, request)
        with self._open(os.path.join(rpath, 'response_body'), 'rb') as f:
            body = f.read()
        with self._open(os.path.join(rpath, 'response_headers'), 'rb') as f:
            rawheaders = f.read()
        url = metadata.get('response_url')
        status = metadata['status']
        headers = Headers(headers_raw_to_dict(rawheaders))
        respcls = responsetypes.from_args(headers=headers, url=url)
        response = respcls(url=url, headers=headers, status=status, body=body)
        return response
    def store_response(self, spider, request, response):
        """Store the given response in the cache."""
        rpath = self._get_request_path(spider, request)
        if not os.path.exists(rpath):
            os.makedirs(rpath)
        metadata = {
            'url': request.url,
            'method': request.method,
            'status': response.status,
            'response_url': response.url,
            'timestamp': time(),
        }
        # 'meta' keeps a human-readable copy; 'pickled_meta' is read back.
        with self._open(os.path.join(rpath, 'meta'), 'wb') as f:
            f.write(repr(metadata))
        with self._open(os.path.join(rpath, 'pickled_meta'), 'wb') as f:
            pickle.dump(metadata, f, protocol=2)
        with self._open(os.path.join(rpath, 'response_headers'), 'wb') as f:
            f.write(headers_dict_to_raw(response.headers))
        with self._open(os.path.join(rpath, 'response_body'), 'wb') as f:
            f.write(response.body)
        with self._open(os.path.join(rpath, 'request_headers'), 'wb') as f:
            f.write(headers_dict_to_raw(request.headers))
        with self._open(os.path.join(rpath, 'request_body'), 'wb') as f:
            f.write(request.body)
    def _get_request_path(self, spider, request):
        # Shard directories by the first two hex chars of the fingerprint.
        key = request_fingerprint(request)
        return os.path.join(self.cachedir, spider.name, key[0:2], key)
    def _read_meta(self, spider, request):
        """Return the pickled metadata dict, or None when missing/expired."""
        rpath = self._get_request_path(spider, request)
        metapath = os.path.join(rpath, 'pickled_meta')
        if not os.path.exists(metapath):
            return  # not found
        # Expiry is judged by the cache directory's mtime.
        mtime = os.stat(rpath).st_mtime
        if 0 < self.expiration_secs < time() - mtime:
            return  # expired
        with self._open(metapath, 'rb') as f:
            return pickle.load(f)
class LeveldbCacheStorage(object):
    """HTTP cache storage backed by one LevelDB database per spider."""
    def __init__(self, settings):
        # Imported lazily so the dependency is only required when enabled.
        import leveldb
        self._leveldb = leveldb
        self.cachedir = data_path(settings['HTTPCACHE_DIR'], createdir=True)
        self.expiration_secs = settings.getint('HTTPCACHE_EXPIRATION_SECS')
        self.db = None
    def open_spider(self, spider):
        dbpath = os.path.join(self.cachedir, '%s.leveldb' % spider.name)
        self.db = self._leveldb.LevelDB(dbpath)
    def close_spider(self, spider):
        # Do compactation each time to save space and also recreate files to
        # avoid them being removed in storages with timestamp-based autoremoval.
        self.db.CompactRange()
        del self.db
    def retrieve_response(self, spider, request):
        """Rebuild and return the cached Response, or None when not cached."""
        data = self._read_data(spider, request)
        if data is None:
            return  # not cached
        url = data['url']
        status = data['status']
        headers = Headers(data['headers'])
        body = data['body']
        respcls = responsetypes.from_args(headers=headers, url=url)
        response = respcls(url=url, headers=headers, status=status, body=body)
        return response
    def store_response(self, spider, request, response):
        """Persist *response* under '<key>_data' / '<key>_time' in one batch."""
        key = self._request_key(request)
        data = {
            'status': response.status,
            'url': response.url,
            'headers': dict(response.headers),
            'body': response.body,
        }
        # A write batch keeps the data and timestamp entries consistent.
        batch = self._leveldb.WriteBatch()
        batch.Put('%s_data' % key, pickle.dumps(data, protocol=2))
        batch.Put('%s_time' % key, str(time()))
        self.db.Write(batch)
    def _read_data(self, spider, request):
        """Return the unpickled payload, or None when missing or expired."""
        key = self._request_key(request)
        try:
            ts = self.db.Get('%s_time' % key)
        except KeyError:
            return  # not found or invalid entry
        # expiration_secs == 0 means entries never expire.
        if 0 < self.expiration_secs < time() - float(ts):
            return  # expired
        try:
            data = self.db.Get('%s_data' % key)
        except KeyError:
            return  # invalid entry
        else:
            return pickle.loads(data)
    def _request_key(self, request):
        # Cache key is the canonical request fingerprint.
        return request_fingerprint(request)
def parse_cachecontrol(header):
    """Parse a Cache-Control header value into a directive mapping.

    http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9

    Directive names are lower-cased; value-less directives map to ``None``
    and later duplicates overwrite earlier ones.

    >>> parse_cachecontrol('public, max-age=3600') == {'public': None,
    ...                                                'max-age': '3600'}
    True
    >>> parse_cachecontrol('') == {}
    True
    """
    parsed = {}
    for part in header.split(','):
        name, equals, value = part.strip().partition('=')
        if not name:
            continue
        parsed[name.lower()] = value if equals else None
    return parsed
def rfc1123_to_epoch(date_str):
    """Convert an RFC 1123 date string to a Unix timestamp.

    Returns ``None`` when *date_str* is missing or unparseable so callers
    can fall back to a default time. The original bare ``except Exception``
    also swallowed programming errors; this narrows the handling to the
    actual failure modes of the email.utils parsers.
    """
    if date_str is None:
        return None
    parsed = parsedate_tz(date_str)
    if parsed is None:
        # parsedate_tz signals a malformed date by returning None.
        return None
    try:
        return mktime_tz(parsed)
    except (ValueError, OverflowError):
        # Dates outside the representable timestamp range.
        return None
|
Gazolik/riffPy | refs/heads/master | riffPy/riff/reader.py | 1 | import chunk
from . import CHUNKTYPE_SIZE, CHUNKHEADER_SIZE
from .chunk import RiffChunk, ListChunk, FinalChunk
from .data import Data
class ChunkReader(object):
    """Parses RIFF files into a tree of chunk objects."""

    def __init__(self, bigendian=False):
        # Controls byte order passed to the stdlib chunk parser.
        self.bigendian = bigendian

    def read_riff(self, filename):
        """ Read a RIFF file and put it in a RiffChunk object
        :param filename: The path to the RIFF file
        :return: A RiffChunk Object
        """
        with open(filename, 'rb') as file:
            current_chunk = chunk.Chunk(file, bigendian=self.bigendian)
            riff_name = current_chunk.getname()
            # The declared size includes the 4-byte form type read next.
            riff_size = current_chunk.getsize() - CHUNKTYPE_SIZE
            riff_type = current_chunk.read(CHUNKTYPE_SIZE)
            sub_chunks = self.read_chunks(riff_size, file)
            riff_chunk = RiffChunk(riff_name, sub_chunks, riff_type)
        return riff_chunk

    def read_chunks(self, size, file):
        """Read a list of chunks
        :param size: The total size of the list
        :param file: The file handler
        :return: a list of chunks
        """
        chunks = []
        offset = 0
        while offset < size:
            try:
                # Local renamed from 'chunk' to stop shadowing the stdlib
                # 'chunk' module imported at the top of this file.
                sub_chunk = self.read_chunk(file)
            except EOFError:
                break
            chunks.append(sub_chunk)
            offset += sub_chunk.size + CHUNKHEADER_SIZE
        return chunks

    def read_chunk(self, file):
        """Read a chunk starting at the actual position of the file handler
        :param file: The file handler
        :return: a Chunk
        """
        current_chunk = chunk.Chunk(file, bigendian=self.bigendian)
        chunk_name = current_chunk.getname()
        chunk_size = current_chunk.getsize()
        if chunk_name == b'LIST':
            # LIST chunks carry a 4-byte list type plus nested sub-chunks.
            chunk_type = current_chunk.read(CHUNKTYPE_SIZE)
            sub_chunks = self.read_chunks(chunk_size - CHUNKTYPE_SIZE, file)
            return_chunk = ListChunk(chunk_name, sub_chunks, chunk_type)
        else:
            chunk_data = Data(current_chunk.read(chunk_size))
            return_chunk = FinalChunk(chunk_name, chunk_data)
        return return_chunk
|
apehua/pilas | refs/heads/master | pilas/ejemplos/ejemplos/basicos/sonidos.py | 6 | import pilas
# Allows this example to work even if you have not installed pilas.
import sys
sys.path.insert(0, "..")
pilas.iniciar()
# Load the sound effect once; every click reuses the same object.
sonido = pilas.sonidos.cargar("explosion.wav")
def reproducir_sonido_cuando_hace_click(evento):
    """Play the explosion sound; connected to the mouse-click signal."""
    sonido.reproducir()
pilas.escena_actual().click_de_mouse.conectar(reproducir_sonido_cuando_hace_click)
pilas.avisar("Pulse sobre la pantalla para emitir un sonido de explosion.")
pilas.ejecutar()
|
nicfit/mishmash | refs/heads/master | mishmash/commands/sync/utils.py | 1 | import os
import nicfit
from nicfit.console import pout
from nicfit.console.ansi import Fg
from ...orm import VARIOUS_ARTISTS_ID
from ...orm import Artist, Track, Album
log = nicfit.getLogger(__name__)
def deleteOrphans(session):
    """Delete tracks whose files vanished, then empty albums and artists.

    Runs three passes over the session in dependency order
    (tracks -> albums -> artists) so rows orphaned by an earlier pass are
    caught by the later ones. ``Logger.warn`` calls were replaced with
    ``Logger.warning`` (``warn`` is a deprecated alias).

    :param session: an active database session.
    :return: tuple ``(num_tracks, num_artists, num_albums)`` deleted.
    """
    num_orphaned_artists = 0
    num_orphaned_albums = 0
    num_orphaned_tracks = 0
    found_ids = set()
    # Tracks: remove rows whose file no longer exists on disk.
    for track in session.query(Track).all():
        if not os.path.exists(track.path):
            pout(Fg.red("Removing track") + ": " + track.path)
            session.delete(track)
            num_orphaned_tracks += 1
            log.warning("Deleting track: %s" % str(track))
    session.flush()
    # Albums: remove albums that no longer have any tracks.
    found_ids.clear()
    for album in session.query(Album).all():
        if album.id in found_ids:
            continue
        any_track = session.query(Track).filter(Track.album_id == album.id).first()
        if not any_track:
            log.warning("Deleting album: %s" % str(album))
            session.delete(album)
            num_orphaned_albums += 1
        else:
            found_ids.add(album.id)
    session.flush()
    # Artists: remove artists with neither tracks nor non-empty albums.
    # The "various artists" placeholder row is never deleted.
    found_ids.clear()
    for artist in session.query(Artist).all():
        if (artist.id == VARIOUS_ARTISTS_ID or
                artist.id in found_ids):
            continue
        any_track = session.query(Track).filter(Track.artist_id == artist.id) \
                           .first()
        any_album = session.query(Album).filter(Album.artist_id == artist.id) \
                           .first()
        if not any_track and (not any_album or not any_album.tracks):
            log.warning("Deleting artist: %s" % str(artist))
            session.delete(artist)
            num_orphaned_artists += 1
        else:
            found_ids.add(artist.id)
    session.flush()
    return (num_orphaned_tracks, num_orphaned_artists, num_orphaned_albums)
def syncImage(img, current, session):
    """Add or update *img* on *current*'s image collection.

    An image identical to an existing one (same type, md5 and size) is a
    no-op; an image matching an existing one by type and description but
    with a different md5 replaces it; otherwise the image is appended.
    """
    def _img_str(i):
        # Short human-readable label used in console output.
        return "%s - %s" % (i.type, i.description)
    for db_img in current.images:
        img_info = (img.type, img.md5, img.size)
        db_img_info = (db_img.type, db_img.md5, db_img.size)
        if db_img_info == img_info:
            # Exact match already stored; nothing to do.
            img = None
            break
        elif (db_img.type == img.type and
                db_img.description == img.description):
            if img.md5 != db_img.md5:
                # Update image
                current.images.remove(db_img)
                current.images.append(img)
                session.add(current)
                pout(Fg.green("Updating image") + ": " + _img_str(img))
            img = None
            break
    if img:
        # Add image
        current.images.append(img)
        session.add(current)
        pout(Fg.green("Adding image") + ": " + _img_str(img))
|
MACBIO/GIS-Scripts | refs/heads/master | ClipDatasets.py | 1 | import os
import subprocess
import sys
# Root folder of the source datasets (first command-line argument); output
# goes to a sibling 'Clipped' folder.
inFolder = sys.argv[1]
outFolder = os.path.join(os.path.dirname(inFolder), 'Clipped')
# Clipping bounding box in geographic degrees: ulx/uly = upper-left corner,
# lrx/lry = lower-right corner. Alternative country extents kept below.
### Fiji
##ulx = 160
##uly = 0
##lrx = 195
##lry = -30
### Solomon Islands
##ulx = 145
##uly = 0
##lrx = 180
##lry = -20
### Tonga
##ulx = -180
##uly = -10
##lrx = -165
##lry = -30
# Vanuatu
ulx = 155
uly = -5
lrx = 180
lry = -30
# Paths to the OSGeo4W command-line tools used for clipping.
ogr2ogrFile = r"C:\OSGeo4W64\bin\ogr2ogr.exe"
gdal_translateFile = r"C:\OSGeo4W64\bin\gdal_translate.exe"
# function that sends command to console
def check_output(command, console):
    """Execute *command* and return ``(returncode, output)``.

    When *console* compares equal to True the child inherits this process'
    console and ``output`` is ``None``; otherwise the command is run through
    the shell with stdout/stderr captured as text.
    """
    if console == True:  # noqa: E712 -- preserve original loose comparison
        proc = subprocess.Popen(command)
    else:
        proc = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        )
    output, _error = proc.communicate()
    return proc.poll(), output
# create list of shapefiles
fileList = []
for root, dirs, files in os.walk(inFolder):
    for f in files:
        if f.endswith('.shp'):
            fileList.append(os.path.join(root, f))
# process each shapefile: clip to the bounding box with ogr2ogr, mirroring
# the input folder structure under outFolder; existing outputs are skipped.
for f in fileList:
    inFile = f
    outTail = inFile.split(os.path.basename(inFolder))[1]
    outFile = outFolder + outTail
    if not os.path.exists(os.path.dirname(outFile)):
        os.makedirs(os.path.dirname(outFile))
    if not os.path.exists(outFile):
        args = []
        args.append('"'+ogr2ogrFile+'"')
        args.append('-skipfailures')
        args.append('-progress')
        args.append('"'+outFile+'"')
        args.append('"'+inFile+'"')
        # -clipsrc expects xmin ymin xmax ymax.
        args.append('-clipsrc')
        args.append(str(ulx))
        args.append(str(lry))
        args.append(str(lrx))
        args.append(str(uly))
        command = " ".join(args)
        print command
        returncode,output = check_output(command, True)
        print output
# create list of tiffs
fileList = []
for root, dirs, files in os.walk(inFolder):
    for f in files:
        if f.endswith('.tif'):
            fileList.append(os.path.join(root, f))
# process each tiff: crop with gdal_translate, same skip-existing logic.
for f in fileList:
    inFile = f
    outTail = inFile.split(os.path.basename(inFolder))[1]
    outFile = outFolder + outTail
    if not os.path.exists(os.path.dirname(outFile)):
        os.makedirs(os.path.dirname(outFile))
    if not os.path.exists(outFile):
        args = []
        args.append('"'+gdal_translateFile+'"')
        # -projwin expects ulx uly lrx lry.
        args.append('-projwin')
        args.append(str(ulx))
        args.append(str(uly))
        args.append(str(lrx))
        args.append(str(lry))
        args.append('"'+inFile+'"')
        args.append('"'+outFile+'"')
        command = " ".join(args)
        print command
        returncode,output = check_output(command, True)
        print output
|
savanu/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/_pytest/standalonetemplate.py | 203 | #! /usr/bin/env python
# Hi There!
# You may be wondering what this giant blob of binary data here is, you might
# even be worried that we're up to something nefarious (good for you for being
# paranoid!). This is a base64 encoding of a zip file, this zip file contains
# a fully functional basic pytest script.
#
# Pytest is a thing that tests packages, pytest itself is a package that some-
# one might want to install, especially if they're looking to run tests inside
# some package they want to install. Pytest has a lot of code to collect and
# execute tests, and other such sort of "tribal knowledge" that has been en-
# coded in its code base. Because of this we basically include a basic copy
# of pytest inside this blob. We do this because it let's you as a maintainer
# or application developer who wants people who don't deal with python much to
# easily run tests without installing the complete pytest package.
#
# If you're wondering how this is created: you can create it yourself if you
# have a complete pytest installation by using this command on the command-
# line: ``py.test --genscript=runtests.py``.
sources = """
@SOURCES@"""
import sys
import base64
import zlib
class DictImporter(object):
    """Meta-path importer that serves modules from an in-memory mapping.

    ``sources`` maps dotted module names (or ``name.__init__`` for
    packages) to their source code strings.
    """

    def __init__(self, sources):
        self.sources = sources

    def find_module(self, fullname, path=None):
        """Return self when *fullname* can be served, else None."""
        if fullname == "argparse" and sys.version_info >= (2, 7):
            # we were generated with <python2.7 (which pulls in argparse)
            # but we are running now on a stdlib which has it, so use that.
            return None
        if fullname in self.sources or fullname + '.__init__' in self.sources:
            return self
        return None

    def load_module(self, fullname):
        """Compile the stored source and execute it as a new module."""
        from types import ModuleType
        if fullname in self.sources:
            source, is_pkg = self.sources[fullname], False
        else:
            source, is_pkg = self.sources[fullname + '.__init__'], True
        code = compile(source, fullname, 'exec')
        module = sys.modules.setdefault(fullname, ModuleType(fullname))
        module.__file__ = "%s/%s" % (__file__, fullname)
        module.__loader__ = self
        if is_pkg:
            module.__path__ = [fullname]
        do_exec(code, module.__dict__)  # noqa
        return sys.modules[fullname]

    def get_source(self, name):
        """Return the source for *name*, falling back to the package form."""
        source = self.sources.get(name)
        if source is None:
            source = self.sources.get(name + '.__init__')
        return source
if __name__ == "__main__":
    try:
        import pkg_resources  # noqa
    except ImportError:
        sys.stderr.write("ERROR: setuptools not installed\n")
        sys.exit(2)
    if sys.version_info >= (3, 0):
        # Python 3: exec() is a function; the embedded sources are decoded
        # from base64 bytes before being unpickled.
        exec("def do_exec(co, loc): exec(co, loc)\n")
        import pickle
        sources = sources.encode("ascii") # ensure bytes
        sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
    else:
        # Python 2: exec is a statement, so do_exec must itself be defined
        # through an exec'd string to keep this file parseable on Python 3.
        import cPickle as pickle
        exec("def do_exec(co, loc): exec co in loc\n")
        sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
    # Install the in-memory importer ahead of the normal import machinery,
    # then run the generated entry point.
    importer = DictImporter(sources)
    sys.meta_path.insert(0, importer)
    entry = "@ENTRY@"
    do_exec(entry, locals()) # noqa
|
bospetersen/h2o-3 | refs/heads/master | h2o-py/tests/testdir_algos/glm/pyunit_cv_carsGLM.py | 1 | import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
def cv_carsGLM(ip,port):
    """Exercise GLM cross-validation on the cars dataset (Python 2 test).

    Randomly picks a family (gaussian/binomial/poisson), then checks CV
    reproducibility, fold columns, prediction keeping, boundary cases and
    expected error cases.
    """
    # read in the dataset and construct training set (and validation set)
    cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
    # choose the type model-building exercise (multinomial classification or regression). 0:regression, 1:binomial,
    # 2:poisson
    problem = random.sample(range(3),1)[0]
    # pick the predictors and response column, along with the correct family
    predictors = ["displacement","power","weight","acceleration","year"]
    if problem == 1 :
        response_col = "economy_20mpg"
        family = "binomial"
        cars[response_col] = cars[response_col].asfactor()
    elif problem == 2 :
        family = "poisson"
        response_col = "cylinders"
    else :
        family = "gaussian"
        response_col = "economy"
    print "Distribution: {0}".format(family)
    print "Response column: {0}".format(response_col)
    ## cross-validation
    # 1. check that cv metrics are the same over repeated "Modulo" runs
    nfolds = random.randint(3,10)
    glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Modulo")
    glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Modulo")
    tests.check_models(glm1, glm2, True)
    # 2. check that cv metrics are different over repeated "Random" runs
    nfolds = random.randint(3,10)
    glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Random")
    glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=nfolds, family=family, fold_assignment="Random")
    try:
        tests.check_models(glm1, glm2, True)
        assert False, "Expected models to be different over repeated Random runs"
    except AssertionError:
        assert True
    # 3. folds_column
    num_folds = random.randint(2,5)
    fold_assignments = h2o.H2OFrame(python_obj=[[random.randint(0,num_folds-1)] for f in range(cars.nrow)])
    fold_assignments.setNames(["fold_assignments"])
    cars = cars.cbind(fold_assignments)
    glm = h2o.glm(y=cars[response_col], x=cars[predictors], training_frame=cars, family=family,
                  fold_column="fold_assignments", keep_cross_validation_predictions=True)
    num_cv_models = len(glm._model_json['output']['cross_validation_models'])
    assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
                                     "{1}".format(num_folds, num_cv_models)
    cv_model1 = h2o.get_model(glm._model_json['output']['cross_validation_models'][0]['name'])
    cv_model2 = h2o.get_model(glm._model_json['output']['cross_validation_models'][1]['name'])
    assert isinstance(cv_model1, type(glm)), "Expected cross-validation model to be the same model type as the " \
                                             "constructed model, but got {0} and {1}".format(type(cv_model1),type(glm))
    assert isinstance(cv_model2, type(glm)), "Expected cross-validation model to be the same model type as the " \
                                             "constructed model, but got {0} and {1}".format(type(cv_model2),type(glm))
    # 4. keep_cross_validation_predictions
    cv_predictions = glm1._model_json['output']['cross_validation_predictions']
    assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
    cv_predictions = glm._model_json['output']['cross_validation_predictions']
    assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
                                           "as folds, but got {0}".format(len(cv_predictions))
    # # 5. manually construct models
    # fold1 = cars[cars["fold_assignments"]==0]
    # fold2 = cars[cars["fold_assignments"]==1]
    # manual_model1 = h2o.glm(y=fold2[response_col],
    #                         x=fold2[predictors],
    #                         validation_y=fold1[response_col],
    #                         validation_x=fold1[predictors],
    #                         family=family)
    # manual_model2 = h2o.glm(y=fold1[response_col],
    #                         x=fold1[predictors],
    #                         validation_y=fold2[response_col],
    #                         validation_x=fold2[predictors],
    #                         family=family)
    ## boundary cases
    # 1. nfolds = number of observations (leave-one-out cross-validation)
    # TODO: PUBDEV-1776
    #glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow, family=family,
    #              fold_assignment="Modulo")
    # 2. nfolds = 0
    glm1 = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=0, family=family)
    # check that this is equivalent to no nfolds
    glm2 = h2o.glm(y=cars[response_col], x=cars[predictors], family=family)
    tests.check_models(glm1, glm2)
    # 3. cross-validation and regular validation attempted
    glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=random.randint(3,10), validation_y=cars[response_col],
                  validation_x=cars[predictors], family=family)
    ## error cases
    # 1. nfolds == 1 or < 0
    try:
        glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=random.sample([-1,1], 1)[0],
                      family=family)
        assert False, "Expected model-build to fail when nfolds is 1 or < 0"
    except EnvironmentError:
        assert True
    # 2. more folds than observations
    try:
        glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=cars.nrow+1, family=family,
                      fold_assignment="Modulo")
        assert False, "Expected model-build to fail when nfolds > nobs"
    except EnvironmentError:
        assert True
    # 3. fold_column and nfolds both specified
    try:
        glm = h2o.glm(y=cars[response_col], x=cars[predictors], nfolds=3, fold_column="fold_assignments",
                      family=family, training_frame=cars)
        assert False, "Expected model-build to fail when fold_column and nfolds both specified"
    except EnvironmentError:
        assert True
    # # 4. fold_column and fold_assignment both specified
    # try:
    #     glm = h2o.glm(y=cars[response_col], x=cars[predictors], fold_assignment="Random", fold_column="fold_assignments",
    #                   family=family, training_frame=cars)
    #     assert False, "Expected model-build to fail when fold_column and fold_assignment both specified"
    # except EnvironmentError:
    #     assert True
# Standalone entry point used by the h2o test runner.
if __name__ == "__main__":
    tests.run_test(sys.argv, cv_carsGLM)
|
isaac-s/cloudify-manager | refs/heads/master | tests/integration_tests/tests/agentless_tests/test_rest_service_list_filters.py | 1 | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.
from integration_tests import AgentlessTestCase
from integration_tests.tests import utils as test_utils
from integration_tests.tests.utils import get_resource as resource
# Mock plugin coordinates used by the plugin-listing tests below.
TEST_PACKAGE_NAME = 'cloudify-script-plugin'
TEST_PACKAGE_VERSION = '1.2'
OLD_TEST_PACKAGE_VERSION = '1.1'
class TestRestServiceListFilters(AgentlessTestCase):
    """Integration tests for REST list endpoints' filtering parameters.

    Two deployments of the same 3-node blueprint are created per test; the
    expected counts (3 nodes, 2 executions per deployment, 6/4 in total)
    follow from that fixture.
    """
    def setUp(self):
        super(TestRestServiceListFilters, self).setUp()
        self.first_deployment_id, \
            self.first_blueprint_id, \
            self.sec_deployment_id, \
            self.sec_blueprint_id = self._put_two_deployments()
        # Reusable single-field filters targeting the first deployment.
        self.deployment_id_filter = {'deployment_id': self.first_deployment_id}
        self.blueprint_id_filter = {'blueprint_id': self.first_blueprint_id}
    def _put_two_deployments(self):
        # Deploy the same blueprint twice; each deploy also runs an install
        # workflow, hence two executions per deployment.
        dsl_path = resource("dsl/deployment_modification_operations.yaml")
        first_deployment, _ = self.deploy_application(dsl_path)
        sec_deployment, _ = self.deploy_application(dsl_path)
        return first_deployment.id, first_deployment.blueprint_id,\
            sec_deployment.id, sec_deployment.blueprint_id
    def test_nodes_list_with_filters(self):
        filter_params = {'blueprint_id': self.first_blueprint_id,
                         'deployment_id': self.first_deployment_id}
        response = self.client.nodes.list(**filter_params)
        self.assertEquals(len(response), 3, 'expecting 3 node results matching'
                                            ' deployment_id {0} and '
                                            'blueprint_id {1}'
                          .format(self.first_deployment_id,
                                  self.first_blueprint_id))
        for node in response:
            self.assertEquals(node.deployment_id, self.first_deployment_id)
            self.assertEquals(node.blueprint_id, self.first_blueprint_id)
    def test_nodes_list_with_filters_and_include(self):
        # _include should strip every field except the requested ones.
        filter_params = {'blueprint_id': self.first_blueprint_id,
                         'deployment_id': self.first_deployment_id}
        include = ['id']
        response = self.client.nodes.list(_include=include, **filter_params)
        self.assertEquals(len(response), 3, 'expecting 3 node results matching'
                                            ' deployment_id {0} and '
                                            'blueprint_id {1}'
                          .format(self.first_deployment_id,
                                  self.first_blueprint_id))
        for node in response:
            self.assertIsNone(node.deployment_id, 'Expecting deployment_id to '
                                                  'be None')
            self.assertIsNotNone(node.id, 'Expecting id not to be None')
    def test_nodes_list_non_existent_filters(self):
        # Mixing blueprint of one deployment with the id of another matches
        # nothing.
        filter_params = {'blueprint_id': self.first_blueprint_id,
                         'deployment_id': self.sec_deployment_id}
        response = self.client.nodes.list(**filter_params)
        self.assertEquals(len(response), 0, 'expecting 0 node results matching'
                                            ' deployment_id {0} and '
                                            'blueprint_id {1}'
                          .format(self.first_deployment_id,
                                  self.first_blueprint_id))
    def test_nodes_list_no_filters(self):
        response = self.client.nodes.list()
        self.assertEquals(len(response), 6, 'expecting 6 node results matching'
                                            ' deployment_id {id}'
                          .format(id=self.first_deployment_id))
        for node in response:
            self.assertIn(node.deployment_id,
                          (self.first_deployment_id, self.sec_deployment_id))
            self.assertIn(node.blueprint_id,
                          (self.first_blueprint_id, self.sec_blueprint_id))
    def test_node_instances_list_with_filters(self):
        res = self.client.node_instances.list(**self.deployment_id_filter)
        self.assertEquals(len(res), 3, 'expecting 3 node instance results'
                                       ' matching deployment_id {0}'
                          .format(self.first_deployment_id))
        for node_instance in res:
            self.assertEquals(node_instance.deployment_id,
                              self.first_deployment_id)
    def test_node_instances_list_with_filters_multiple_values(self):
        # Multi-valued filters are OR-ed within a field and AND-ed across
        # fields.
        self.multiple_value_filters = \
            {'deployment_id': [self.first_deployment_id,
                               self.sec_deployment_id],
             'node_id': ['webserver',
                         'compute']}
        res = \
            self.client.node_instances.list(
                **self.multiple_value_filters)
        self.assertEquals(len(res), 4, 'expecting 4 node instance results'
                                       ' matching {0}'
                          .format(self.multiple_value_filters))
        for node_instance in res:
            for key in self.multiple_value_filters:
                self.assertIn(node_instance[key],
                              self.multiple_value_filters[key])
    def test_node_instances_list_no_filters(self):
        response = self.client.node_instances.list()
        self.assertEquals(len(response), 6, 'expecting 6 node instance results'
                                            ' matching deployment_id {0}'
                          .format(self.first_deployment_id))
        for node_instance in response:
            self.assertIn(node_instance.deployment_id,
                          (self.first_deployment_id, self.sec_deployment_id))
    def test_deployments_list_with_filters(self):
        id_filter = {'id': self.first_deployment_id}
        response = self.client.deployments.list(**id_filter)
        self.assertEquals(len(response), 1, 'expecting 1 deployment results'
                                            ' matching deployment_id {0} {1}'
                          .format(self.first_deployment_id, len(response)))
        deployment = response[0]
        self.assertEquals(deployment['id'], self.first_deployment_id)
    def test_deployments_list_no_filters(self):
        response = self.client.deployments.list()
        self.assertEquals(len(response), 2, 'expecting 2 deployment results'
                                            ' matching deployment_id {0}'
                          .format(self.first_deployment_id))
        for deployment in response:
            self.assertIn(deployment['id'],
                          (self.first_deployment_id, self.sec_deployment_id))
    def test_executions_list_with_filters(self):
        res = self.client.executions.list(**self.deployment_id_filter)
        self.assertEquals(len(res), 2, 'expecting 2 execution results'
                                       ' matching deployment_id {0} {1}'
                          .format(self.first_deployment_id, len(res)))
        for execution in res:
            self.assertEquals(execution.deployment_id,
                              self.first_deployment_id)
    def test_executions_list_no_filters(self):
        response = self.client.executions.list()
        self.assertEquals(len(response), 4, 'expecting 4 execution results'
                                            ' matching deployment_id {0} {1}'
                          .format(self.first_deployment_id, len(response)))
        for execution in response:
            self.assertIn(execution.deployment_id,
                          (self.first_deployment_id, self.sec_deployment_id))
    def test_blueprints_list_with_filters(self):
        id_filter = {'id': self.first_blueprint_id}
        res = self.client.blueprints.list(**id_filter)
        self.assertEquals(len(res), 1, 'expecting 1 blueprint result'
                                       ' matching blueprint_id {0} {1}'
                          .format(self.first_blueprint_id, len(res)))
        blueprint = res[0]
        self.assertEquals(blueprint.id, self.first_blueprint_id)
    def test_blueprints_list_no_filters(self):
        res = self.client.blueprints.list()
        self.assertEquals(len(res), 2, 'expecting 2 blueprint results '
                                       'matching blueprint_id {0} {1}'
                          .format(self.first_blueprint_id, len(res)))
        for blueprint in res:
            self.assertIn(blueprint.id,
                          (self.first_blueprint_id, self.sec_blueprint_id))
    def test_plugins_list_with_filters(self):
        # Upload two versions of the same plugin, filter by the second's id.
        test_utils.upload_mock_plugin(
                TEST_PACKAGE_NAME,
                TEST_PACKAGE_VERSION)
        sec_plugin_id = test_utils.upload_mock_plugin(
                TEST_PACKAGE_NAME,
                OLD_TEST_PACKAGE_VERSION)['id']
        filter_field = {'id': sec_plugin_id}
        response = self.client.plugins.list(**filter_field)
        self.assertEqual(len(response), 1, 'expecting 1 plugin result, '
                                           'got {0}'.format(len(response)))
        self.assertDictContainsSubset(filter_field, response[0],
                                      'expecting filtered results having '
                                      'filters {0}, got {1}'
                                      .format(filter_field, response[0]))
    def test_plugins_list_no_filters(self):
        test_utils.upload_mock_plugin(
                TEST_PACKAGE_NAME,
                TEST_PACKAGE_VERSION)
        test_utils.upload_mock_plugin(
                TEST_PACKAGE_NAME,
                OLD_TEST_PACKAGE_VERSION)
        response = self.client.plugins.list()
        self.assertEqual(len(response), 2, 'expecting 2 plugin results, '
                                           'got {0}'.format(len(response)))
        self.assertNotEquals(response[0].id, response[1].id)
|
Cheaterman/kivy | refs/heads/master | kivy/uix/recycleview/__init__.py | 7 | """
RecycleView
===========
.. versionadded:: 1.10.0
The RecycleView provides a flexible model for viewing selected sections of
large data sets. It aims to prevent the performance degradation that can occur
when generating large numbers of widgets in order to display many data items.
The view is generated by processing the :attr:`~RecycleView.data`, essentially
a list of dicts, and uses these dicts to generate instances of the
:attr:`~RecycleView.viewclass` as required. Its design is based on the
MVC (`Model-view-controller
<https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93controller>`_)
pattern.
* Model: The model is formed by :attr:`~RecycleView.data` you pass in via a
list of dicts.
* View: The View is split across layout and views and implemented using
  adapters.
* Controller: The controller determines the logical interaction and is
implemented by :class:`RecycleViewBehavior`.
These are abstract classes and cannot be used directly. The default concrete
implementations are the
:class:`~kivy.uix.recycleview.datamodel.RecycleDataModel` for the model, the
:class:`~kivy.uix.recyclelayout.RecycleLayout` for the view, and the
:class:`RecycleView` for the controller.
When a RecycleView is instantiated, it automatically creates the views and data
classes. However, one must manually create the layout classes and add them to
the RecycleView.
A layout manager is automatically created as a
:attr:`~RecycleViewBehavior.layout_manager` when added as the child of the
RecycleView. Similarly when removed. A requirement is that the layout manager
must be contained as a child somewhere within the RecycleView's widget tree so
the view port can be found.
A minimal example might look something like this::
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.recycleview import RecycleView
Builder.load_string('''
<RV>:
viewclass: 'Label'
RecycleBoxLayout:
default_size: None, dp(56)
default_size_hint: 1, None
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
''')
class RV(RecycleView):
def __init__(self, **kwargs):
super(RV, self).__init__(**kwargs)
self.data = [{'text': str(x)} for x in range(100)]
class TestApp(App):
def build(self):
return RV()
if __name__ == '__main__':
TestApp().run()
In order to support selection in the view, you can add the required behaviours
as follows::
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.recycleview import RecycleView
from kivy.uix.recycleview.views import RecycleDataViewBehavior
from kivy.uix.label import Label
from kivy.properties import BooleanProperty
from kivy.uix.recycleboxlayout import RecycleBoxLayout
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.recycleview.layout import LayoutSelectionBehavior
Builder.load_string('''
<SelectableLabel>:
# Draw a background to indicate selection
canvas.before:
Color:
rgba: (.0, 0.9, .1, .3) if self.selected else (0, 0, 0, 1)
Rectangle:
pos: self.pos
size: self.size
<RV>:
viewclass: 'SelectableLabel'
SelectableRecycleBoxLayout:
default_size: None, dp(56)
default_size_hint: 1, None
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
multiselect: True
touch_multiselect: True
''')
class SelectableRecycleBoxLayout(FocusBehavior, LayoutSelectionBehavior,
RecycleBoxLayout):
''' Adds selection and focus behaviour to the view. '''
class SelectableLabel(RecycleDataViewBehavior, Label):
''' Add selection support to the Label '''
index = None
selected = BooleanProperty(False)
selectable = BooleanProperty(True)
def refresh_view_attrs(self, rv, index, data):
''' Catch and handle the view changes '''
self.index = index
return super(SelectableLabel, self).refresh_view_attrs(
rv, index, data)
def on_touch_down(self, touch):
''' Add selection on touch down '''
if super(SelectableLabel, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos) and self.selectable:
return self.parent.select_with_touch(self.index, touch)
def apply_selection(self, rv, index, is_selected):
''' Respond to the selection of items in the view. '''
self.selected = is_selected
if is_selected:
print("selection changed to {0}".format(rv.data[index]))
else:
print("selection removed for {0}".format(rv.data[index]))
class RV(RecycleView):
def __init__(self, **kwargs):
super(RV, self).__init__(**kwargs)
self.data = [{'text': str(x)} for x in range(100)]
class TestApp(App):
def build(self):
return RV()
if __name__ == '__main__':
TestApp().run()
Please see the `examples/widgets/recycleview/basic_data.py` file for a more
complete example.
TODO:
- Method to clear cached class instances.
- Test when views cannot be found (e.g. viewclass is None).
- Fix selection goto.
.. warning::
When views are re-used they may not trigger if the data remains the same.
"""
__all__ = ('RecycleViewBehavior', 'RecycleView')
from copy import deepcopy
from kivy.uix.scrollview import ScrollView
from kivy.properties import AliasProperty
from kivy.clock import Clock
from kivy.uix.recycleview.layout import RecycleLayoutManagerBehavior, \
LayoutChangeException
from kivy.uix.recycleview.views import RecycleDataAdapter
from kivy.uix.recycleview.datamodel import RecycleDataModelBehavior, \
RecycleDataModel
class RecycleViewBehavior(object):
    """RecycleViewBehavior provides a behavioral model upon which the
    :class:`RecycleView` is built. Together, they offer an extensible and
    flexible way to produce views with limited windows over large data sets.
    See the module documentation for more information.
    """
    # internals
    _view_adapter = None
    _data_model = None
    _layout_manager = None
    # Class-level template of pending refresh work. A per-instance deepcopy
    # is made in __init__, so instances never share these mutable lists.
    _refresh_flags = {'data': [], 'layout': [], 'viewport': False}
    _refresh_trigger = None
    def __init__(self, **kwargs):
        # Trigger coalesces multiple refresh requests within one frame into a
        # single refresh_views call (the -1 timeout schedules it for the next
        # Clock tick -- see kivy.clock docs).
        self._refresh_trigger = Clock.create_trigger(self.refresh_views, -1)
        self._refresh_flags = deepcopy(self._refresh_flags)
        super(RecycleViewBehavior, self).__init__(**kwargs)
    def get_viewport(self):
        # Subclass hook: return the currently visible region to display.
        pass
    def save_viewport(self):
        # Subclass hook: remember the scroll position before a pass that may
        # move content around.
        pass
    def restore_viewport(self):
        # Subclass hook: restore the position saved by save_viewport.
        pass
    def refresh_views(self, *largs):
        """Run the refresh pipeline: data sizing -> layout -> visible views.

        Driven by ``_refresh_trigger``; ``_refresh_flags`` records which
        stages are dirty. If new data arrives mid-pass, the method bails out
        and finishes on the next trigger.
        """
        lm = self.layout_manager
        flags = self._refresh_flags
        if lm is None or self.view_adapter is None or self.data_model is None:
            # Not fully wired up yet; nothing can be refreshed.
            return
        data = self.data
        f = flags['data']
        if f:
            self.save_viewport()
            # lm.clear_layout()
            flags['data'] = []
            # Data changed, so a full layout pass is required as well.
            flags['layout'] = [{}]
            lm.compute_sizes_from_data(data, f)
        while flags['layout']:
            # if `data` we were re-triggered so finish in the next call.
            # Otherwise go until fully laid out.
            self.save_viewport()
            if flags['data']:
                return
            flags['viewport'] = True
            f = flags['layout']
            flags['layout'] = []
            try:
                lm.compute_layout(data, f)
            except LayoutChangeException:
                # The layout pass itself changed sizes; mark dirty and loop
                # for another pass.
                flags['layout'].append({})
                continue
        if flags['data']:  # in case that happened meanwhile
            return
        # make sure if we were re-triggered in the loop that we won't be
        # called needlessly later.
        self._refresh_trigger.cancel()
        self.restore_viewport()
        if flags['viewport']:
            # TODO: make this also listen to LayoutChangeException
            flags['viewport'] = False
            viewport = self.get_viewport()
            indices = lm.compute_visible_views(data, viewport)
            lm.set_visible_views(indices, data, viewport)
    def refresh_from_data(self, *largs, **kwargs):
        """
        This should be called when data changes. Data changes typically
        indicate that everything should be recomputed since the source data
        changed.
        This method is automatically bound to the
        :attr:`~RecycleDataModelBehavior.on_data_changed` method of the
        :class:`~RecycleDataModelBehavior` class and
        therefore responds to and accepts the keyword arguments of that event.
        It can be called manually to trigger an update.
        """
        self._refresh_flags['data'].append(kwargs)
        self._refresh_trigger()
    def refresh_from_layout(self, *largs, **kwargs):
        """
        This should be called when the layout changes or needs to change. It is
        typically called when a layout parameter has changed and therefore the
        layout needs to be recomputed.
        """
        self._refresh_flags['layout'].append(kwargs)
        self._refresh_trigger()
    def refresh_from_viewport(self, *largs):
        """
        This should be called when the viewport changes and the displayed data
        must be updated. Neither the data nor the layout will be recomputed.
        """
        self._refresh_flags['viewport'] = True
        self._refresh_trigger()
    def _dispatch_prop_on_source(self, prop_name, *largs):
        # Dispatches the prop of this class when the
        # view_adapter/layout_manager property changes.
        getattr(self.__class__, prop_name).dispatch(self)
    def _get_data_model(self):
        return self._data_model
    def _set_data_model(self, value):
        # Detach the old model (if any), validate and attach the new one.
        # Returning True tells the AliasProperty that the value changed so
        # observers get dispatched.
        data_model = self._data_model
        if value is data_model:
            return
        if data_model is not None:
            self._data_model = None
            data_model.detach_recycleview()
        if value is None:
            return True
        if not isinstance(value, RecycleDataModelBehavior):
            raise ValueError(
                'Expected object based on RecycleDataModelBehavior, got {}'.
                format(value.__class__))
        self._data_model = value
        value.attach_recycleview(self)
        self.refresh_from_data()
        return True
    data_model = AliasProperty(_get_data_model, _set_data_model)
    """
    The Data model responsible for maintaining the data set.
    data_model is an :class:`~kivy.properties.AliasProperty` that gets and sets
    the current data model.
    """
    def _get_view_adapter(self):
        return self._view_adapter
    def _set_view_adapter(self, value):
        # Same detach/validate/attach protocol as _set_data_model.
        view_adapter = self._view_adapter
        if value is view_adapter:
            return
        if view_adapter is not None:
            self._view_adapter = None
            view_adapter.detach_recycleview()
        if value is None:
            return True
        if not isinstance(value, RecycleDataAdapter):
            raise ValueError(
                'Expected object based on RecycleAdapter, got {}'.
                format(value.__class__))
        self._view_adapter = value
        value.attach_recycleview(self)
        self.refresh_from_layout()
        return True
    view_adapter = AliasProperty(_get_view_adapter, _set_view_adapter)
    """
    The adapter responsible for providing views that represent items in a data
    set.
    view_adapter is an :class:`~kivy.properties.AliasProperty` that gets and
    sets the current view adapter.
    """
    def _get_layout_manager(self):
        return self._layout_manager
    def _set_layout_manager(self, value):
        # Same detach/validate/attach protocol as _set_data_model.
        lm = self._layout_manager
        if value is lm:
            return
        if lm is not None:
            self._layout_manager = None
            lm.detach_recycleview()
        if value is None:
            return True
        if not isinstance(value, RecycleLayoutManagerBehavior):
            raise ValueError(
                'Expected object based on RecycleLayoutManagerBehavior, '
                'got {}'.format(value.__class__))
        self._layout_manager = value
        value.attach_recycleview(self)
        self.refresh_from_layout()
        return True
    layout_manager = AliasProperty(
        _get_layout_manager, _set_layout_manager)
    """
    The Layout manager responsible for positioning views within the
    :class:`RecycleView`.
    layout_manager is an :class:`~kivy.properties.AliasProperty` that gets
    and sets the layout_manger.
    """
class RecycleView(RecycleViewBehavior, ScrollView):
    """
    RecycleView is a flexible view for providing a limited window
    into a large data set.

    See the module documentation for more information.
    """

    def __init__(self, **kwargs):
        # Provide default model/adapter unless the caller (or a subclass
        # property default) already supplied one.
        if self.data_model is None:
            kwargs.setdefault('data_model', RecycleDataModel())
        if self.view_adapter is None:
            kwargs.setdefault('view_adapter', RecycleDataAdapter())
        super(RecycleView, self).__init__(**kwargs)

        # Scrolling or resizing only changes the visible window, so refresh
        # just the viewport (not the data or layout) on those events.
        fbind = self.fbind
        fbind('scroll_x', self.refresh_from_viewport)
        fbind('scroll_y', self.refresh_from_viewport)
        fbind('size', self.refresh_from_viewport)
        self.refresh_from_data()

    def _convert_sv_to_lm(self, x, y):
        # Convert (x, y) from scrollview coordinates into layout manager
        # coordinates by applying to_local for every widget between the
        # scrollview and the layout manager.
        lm = self.layout_manager
        tree = [lm]
        parent = lm.parent
        while parent is not None and parent is not self:
            tree.append(parent)
            parent = parent.parent

        if parent is not self:
            raise Exception(
                'The layout manager must be a sub child of the recycleview. '
                'Could not find {} in the parent tree of {}'.format(self, lm))

        for widget in reversed(tree):
            x, y = widget.to_local(x, y)
        return x, y

    def get_viewport(self):
        """Return the visible region as ``(left, bottom, width, height)`` in
        layout manager coordinates."""
        lm = self.layout_manager
        lm_w, lm_h = lm.size
        w, h = self.size
        # Clamp in case overscroll effects push the values outside [0, 1].
        scroll_y = min(1, max(self.scroll_y, 0))
        scroll_x = min(1, max(self.scroll_x, 0))

        # NOTE(review): a previous if/else computing `bottom` here was dead
        # code -- its result was unconditionally overwritten by the
        # expression below (which yields the same 0 when lm_h <= h, since
        # (lm_h - h) * scroll_y <= 0) -- so it has been removed.
        bottom = max(0, (lm_h - h) * scroll_y)
        left = max(0, (lm_w - w) * scroll_x)
        width = min(w, lm_w)
        height = min(h, lm_h)

        # now convert the sv coordinates into the coordinates of the lm. In
        # case there's a relative layout type widget in the parent tree
        # between the sv and the lm.
        left, bottom = self._convert_sv_to_lm(left, bottom)
        return left, bottom, width, height

    def save_viewport(self):
        pass

    def restore_viewport(self):
        pass

    def add_widget(self, widget, *largs):
        super(RecycleView, self).add_widget(widget, *largs)
        # The first layout-manager child added becomes the active manager.
        if (isinstance(widget, RecycleLayoutManagerBehavior) and
                not self.layout_manager):
            self.layout_manager = widget

    def remove_widget(self, widget, *largs):
        super(RecycleView, self).remove_widget(widget, *largs)
        if self.layout_manager == widget:
            self.layout_manager = None

    # or easier way to use
    def _get_data(self):
        d = self.data_model
        return d and d.data

    def _set_data(self, value):
        d = self.data_model
        if d is not None:
            d.data = value

    data = AliasProperty(_get_data, _set_data, bind=["data_model"])
    """
    The data used by the current view adapter. This is a list of dicts whose
    keys map to the corresponding property names of the
    :attr:`~RecycleView.viewclass`.
    data is an :class:`~kivy.properties.AliasProperty` that gets and sets the
    data used to generate the views.
    """

    def _get_viewclass(self):
        a = self.layout_manager
        return a and a.viewclass

    def _set_viewclass(self, value):
        a = self.layout_manager
        if a:
            a.viewclass = value

    viewclass = AliasProperty(_get_viewclass, _set_viewclass,
                              bind=["layout_manager"])
    """
    The viewclass used by the current layout_manager.
    viewclass is an :class:`~kivy.properties.AliasProperty` that gets and sets
    the class used to generate the individual items presented in the view.
    """

    def _get_key_viewclass(self):
        a = self.layout_manager
        return a and a.key_viewclass

    def _set_key_viewclass(self, value):
        a = self.layout_manager
        if a:
            a.key_viewclass = value

    key_viewclass = AliasProperty(_get_key_viewclass, _set_key_viewclass,
                                  bind=["layout_manager"])
    """
    key_viewclass is an :class:`~kivy.properties.AliasProperty` that gets and
    sets the key viewclass for the current
    :attr:`~kivy.uix.recycleview.layout_manager`.
    """
|
EDUlib/edx-platform | refs/heads/master | cms/djangoapps/contentstore/tests/tests.py | 1 | """
This test file will test registration, login, activation, and session activity timeouts
"""
import datetime
import time
from unittest import mock
from ddt import data, ddt, unpack
from django.conf import settings
from django.core.cache import cache
from django.test.utils import override_settings
from django.urls import reverse
from pytz import UTC
from cms.djangoapps.contentstore.tests.test_course_settings import CourseTestCase
from cms.djangoapps.contentstore.tests.utils import AjaxEnabledTestClient, parse_json, registration, user
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class ContentStoreTestCase(ModuleStoreTestCase):
    """Test class to verify user account operations"""
    def _login(self, email, password):
        """
        Login. View should always return 200. The success/fail is in the
        returned json
        """
        resp = self.client.post(
            reverse('user_api_login_session'),
            {'email': email, 'password': password}
        )
        return resp
    def login(self, email, password):
        """Login, check that it worked."""
        resp = self._login(email, password)
        self.assertEqual(resp.status_code, 200)
        return resp
    def _create_account(self, username, email, password):
        """Try to create an account. No error checking"""
        registration_url = reverse('user_api_registration')
        # The extra profile fields are arbitrary valid values; the
        # registration form requires them to be present.
        resp = self.client.post(registration_url, {
            'username': username,
            'email': email,
            'password': password,
            'location': 'home',
            'language': 'Franglish',
            'name': 'Fred Weasley',
            'terms_of_service': 'true',
            'honor_code': 'true',
        })
        return resp
    def create_account(self, username, email, password):
        """Create the account and check that it worked"""
        resp = self._create_account(username, email, password)
        self.assertEqual(resp.status_code, 200)
        json_data = parse_json(resp)
        self.assertEqual(json_data['success'], True)
        # Check both that the user is created, and inactive
        self.assertFalse(user(email).is_active)
        return resp
    def _activate_user(self, email):
        """Look up the activation key for the user, then hit the activate view.
        No error checking"""
        activation_key = registration(email).activation_key
        # and now we try to activate
        resp = self.client.get(reverse('activate', kwargs={'key': activation_key}))
        return resp
    def activate_user(self, email):
        """Activate the user's account and check that it worked."""
        resp = self._activate_user(email)
        self.assertEqual(resp.status_code, 200)
        # Now make sure that the user is now actually activated
        self.assertTrue(user(email).is_active)
@ddt
class AuthTestCase(ContentStoreTestCase):
    """Check that various permissions-related things work"""
    # The base class must not auto-create a user; these tests drive the
    # registration/activation flow themselves.
    CREATE_USER = False
    ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']
    def setUp(self):
        super().setUp()
        self.email = 'a@b.com'
        self.pw = 'xyz'
        self.username = 'testuser'
        self.client = AjaxEnabledTestClient()
        # clear the cache so ratelimiting won't affect these tests
        cache.clear()
    def check_page_get(self, url, expected):
        """GET `url` as HTML and assert the response status is `expected`."""
        resp = self.client.get_html(url)
        self.assertEqual(resp.status_code, expected)
        return resp
    def test_private_pages_auth(self):
        """Make sure pages that do require login work."""
        auth_pages = (
            '/home/',
        )
        # These are pages that should just load when the user is logged in
        # (no data needed)
        simple_auth_pages = (
            '/home/',
        )
        # need an activated user
        self.create_account(self.username, self.email, self.pw)
        self.activate_user(self.email)
        # Create a new session
        self.client = AjaxEnabledTestClient()
        # Not logged in. Should redirect to login.
        print('Not logged in')
        for page in auth_pages:
            print(f"Checking '{page}'")
            self.check_page_get(page, expected=302)
        # Logged in should work.
        self.login(self.email, self.pw)
        print('Logged in')
        for page in simple_auth_pages:
            print(f"Checking '{page}'")
            self.check_page_get(page, expected=200)
    @override_settings(SESSION_INACTIVITY_TIMEOUT_IN_SECONDS=1)
    def test_inactive_session_timeout(self):
        """
        Verify that an inactive session times out and redirects to the
        login page
        """
        self.create_account(self.username, self.email, self.pw)
        self.activate_user(self.email)
        self.login(self.email, self.pw)
        # make sure we can access courseware immediately
        course_url = '/home/'
        resp = self.client.get_html(course_url)
        self.assertEqual(resp.status_code, 200)
        # then wait a bit and see if we get timed out
        # (timeout is overridden to 1 second above, so 2 seconds suffices)
        time.sleep(2)
        resp = self.client.get_html(course_url)
        # re-request, and we should get a redirect to login page
        self.assertRedirects(resp, settings.LOGIN_URL + '?next=/home/', target_status_code=302)
    @data(
        (True, 'assertContains'),
        (False, 'assertNotContains'))
    @unpack
    def test_signin_and_signup_buttons_index_page(self, allow_account_creation, assertion_method_name):
        """
        Navigate to the home page and check the Sign Up button is hidden when ALLOW_PUBLIC_ACCOUNT_CREATION flag
        is turned off, and not when it is turned on. The Sign In button should always appear.
        """
        with mock.patch.dict(settings.FEATURES, {"ALLOW_PUBLIC_ACCOUNT_CREATION": allow_account_creation}):
            response = self.client.get(reverse('homepage'))
        # ddt supplies which assertion applies for this flag value.
        assertion_method = getattr(self, assertion_method_name)
        assertion_method(
            response,
            '<a class="action action-signup" href="{}/register?next=http%3A%2F%2Ftestserver%2F">Sign Up</a>'.format
            (settings.LMS_ROOT_URL)
        )
        self.assertContains(
            response,
            '<a class="action action-signin" href="/signin_redirect_to_lms?next=http%3A%2F%2Ftestserver%2F">'
            'Sign In</a>'
        )
class ForumTestCase(CourseTestCase):
    """Tests class to verify course to forum operations"""
    def setUp(self):
        """ Creates the test course. """
        super().setUp()
        self.course = CourseFactory.create(org='testX', number='727', display_name='Forum Course')
    def set_blackout_dates(self, blackout_dates):
        """Helper method to set blackout dates in course."""
        # Each (start, end) pair is stored as a pair of ISO-8601 strings.
        self.course.discussion_blackouts = [
            [start_date.isoformat(), end_date.isoformat()] for start_date, end_date in blackout_dates
        ]
    def test_blackouts(self):
        """Posting is allowed outside blackout windows and blocked inside."""
        now = datetime.datetime.now(UTC)
        # Both windows entirely in the past or the future: posting allowed.
        times1 = [
            (now - datetime.timedelta(days=14), now - datetime.timedelta(days=11)),
            (now + datetime.timedelta(days=24), now + datetime.timedelta(days=30))
        ]
        self.set_blackout_dates(times1)
        self.assertTrue(self.course.forum_posts_allowed)
        # First window spans `now`: posting blocked.
        times2 = [
            (now - datetime.timedelta(days=14), now + datetime.timedelta(days=2)),
            (now + datetime.timedelta(days=24), now + datetime.timedelta(days=30))
        ]
        self.set_blackout_dates(times2)
        self.assertFalse(self.course.forum_posts_allowed)
        # Single date set for allowed forum posts.
        self.course.discussion_blackouts = [
            now + datetime.timedelta(days=24),
            now + datetime.timedelta(days=30)
        ]
        self.assertTrue(self.course.forum_posts_allowed)
        # Single date set for restricted forum posts.
        self.course.discussion_blackouts = [
            now - datetime.timedelta(days=24),
            now + datetime.timedelta(days=30)
        ]
        self.assertFalse(self.course.forum_posts_allowed)
        # test if user gives empty blackout date it should return true for forum_posts_allowed
        self.course.discussion_blackouts = [[]]
        self.assertTrue(self.course.forum_posts_allowed)
@ddt
class CourseKeyVerificationTestCase(CourseTestCase):
    """Test class to verify course decorator operations"""
    def setUp(self):
        """
        Create test course.
        """
        super().setUp()
        self.course = CourseFactory.create(org='edX', number='test_course_key', display_name='Test Course')

    @data(('edX/test_course_key/Test_Course', 200), ('garbage:edX+test_course_key+Test_Course', 404))
    @unpack
    def test_course_key_decorator(self, course_key, status_code):
        """
        Tests for the ensure_valid_course_key decorator.
        """
        url = f'/import/{course_key}'
        resp = self.client.get_html(url)
        self.assertEqual(resp.status_code, status_code)

        # Fixed: the format string was missing the {filename} placeholder,
        # so the filename keyword argument was silently ignored
        # (str.format ignores unused keyword arguments) and the request hit
        # a bogus literal path instead of the import-status URL under test.
        url = '/import_status/{course_key}/{filename}'.format(
            course_key=course_key,
            filename='xyz.tar.gz'
        )
        resp = self.client.get_html(url)
        self.assertEqual(resp.status_code, status_code)
|
theo-l/django | refs/heads/master | tests/migrations/test_migrations_squashed_ref_squashed/app1/4_auto.py | 133 | from django.db import migrations
class Migration(migrations.Migration):
    # Depends on the squashed migration in app1 that replaces the original
    # 2_auto and 3_auto migrations.
    dependencies = [("app1", "2_squashed_3")]
|
felliott/waterbutler | refs/heads/develop | waterbutler/providers/gitlab/settings.py | 5 | from waterbutler import settings
config = settings.child('GITLAB_PROVIDER_CONFIG')
|
bop/bauhaus | refs/heads/master | lib/python2.7/site-packages/tagging/admin.py | 20 | from django.contrib import admin
from tagging.models import Tag, TaggedItem
from tagging.forms import TagAdminForm
class TagAdmin(admin.ModelAdmin):
    """Admin options for the Tag model; uses TagAdminForm for validation."""
    form = TagAdminForm
admin.site.register(TaggedItem)
admin.site.register(Tag, TagAdmin)
|
jank3/django | refs/heads/master | django/db/migrations/questioner.py | 85 | from __future__ import print_function, unicode_literals
import importlib
import os
import sys
from django.apps import apps
from django.db.models.fields import NOT_PROVIDED
from django.utils import datetime_safe, six, timezone
from django.utils.six.moves import input
from .loader import MigrationLoader
class MigrationQuestioner(object):
    """
    Gives the autodetector responses to questions it might have.
    This base class has a built-in noninteractive mode, but the
    interactive subclass is what the command-line arguments will use.
    """
    def __init__(self, defaults=None, specified_apps=None, dry_run=None):
        # defaults: canned answers keyed by question name (e.g. "ask_rename").
        self.defaults = defaults or {}
        # Apps named explicitly on the command line always get migrations.
        self.specified_apps = specified_apps or set()
        self.dry_run = dry_run
    def ask_initial(self, app_label):
        "Should we create an initial migration for the app?"
        # If it was specified on the command line, definitely true
        if app_label in self.specified_apps:
            return True
        # Otherwise, we look to see if it has a migrations module
        # without any Python files in it, apart from __init__.py.
        # Apps from the new app template will have these; the python
        # file check will ensure we skip South ones.
        try:
            app_config = apps.get_app_config(app_label)
        except LookupError:         # It's a fake app.
            return self.defaults.get("ask_initial", False)
        migrations_import_path = MigrationLoader.migrations_module(app_config.label)
        try:
            migrations_module = importlib.import_module(migrations_import_path)
        except ImportError:
            return self.defaults.get("ask_initial", False)
        else:
            if hasattr(migrations_module, "__file__"):
                filenames = os.listdir(os.path.dirname(migrations_module.__file__))
            elif hasattr(migrations_module, "__path__"):
                # A namespace package spanning several directories is
                # ambiguous, so never treat it as empty.
                if len(migrations_module.__path__) > 1:
                    return False
                filenames = os.listdir(list(migrations_module.__path__)[0])
            # Empty (only __init__.py) means the app needs an initial migration.
            return not any(x.endswith(".py") for x in filenames if x != "__init__.py")
    def ask_not_null_addition(self, field_name, model_name):
        "Adding a NOT NULL field to a model"
        # None means quit
        return None
    def ask_not_null_alteration(self, field_name, model_name):
        "Changing a NULL field to NOT NULL"
        # None means quit
        return None
    def ask_rename(self, model_name, old_name, new_name, field_instance):
        "Was this field really renamed?"
        return self.defaults.get("ask_rename", False)
    def ask_rename_model(self, old_model_state, new_model_state):
        "Was this model really renamed?"
        return self.defaults.get("ask_rename_model", False)
    def ask_merge(self, app_label):
        "Do you really want to merge these migrations?"
        return self.defaults.get("ask_merge", False)
class InteractiveMigrationQuestioner(MigrationQuestioner):
    """Questioner that prompts the user on stdin for each decision."""
    def _boolean_input(self, question, default=None):
        # Prompt until the answer starts with y/n; empty input uses `default`
        # (when one was given).
        result = input("%s " % question)
        if not result and default is not None:
            return default
        while len(result) < 1 or result[0].lower() not in "yn":
            result = input("Please answer yes or no: ")
        return result[0].lower() == "y"
    def _choice_input(self, question, choices):
        # Show a numbered menu and loop until a valid 1-based index is typed;
        # returns the selected number, not the choice text.
        print(question)
        for i, choice in enumerate(choices):
            print(" %s) %s" % (i + 1, choice))
        result = input("Select an option: ")
        while True:
            try:
                value = int(result)
                if 0 < value <= len(choices):
                    return value
            except ValueError:
                pass
            result = input("Please select a valid option: ")
    def _ask_default(self):
        # Read a Python expression from the user and evaluate it to produce a
        # one-off default value. NOTE: eval() on operator-typed input is
        # intentional -- this is an interactive developer tool, not a path
        # for untrusted data.
        print("Please enter the default value now, as valid Python")
        print("The datetime and django.utils.timezone modules are available, so you can do e.g. timezone.now()")
        while True:
            if six.PY3:
                # Six does not correctly abstract over the fact that
                # py3 input returns a unicode string, while py2 raw_input
                # returns a bytestring.
                code = input(">>> ")
            else:
                code = input(">>> ").decode(sys.stdin.encoding)
            if not code:
                print("Please enter some code, or 'exit' (with no quotes) to exit.")
            elif code == "exit":
                sys.exit(1)
            else:
                try:
                    return eval(code, {}, {"datetime": datetime_safe, "timezone": timezone})
                except (SyntaxError, NameError) as e:
                    print("Invalid input: %s" % e)
    def ask_not_null_addition(self, field_name, model_name):
        "Adding a NOT NULL field to a model"
        if not self.dry_run:
            choice = self._choice_input(
                "You are trying to add a non-nullable field '%s' to %s without a default; "
                "we can't do that (the database needs something to populate existing rows).\n"
                "Please select a fix:" % (field_name, model_name),
                [
                    "Provide a one-off default now (will be set on all existing rows)",
                    "Quit, and let me add a default in models.py",
                ]
            )
            if choice == 2:
                # Exit code 3 signals "user aborted" to the caller.
                sys.exit(3)
            else:
                return self._ask_default()
        return None
    def ask_not_null_alteration(self, field_name, model_name):
        "Changing a NULL field to NOT NULL"
        if not self.dry_run:
            choice = self._choice_input(
                "You are trying to change the nullable field '%s' on %s to non-nullable "
                "without a default; we can't do that (the database needs something to "
                "populate existing rows).\n"
                "Please select a fix:" % (field_name, model_name),
                [
                    "Provide a one-off default now (will be set on all existing rows)",
                    ("Ignore for now, and let me handle existing rows with NULL myself "
                     "(e.g. adding a RunPython or RunSQL operation in the new migration "
                     "file before the AlterField operation)"),
                    "Quit, and let me add a default in models.py",
                ]
            )
            if choice == 2:
                # Leave existing NULL rows alone; the field gets no default.
                return NOT_PROVIDED
            elif choice == 3:
                sys.exit(3)
            else:
                return self._ask_default()
        return None
    def ask_rename(self, model_name, old_name, new_name, field_instance):
        "Was this field really renamed?"
        msg = "Did you rename %s.%s to %s.%s (a %s)? [y/N]"
        return self._boolean_input(msg % (model_name, old_name, model_name, new_name,
                                          field_instance.__class__.__name__), False)
    def ask_rename_model(self, old_model_state, new_model_state):
        "Was this model really renamed?"
        msg = "Did you rename the %s.%s model to %s? [y/N]"
        return self._boolean_input(msg % (old_model_state.app_label, old_model_state.name,
                                          new_model_state.name), False)
    def ask_merge(self, app_label):
        "Do you really want to merge these migrations?"
        return self._boolean_input(
            "\nMerging will only work if the operations printed above do not conflict\n" +
            "with each other (working on different fields or models)\n" +
            "Do you want to merge these migration branches? [y/N]",
            False,
        )
class NonInteractiveMigrationQuestioner(MigrationQuestioner):
    """Questioner used when no terminal interaction is possible: questions
    that would need a user decision either abort or fall back silently."""
    def ask_not_null_addition(self, field_name, model_name):
        # We can't ask the user, so act like the user aborted.
        sys.exit(3)
    def ask_not_null_alteration(self, field_name, model_name):
        # We can't ask the user, so set as not provided.
        return NOT_PROVIDED
|
epaglier/Project-JARVIS | refs/heads/master | mycroft-core/mycroft/client/speech/__init__.py | 65 | # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'seanfitz'
|
viewfinderco/viewfinder | refs/heads/master | backend/db/db_client.py | 13 | # Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Interface for client access to datastore backends.
Implemented via the DynamoDB client (dynamodb_client) and the local datastore
emulation client (local_client).
Each client operation takes a callback for asynchronous operation.
Client operations:
- GetItem: retrieve a database item by key (can be composite key)
- BatchGetItem: retrieve a batch of database items by key
- PutItem: store a database item
- DeleteItem: deletes a database item
- UpdateItem: update attributes of a database item
- Query: queries database item(s)
"""
__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'
from collections import namedtuple
from tornado import ioloop, options
options.define('localdb', default=False, help='use local datastore emulation')
options.define('localdb_dir', default='./local/db',
help='directory in which to store database persistence files')
options.define('localdb_sync_secs', default=1.0,
help='seconds between successive syncs to disk')
options.define('localdb_version', default=0,
help='specify a version other than 0 to use as the current on startup; '
'the current version ".0" is still moved to ".1" as normal')
options.define('localdb_num_versions', default=20,
help='number of previous versions of the database to maintain')
options.define('localdb_reset', default=False,
help='reset all existing database files')
options.define('readonly_db', default=False, help='Read-only database')
# Operation information, including operation id and priority, 'op_id'
# == 0 means the request is not attached to an operation but is being
# made extemporaneously.
DBOp = namedtuple('DBOp', ['op_id', 'priority'])
# Named tuple for database keys. Composite keys define both the hash
# key and the range key. Objects which have only a hash key leave the
# range key as None.
DBKey = namedtuple('DBKey', ['hash_key', 'range_key'])
DBKeySchema = namedtuple('DBKeySchema', ['name', 'value_type'])
# Named tuple of calls to Client.UpdateItem. Action must be one of
# 'PUT', 'ADD', 'DELETE'.
UpdateAttr = namedtuple('UpdateAttr', ['value', 'action'])
# Named tuple for range key queries. 'key' is a list of length 1 if
# 'op' is one of (EQ|LE|LT|GE|GT|BEGINS_WITH), --or-- 'key' is a list
# of length 2 ([start, end]), if 'op' is BETWEEN.
RangeOperator = namedtuple('RangeOperator', ['key', 'op'])
# Named tuple for scan filter. The comments for RangeOperator apply
# here as well, though local_db supports only a subset of the actual
# scan filter functionality. 'value' is analagous here to 'key' in
# RangeOperator. It is a list of either one or more than one values
# depending on the value of 'op'.
ScanFilter = namedtuple('ScanFilter', ['value', 'op'])
# Description of a table.
TableSchema = namedtuple('TableSchema', ['create_time', 'hash_key_schema',
'range_key_schema', 'read_units',
'write_units', 'status'])
# Table metadata results.
ListTablesResult = namedtuple('ListTables', ['tables'])
CreateTableResult = namedtuple('CreateTable', ['schema'])
DescribeTableResult = namedtuple('DescribeTable', ['schema', 'count', 'size_bytes'])
DeleteTableResult = namedtuple('DeleteTable', ['schema'])
# Named tuples for results of datastore operations.
GetResult = namedtuple('GetResult', ['attributes', 'read_units'])
PutResult = namedtuple('PutResult', ['return_values', 'write_units'])
DeleteResult = namedtuple('DeleteResult', ['return_values', 'write_units'])
UpdateResult = namedtuple('UpdateResult', ['return_values', 'write_units'])
QueryResult = namedtuple('QueryResult', ['count', 'items', 'last_key', 'read_units'])
ScanResult = namedtuple('ScanResult', ['count', 'items', 'last_key', 'read_units'])
# Batch tuples (batch operations use dictionary that maps from table name => tuple).
BatchGetRequest = namedtuple('BatchGetRequest', ['keys', 'attributes', 'consistent_read'])
BatchGetResult = namedtuple('BatchGetResult', ['items', 'read_units'])
class DBClient(object):
  """Abstract interface for asynchronous access to the backend datastore.

  Concrete implementations: the DynamoDB client and the local file-backed
  emulation client. Every operation takes a 'callback' which is invoked
  with the operation's result object (see the *Result named tuples above).
  """
  def Shutdown(self):
    """Cleanup on process exit."""
    raise NotImplementedError()
  def ListTables(self, callback):
    """Lists the set of tables."""
    raise NotImplementedError()
  def CreateTable(self, table, hash_key_schema, range_key_schema,
                  read_units, write_units, callback):
    """Create a table with specified name, key schema and provisioned
    throughput settings.
    """
    raise NotImplementedError()
  def DeleteTable(self, table, callback):
    """Deletes the named table.
    """
    raise NotImplementedError()
  def DescribeTable(self, table, callback):
    """Describes the named table."""
    raise NotImplementedError()
  def GetItem(self, table, key, callback, attributes, must_exist=True,
              consistent_read=False):
    """Gets the specified attribute values by key. 'must_exist'
    specifies whether to throw an exception if the item is not found.
    If False, None is returned if not found. 'consistent_read'
    designates whether to fetch an authoritative value for the item.
    """
    raise NotImplementedError()
  def BatchGetItem(self, batch_dict, callback, must_exist=True):
    """Gets a batch of items from the database. Items to get are described in 'batch_dict',
    which has the following format:
      {'table-name-0': BatchGetRequest(keys=<list of db-keys from the table>,
                                       attributes=[attr-0, attr-1, ...],
                                       consistent_read=<bool>),
       'table-name-1': ...}
    Returns results in the following format:
      {'table-name-0': BatchGetResult(items={'attr-0': value-0, 'attr-1': value-1, ...},
                                      read_units=3.0),
       'table-name-1': ...}
    If 'must_exist' is true, then raises an error if a db-key is not found in the table.
    Otherwise, returns None in corresponding positions in the 'items' array.
    """
    raise NotImplementedError()
  def PutItem(self, table, key, callback, attributes, expected=None,
              return_values=None):
    """Sets the specified item attributes by key. 'attributes' is a
    dict {attr: value}. If 'expected' is not None, requires that the
    values specified in the expected dict {attr: value} match before
    mutation. 'return_values', if not None, must be one of (NONE,
    ALL_OLD); if ALL_OLD, the previous values for the named attributes
    are returned as an attribute dict.
    """
    raise NotImplementedError()
  def DeleteItem(self, table, key, callback, expected=None,
                 return_values=None):
    """Deletes the specified item by key. 'expected' and
    'return_values' are identical to PutItem().
    """
    raise NotImplementedError()
  def UpdateItem(self, table, key, callback, attributes, expected=None,
                 return_values=None):
    """Updates the specified item attributes by key. 'attributes' is a
    dict {attr: AttrUpdate} (see AttrUpdate named tuple above).
    'expected' and 'return_values' are the same as for PutItem(),
    except that 'return_values' may contain any of (NONE, ALL_OLD,
    UPDATED_OLD, ALL_NEW, UPDATED_NEW).
    """
    raise NotImplementedError()
  def Query(self, table, hash_key, range_operator, callback, attributes,
            limit=None, consistent_read=False, count=False,
            scan_forward=True, excl_start_key=None):
    """Queries a range of values by 'hash_key' and 'range_operator'.
    'range_operator' is of type RangeOperator (see named tuple above;
    if None, selects all values). 'attributes' is a list of
    attributes to query, limit is an upper limit on the number of
    results. If True, 'count' will return just a count of items, but
    no actual data. 'scan_forward', if False, causes a reverse scan
    according to the range operator. If not None, 'excl_start_key'
    allows the query operation to start partway through the
    range. 'excl_start_key' specifies just the range key.
    """
    raise NotImplementedError()
  def Scan(self, table, callback, attributes, limit=None,
           excl_start_key=None, scan_filter=None):
    """Scans the table starting at 'excl_start_key' (if provided) and
    reading the next 'limit' rows, reading the specified 'attributes'.
    If 'scan_filter' is specified, it is applied to each scanned item
    to pre-filter returned results. 'scan_filter' is a map from
    attribute name to ScanFilter tuple.
    """
    raise NotImplementedError()
  def AddTimeout(self, deadline_secs, callback):
    """Invokes the specified callback after 'deadline_secs'. Returns a
    handle which can be suppled to RemoveTimeout to disable the
    timeout.
    """
    raise NotImplementedError()
  def AddAbsoluteTimeout(self, abs_timeout, callback):
    """Invokes the specified callback at wall time
    'abs_timeout'. Returns a handle which can be supplied to
    RemoveTimeout to disable the timeout."""
    raise NotImplementedError()
  def RemoveTimeout(self, timeout):
    """Removes a timeout added via AddTimeout or AddAbsoluteTimeout."""
    raise NotImplementedError()
  # Process-wide singleton accessors; the instance is installed by InitDB().
  @staticmethod
  def Instance():
    """Returns the singleton client; InitDB() must have been called first."""
    assert hasattr(DBClient, "_instance"), 'instance not initialized'
    return DBClient._instance
  @staticmethod
  def SetInstance(client):
    """Sets a new instance for testing."""
    DBClient._instance = client
def InitDB(schema=None, callback=None, verify_or_create=True):
  """Sets the db client instance.

  Initialize the local datastore if --localdb was specified.
  Callback is invoked with the verified table schemas if
  'verify_or_create' is True; with an empty list otherwise.
  """
  assert not hasattr(DBClient, "_instance"), 'instance already initialized'
  assert schema is not None
  # Both branches install the singleton via SetInstance; previously the
  # DynamoDB branch assigned DBClient._instance directly, which skipped the
  # single documented mutation point.
  if options.options.localdb:
    from local_client import LocalClient
    DBClient.SetInstance(LocalClient(schema, read_only=options.options.readonly_db))
  else:
    from dynamodb_client import DynamoDBClient
    DBClient.SetInstance(DynamoDBClient(schema, read_only=options.options.readonly_db))
  if verify_or_create:
    schema.VerifyOrCreate(DBClient.Instance(), callback)
  elif callback is not None:
    # Guard against the default callback=None; previously this raised a
    # TypeError when verify_or_create was False and no callback was given.
    callback([])
def ShutdownDB():
  """Shuts down the currently running instance, if any was initialized."""
  instance = getattr(DBClient, "_instance", None)
  if instance is not None:
    instance.Shutdown()
|
WritingTechForJarrod/vision | refs/heads/master | src/vision/marker_detector.py | 1 | from __future__ import print_function, division, absolute_import, unicode_literals
import cv2
import logging
import numpy as np
import time
class ContrastFilter(object):
    # NOTE(review): currently a stub -- construction only emits a debug log
    # line ('AYUPP'); no filtering logic has been implemented yet.
    def __init__(self):
        logging.getLogger('ContrastFilter').debug('AYUPP')
def within_range(px0, px1, r):
    """Return True when the two points differ by at most r on both axes."""
    return abs(px0[0] - px1[0]) <= r and abs(px0[1] - px1[1]) <= r
def belongs_to(px, group, r):
    """Return True when px lies within range r of any pixel in group."""
    return any(within_range(px, member, r) for member in group)
def contour_area(contour):
    # The zeroth spatial moment (m00) of a contour is its enclosed area.
    return cv2.moments(contour)['m00']
def contour_position(contour):
    """Return the (x, y) centroid of a contour, or None for zero-area input.

    NOTE(review): the TypeError guard presumably covers malformed contour
    input being rejected inside cv2.moments -- confirm against cv2's actual
    failure mode.
    """
    try:
        M = cv2.moments(contour)
        if M['m00'] > 0:
            return (int(M['m10']/M['m00']),int(M['m01']/M['m00']))
    except TypeError:
        return None
    return None
class ContourWrapper(object):
    """Tracks a single contour over time via a [(position, area), ...] history."""
    def __init__(self, contour):
        self._history = []
        self._range = 10  # max per-axis pixel drift to still count as the same contour
        self.updated = False
        self.update(contour)
    def is_same_contour(self, contour):
        """True/False when both centroids are known; None when either is missing."""
        ours = self.last_position()
        theirs = contour_position(contour)
        if ours is None or theirs is None:
            return None
        return within_range(ours, theirs, self._range)
    def last_position(self):
        """Most recently recorded centroid, or None if nothing recorded yet."""
        return self._history[-1][0] if self._history else None
    def last_area(self):
        """Most recently recorded area, or None if nothing recorded yet."""
        return self._history[-1][1] if self._history else None
    def update(self, contour):
        """Append the contour's centroid/area to the history when resolvable."""
        centroid, area = contour_position(contour), contour_area(contour)
        if centroid is not None:
            self._history.append([centroid, area])
            self.updated = True
if __name__ == '__main__':
    # Capture loop: grab webcam frames, isolate high-contrast markers via a
    # color/contrast filter + convolution, track the resulting contours, and
    # log (time, area, x, y) samples to a .vcr file while saving each frame.
    mainlog = logging.getLogger('main')
    logging.basicConfig(level=logging.DEBUG)
    screen_w = 640
    screen_h = 480
    mainlog.debug('Opening window...')
    window_name = 'main'
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(window_name, screen_w, screen_h)
    camera = cv2.VideoCapture(0)
    def get_image():
        # Grab a single frame; 'retval' (success flag) is ignored, so 'im'
        # may be None if the camera read fails.
        retval, im = camera.read()
        return im
    user_in = ''
    mainlog.debug('Press x to quit')
    last_result = None
    last_contours = []
    # NOTE(review): time.clock() was removed in Python 3.8 -- this script
    # appears to target Python 2 / early Python 3.
    t0 = time.clock()
    shape = get_image().shape
    result = np.empty(shape,np.uint8)
    final = np.empty(shape,np.uint8)
    final.fill(0)
    datetime_start = time.strftime('%Y%m%d%H%M%S')
    with open('../../gen/'+datetime_start+'.vcr','w') as f:
        while user_in != ord('x'):
            # NOTE(review): 'global' at module level is a no-op; last_result
            # is already a module-level name here.
            global last_result
            img = cv2.flip(get_image(),1)
            #img = cv2.medianBlur(img,5)
            #img = cv2.imread('../../img/dot_rg_low_light.png')
            if len(shape) < 3:
                raise ValueError
            # Emphasize the marker color: threshold brightness in HSV, then
            # build a green-minus-red difference image centered at 127.
            result = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
            result[:,:,2] = cv2.inRange(result[:,:,2],63,255)
            #result[:,:,2] = cv2.inRange(result[:,:,1],64,255)
            result = cv2.cvtColor(result,cv2.COLOR_HSV2BGR)
            result = result[:,:,1]//2 - result[:,:,2]//2 + 127
            result = cv2.pyrDown(result)
            #result = cv2.pyrDown(result)
            # Center-weighted 5x5 kernel: boosts small bright blobs while
            # penalizing their surroundings.
            k_array = [[-3,-1,-1,-1,-3],
                       [-1, 1, 3, 1,-1],
                       [-1, 3, 6, 3,-1],
                       [-1, 1, 3, 1,-1],
                       [-3,-1,-1,-1,-3]]
            kernel = np.array(k_array,np.float32)
            final = cv2.filter2D(result,-1,kernel)
            #final = cv2.medianBlur(final,3)
            final = cv2.inRange(final,63,255)
            if 1:
                # TODO make this less ugly-like. maybe just store default cv2 types and operate on them?
                # NOTE(review): 3-value findContours return matches OpenCV 3.x
                # only -- confirm the pinned cv2 version.
                ret,contours,hierarchy = cv2.findContours(final,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
                new_contours = [ContourWrapper(contour) for contour in contours]
                new_contours[:] = [c for c in new_contours if c.last_area() > 1.0]
                t = time.clock() - t0
                f.write('t '+str(t)+'\n')
                cv2.imwrite('../../img/captured/'+datetime_start+'_'+str(t)+'_.png',img)
                for contour in new_contours:
                    area = contour.last_area()
                    pos = contour.last_position()
                    if area is not None and pos is not None:
                        f.write(str(area)+' '+str(pos[0])+' '+str(pos[1])+'\n')
            if 1:
                # Re-associate the previous frame's contours with this frame's
                # raw contours by centroid proximity.
                for last_contour in last_contours:
                    last_contour.updated = False
                    for new_contour in contours:
                        if last_contour.is_same_contour(new_contour):
                            last_contour.update(new_contour)
                            break
            if len(last_contours) > 1:
                # Draw markers on every tracked contour that matched this frame.
                circle_r = 7
                circle_t = 2
                circle_c = np.array([255,0,0])
                for contour in last_contours:
                    if contour.updated:
                        x,y = contour.last_position()
                        cv2.circle(img,(x,y),circle_r,circle_c,circle_t)
                        cv2.circle(result,(x,y),circle_r,circle_c,circle_t)
                        cv2.circle(final,(x,y),circle_r,circle_c,circle_t)
            last_contours = new_contours
            cv2.imshow(window_name,img)
            last_result = final
            user_in = cv2.waitKey(1)
    cv2.destroyAllWindows()
|
jreiher2003/google_app_engine_deployment | refs/heads/master | lib/flask/exthook.py | 783 | # -*- coding: utf-8 -*-
"""
flask.exthook
~~~~~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
This is used by `flask.ext`.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
from ._compat import reraise
class ExtensionImporter(object):
    """This importer redirects imports from this submodule to other locations.
    This makes it possible to transition from the old flaskext.name to the
    newer flask_name without people having a hard time.
    """
    def __init__(self, module_choices, wrapper_module):
        # 'module_choices' are '%s'-style name templates (e.g. 'flask_%s',
        # 'flaskext.%s') tried in order; 'wrapper_module' is the package the
        # user imports through (e.g. 'flask.ext').
        self.module_choices = module_choices
        self.wrapper_module = wrapper_module
        self.prefix = wrapper_module + '.'
        self.prefix_cutoff = wrapper_module.count('.') + 1
    def __eq__(self, other):
        # Compare by class identity *and* configuration so install() can
        # de-duplicate equivalent importers across reloads of this module.
        return self.__class__.__module__ == other.__class__.__module__ and \
               self.__class__.__name__ == other.__class__.__name__ and \
               self.wrapper_module == other.wrapper_module and \
               self.module_choices == other.module_choices
    def __ne__(self, other):
        return not self.__eq__(other)
    def install(self):
        # Replace any equivalent importer already on sys.meta_path, then
        # append ourselves.
        sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]
    def find_module(self, fullname, path=None):
        # PEP 302 finder hook: we only claim names under our prefix.
        if fullname.startswith(self.prefix):
            return self
    def load_module(self, fullname):
        # PEP 302 loader hook: try each module_choices template in order and
        # alias the first importable match under 'fullname'.
        if fullname in sys.modules:
            return sys.modules[fullname]
        modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
        for path in self.module_choices:
            realname = path % modname
            try:
                __import__(realname)
            except ImportError:
                exc_type, exc_value, tb = sys.exc_info()
                # since we only establish the entry in sys.modules at the
                # very end, this seems to be redundant, but if recursive
                # imports happen we will call into the same import a second
                # time.  On the second invocation we still don't have an
                # entry for fullname in sys.modules, but we will end up with
                # the same fake module name and that import will succeed
                # since this one already has a temporary entry in the modules
                # dict.  Since this one "succeeded" temporarily that second
                # invocation now will have created a fullname entry in
                # sys.modules which we have to kill.
                sys.modules.pop(fullname, None)
                # If it's an important traceback we reraise it, otherwise
                # we swallow it and try the next choice.  The skipped frame
                # is the one from __import__ above which we don't care about
                if self.is_important_traceback(realname, tb):
                    reraise(exc_type, exc_value, tb.tb_next)
                continue
            module = sys.modules[fullname] = sys.modules[realname]
            if '.' not in modname:
                setattr(sys.modules[self.wrapper_module], modname, module)
            return module
        raise ImportError('No module named %s' % fullname)
    def is_important_traceback(self, important_module, tb):
        """Walks a traceback's frames and checks if any of the frames
        originated in the given important module.  If that is the case then we
        were able to import the module itself but apparently something went
        wrong when the module was imported.  (Eg: import of an import failed).
        """
        while tb is not None:
            if self.is_important_frame(important_module, tb):
                return True
            tb = tb.tb_next
        return False
    def is_important_frame(self, important_module, tb):
        """Checks a single frame if it's important."""
        g = tb.tb_frame.f_globals
        if '__name__' not in g:
            return False
        module_name = g['__name__']
        # Python 2.7 Behavior.  Modules are cleaned up late so the
        # name shows up properly here.  Success!
        if module_name == important_module:
            return True
        # Some python versions will clean up modules so early that the
        # module name at that point is no longer set.  Try guessing from
        # the filename then.
        filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
        test_string = os.path.sep + important_module.replace('.', os.path.sep)
        return test_string + '.py' in filename or \
               test_string + os.path.sep + '__init__.py' in filename
|
SivilTaram/edx-platform | refs/heads/master | common/djangoapps/track/utils.py | 230 | """Utility functions and classes for track backends"""
from datetime import datetime, date
import json
from pytz import UTC
class DateTimeJSONEncoder(json.JSONEncoder):
    """JSON encoder aware of datetime.datetime and datetime.date objects."""
    def default(self, obj):  # pylint: disable=method-hidden
        """
        Serialize datetime and date objects to ISO format strings.
        datetime objects are normalized to UTC first.
        """
        if isinstance(obj, datetime):
            # Naive datetimes are treated as UTC; aware ones are converted.
            normalized = UTC.localize(obj) if obj.tzinfo is None else obj.astimezone(UTC)
            return normalized.isoformat()
        if isinstance(obj, date):
            return obj.isoformat()
        return super(DateTimeJSONEncoder, self).default(obj)
|
ccpgames/eve-metrics | refs/heads/master | web2py/scripts/extract_mysql_models.py | 1 | '''
Create the web2py code needed to access your mysql legacy db.
To make this work all the legacy tables you want to access need to have an "id" field.
This plugin needs:
mysql
mysqldump
installed and globally available.
Under Windows you will probably need to add the mysql executable directory to the PATH variable,
you will also need to modify mysql to mysql.exe and mysqldump to mysqldump.exe below.
Just guessing here :)
Access your tables with:
legacy_db(legacy_db.mytable.id>0).select()
If the script crashes, this might be due to the fact that the data_type_map dictionary below is incomplete.
Please complete it, improve it and continue.
Created by Falko Krause, minor modifications by Massimo Di Pierro and Ron McOuat
'''
import subprocess
import re
import sys
# Maps MySQL column type names to web2py DAL field type names.
data_type_map = dict(
    varchar='string',
    int='integer',
    integer='integer',
    tinyint='integer',
    smallint='integer',
    mediumint='integer',
    bigint='integer',
    float='double',
    double='double',
    char='string',
    decimal='integer',  # NOTE(review): mapping decimal to integer drops the fractional part -- confirm this is intended
    date='date',
    #year = 'date',
    time='time',
    timestamp='datetime',
    datetime='datetime',
    binary='blob',
    blob='blob',
    tinyblob='blob',
    mediumblob='blob',
    longblob='blob',
    text='text',
    tinytext='text',
    mediumtext='text',
    longtext='text',
    )
def mysql(database_name, username, password):
    """Return web2py DAL model code for every table in the given MySQL
    database, built by shelling out to `mysql` (table list) and `mysqldump`
    (per-table CREATE statements)."""
    p = subprocess.Popen(['mysql',
                          '--user=%s' % username,
                          '--password=%s' % password,
                          '--execute=show tables;',
                          database_name],
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    sql_showtables, stderr = p.communicate()
    # NOTE(review): in this non-raw replacement string '\1' is the control
    # character \x01, not a backreference, and the class [^\|*] looks like a
    # mistyped [^\|]* -- in practice split() already yields bare table names
    # so the substitution rarely matches; confirm before "fixing".
    tables = [re.sub(
        '\|\s+([^\|*])\s+.*', '\1', x) for x in sql_showtables.split()[1:]]
    connection_string = "legacy_db = DAL('mysql://%s:%s@localhost/%s')" % (
        username, password, database_name)
    legacy_db_table_web2py_code = []
    for table_name in tables:
        #get the sql create statement
        p = subprocess.Popen(['mysqldump',
                              '--user=%s' % username,
                              '--password=%s' % password,
                              '--skip-add-drop-table',
                              '--no-data', database_name,
                              table_name], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sql_create_stmnt, stderr = p.communicate()
        if 'CREATE' in sql_create_stmnt: # check if the table exists
            #remove garbage lines from sql statement
            sql_lines = sql_create_stmnt.split('\n')
            sql_lines = [x for x in sql_lines if not(
                x.startswith('--') or x.startswith('/*') or x == '')]
            #generate the web2py code from the create statement
            web2py_table_code = ''
            table_name = re.search(
                'CREATE TABLE .(\S+). \(', sql_lines[0]).group(1)
            fields = []
            # Each remaining line is a column definition; skip keys/indexes.
            for line in sql_lines[1:-1]:
                if re.search('KEY', line) or re.search('PRIMARY', line) or re.search(' ID', line) or line.startswith(')'):
                    continue
                hit = re.search('(\S+)\s+(\S+)(,| )( .*)?', line)
                if hit is not None:
                    name, d_type = hit.group(1), hit.group(2)
                    # Strip any size suffix, e.g. varchar(255) -> varchar.
                    d_type = re.sub(r'(\w+)\(.*', r'\1', d_type)
                    name = re.sub('`', '', name)
                    web2py_table_code += "\n    Field('%s','%s')," % (
                        name, data_type_map[d_type])
            web2py_table_code = "legacy_db.define_table('%s',%s\n    migrate=False)" % (table_name, web2py_table_code)
            legacy_db_table_web2py_code.append(web2py_table_code)
    #----------------------------------------
    #write the legacy db to file
    legacy_db_web2py_code = connection_string + "\n\n"
    legacy_db_web2py_code += "\n\n#--------\n".join(
        legacy_db_table_web2py_code)
    return legacy_db_web2py_code
# Entry point: expects a single "username:password@database_name" argument.
# (Python 2 code -- note the print statements.)
regex = re.compile('(.*?):(.*?)@(.*)')
if len(sys.argv) < 2 or not regex.match(sys.argv[1]):
    print 'USAGE:\n\n extract_mysql_models.py username:password@data_basename\n\n'
else:
    m = regex.match(sys.argv[1])
    print mysql(m.group(3), m.group(1), m.group(2))
|
svaksha/epiMM | refs/heads/master | api/plotli.py | 1 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import (absolute_import, division,print_function, unicode_literals)
################################################################################
"""
COPYRIGHT(C) 2013-Now SVAKSHA :: https://github.com/svaksha
LICENSE: AGPLv3 License <http://www.gnu.org/licenses/agpl.html>.
# All copies must retain this permission notice with the copyright notice.
"""
################################################################################
# Created:: Thu Nov 21 11:15:41 2013
## PROGRAM USECASE: Graphing and plotting Epidemiology data with the plotly API.
##==============================================================================
__author__ = 'SVAKSHA'
__copyright__ = 'Copyright (c) 2013-Now, SVAKSHA'
__license__ = 'AGPLv3'
__version__ = "14.02.dev"
# IMPORTS
##------------------------------------------------------------------------------
import plotly as pot
import numpy as np
import os
import sys
from IPython.display import HTML
# SECURITY NOTE(review): a real-looking API key is hard-coded and committed
# here; it should be revoked and loaded from the environment or a config file.
py = pot.plotly(username='svaksha', key='kxlzomgu1g')
def __init__():
    # NOTE(review): 'UN' and 'KEY' are not defined anywhere in this module,
    # so calling this raises NameError, and 'plotly_key' is discarded.
    # Looks like dead or unfinished code -- confirm intent.
    plotly_key = pot.pot(UN, KEY)
def plotlyUser():
    """Sign up the hard-coded user via the plotly API and print the returned
    api_key and temporary password.

    NOTE(review): prints credentials to stdout and hard-codes the account
    email -- confirm this is only meant for one-off local use.
    """
    username='svaksha'
    email='svaksha@gmail.com'
    response = pot.signup(username, email)
    api_key = response['api_key']
    tmp_pw = response['tmp_pw']
    print("api_key:", api_key)
    print("tmp_pw:", tmp_pw)
if __name__ == '__main__':
    # build paths to import modules via pathmap.py
    print('testing')
    # NOTE(review): presumably imported for its sys.path side effects only;
    # the module itself is never referenced afterwards.
    import pathmap
|
Smarsh/django | refs/heads/master | django/contrib/syndication/views.py | 20 | import datetime
from django.conf import settings
from django.contrib.sites.models import Site, RequestSite
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.template import loader, Template, TemplateDoesNotExist, RequestContext
from django.utils import feedgenerator, tzinfo
from django.utils.encoding import force_unicode, iri_to_uri, smart_unicode
from django.utils.html import escape
def add_domain(domain, url):
    """Return `url` unchanged when it is already absolute (http://, https://)
    or a mailto: link; otherwise prefix it with http://<domain>."""
    if url.startswith(('http://', 'https://', 'mailto:')):
        return url
    # 'url' must already be ASCII and URL-quoted, so no need for encoding
    # conversions here.
    return iri_to_uri(u'http://%s%s' % (domain, url))
class FeedDoesNotExist(ObjectDoesNotExist):
    # Raised by feed implementations when the requested feed object is missing.
    pass
class Feed(object):
    """Class-based syndication feed view.

    Subclasses override attributes/methods such as title, link, items,
    item_title, etc.; each may be either a plain value, a zero-argument
    method, or a one-argument method taking the object returned by
    get_object() -- see __get_dynamic_attr below.
    """
    feed_type = feedgenerator.DefaultFeed
    title_template = None
    description_template = None
    def __call__(self, request, *args, **kwargs):
        # The instance itself is the view callable.
        try:
            obj = self.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            raise Http404('Feed object does not exist.')
        feedgen = self.get_feed(obj, request)
        # 'mimetype' (not 'content_type') dates this to older Django.
        response = HttpResponse(mimetype=feedgen.mime_type)
        feedgen.write(response, 'utf-8')
        return response
    def item_title(self, item):
        # Titles should be double escaped by default (see #6533)
        return escape(force_unicode(item))
    def item_description(self, item):
        return force_unicode(item)
    def item_link(self, item):
        try:
            return item.get_absolute_url()
        except AttributeError:
            raise ImproperlyConfigured('Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class.' % item.__class__.__name__)
    def __get_dynamic_attr(self, attname, obj, default=None):
        # Resolve 'attname' on self: plain values are returned as-is;
        # callables are invoked with 'obj' when they accept one argument
        # beyond 'self', or with no arguments otherwise.
        try:
            attr = getattr(self, attname)
        except AttributeError:
            return default
        if callable(attr):
            # Check func_code.co_argcount rather than try/excepting the
            # function and catching the TypeError, because something inside
            # the function may raise the TypeError. This technique is more
            # accurate.
            if hasattr(attr, 'func_code'):
                argcount = attr.func_code.co_argcount
            else:
                argcount = attr.__call__.func_code.co_argcount
            if argcount == 2: # one argument is 'self'
                return attr(obj)
            else:
                return attr()
        return attr
    def feed_extra_kwargs(self, obj):
        """
        Returns an extra keyword arguments dictionary that is used when
        initializing the feed generator.
        """
        return {}
    def item_extra_kwargs(self, item):
        """
        Returns an extra keyword arguments dictionary that is used with
        the `add_item` call of the feed generator.
        """
        return {}
    def get_object(self, request, *args, **kwargs):
        return None
    def get_feed(self, obj, request):
        """
        Returns a feedgenerator.DefaultFeed object, fully populated, for
        this feed. Raises FeedDoesNotExist for invalid parameters.
        """
        if Site._meta.installed:
            current_site = Site.objects.get_current()
        else:
            current_site = RequestSite(request)
        link = self.__get_dynamic_attr('link', obj)
        link = add_domain(current_site.domain, link)
        feed = self.feed_type(
            title = self.__get_dynamic_attr('title', obj),
            subtitle = self.__get_dynamic_attr('subtitle', obj),
            link = link,
            description = self.__get_dynamic_attr('description', obj),
            language = settings.LANGUAGE_CODE.decode(),
            feed_url = add_domain(current_site.domain,
                                  self.__get_dynamic_attr('feed_url', obj) or request.path),
            author_name = self.__get_dynamic_attr('author_name', obj),
            author_link = self.__get_dynamic_attr('author_link', obj),
            author_email = self.__get_dynamic_attr('author_email', obj),
            categories = self.__get_dynamic_attr('categories', obj),
            feed_copyright = self.__get_dynamic_attr('feed_copyright', obj),
            feed_guid = self.__get_dynamic_attr('feed_guid', obj),
            ttl = self.__get_dynamic_attr('ttl', obj),
            **self.feed_extra_kwargs(obj)
        )
        # Optional templates override item_title/item_description per item.
        title_tmp = None
        if self.title_template is not None:
            try:
                title_tmp = loader.get_template(self.title_template)
            except TemplateDoesNotExist:
                pass
        description_tmp = None
        if self.description_template is not None:
            try:
                description_tmp = loader.get_template(self.description_template)
            except TemplateDoesNotExist:
                pass
        for item in self.__get_dynamic_attr('items', obj):
            if title_tmp is not None:
                title = title_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
            else:
                title = self.__get_dynamic_attr('item_title', item)
            if description_tmp is not None:
                description = description_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
            else:
                description = self.__get_dynamic_attr('item_description', item)
            link = add_domain(current_site.domain, self.__get_dynamic_attr('item_link', item))
            enc = None
            enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
            if enc_url:
                enc = feedgenerator.Enclosure(
                    url = smart_unicode(enc_url),
                    length = smart_unicode(self.__get_dynamic_attr('item_enclosure_length', item)),
                    mime_type = smart_unicode(self.__get_dynamic_attr('item_enclosure_mime_type', item))
                )
            author_name = self.__get_dynamic_attr('item_author_name', item)
            if author_name is not None:
                author_email = self.__get_dynamic_attr('item_author_email', item)
                author_link = self.__get_dynamic_attr('item_author_link', item)
            else:
                author_email = author_link = None
            # Naive pubdates are localized to the server's local timezone.
            pubdate = self.__get_dynamic_attr('item_pubdate', item)
            if pubdate and not pubdate.tzinfo:
                ltz = tzinfo.LocalTimezone(pubdate)
                pubdate = pubdate.replace(tzinfo=ltz)
            feed.add_item(
                title = title,
                link = link,
                description = description,
                unique_id = self.__get_dynamic_attr('item_guid', item, link),
                enclosure = enc,
                pubdate = pubdate,
                author_name = author_name,
                author_email = author_email,
                author_link = author_link,
                categories = self.__get_dynamic_attr('item_categories', item),
                item_copyright = self.__get_dynamic_attr('item_copyright', item),
                **self.item_extra_kwargs(item)
            )
        return feed
def feed(request, url, feed_dict=None):
    """Provided for backwards compatibility.

    Legacy function-based view: dispatches 'slug/param' URLs to the feed
    classes registered in 'feed_dict'. Deprecated in favor of the
    class-based Feed above.
    """
    import warnings
    warnings.warn('The syndication feed() view is deprecated. Please use the '
                  'new class based view API.',
                  category=PendingDeprecationWarning)
    if not feed_dict:
        raise Http404("No feeds are registered.")
    # Split 'slug/param'; the param part is optional.
    try:
        slug, param = url.split('/', 1)
    except ValueError:
        slug, param = url, ''
    try:
        f = feed_dict[slug]
    except KeyError:
        raise Http404("Slug %r isn't registered." % slug)
    try:
        feedgen = f(slug, request).get_feed(param)
    except FeedDoesNotExist:
        raise Http404("Invalid feed parameters. Slug %r is valid, but other parameters, or lack thereof, are not." % slug)
    response = HttpResponse(mimetype=feedgen.mime_type)
    feedgen.write(response, 'utf-8')
    return response
|
ckclark/leetcode | refs/heads/master | py/brick-wall.py | 1 | from collections import Counter
class Solution(object):
    def leastBricks(self, wall):
        """
        :type wall: List[List[int]]
        :rtype: int

        Count, for every interior x-coordinate, how many rows end a brick
        there; a vertical line through the most popular edge crosses the
        fewest bricks.
        """
        edge_counts = Counter()
        width = sum(wall[0])
        best = 0
        for row in wall:
            running = 0
            for brick in row:
                running += brick
                # Only interior edges count -- the wall's right border is
                # not a valid place to claim a "free" pass through a row.
                if running < width:
                    edge_counts[running] += 1
                    best = max(best, edge_counts[running])
        return len(wall) - best
|
WebSpider/SickRage | refs/heads/master | sickbeard/notifiers/emby.py | 13 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import urllib
import urllib2
import sickbeard
from sickbeard import logger
from sickrage.helper.exceptions import ex
try:
import json
except ImportError:
import simplejson as json
class EMBYNotifier:
    """Notifier for an Emby media server.

    Python 2 codebase: uses urllib/urllib2 and 'except ..., e' syntax.
    Talks to Emby's HTTP API, authenticating via the X-MediaBrowser-Token
    header with the API key from the SickRage settings.
    """
    def _notify_emby(self, message, host=None, emby_apikey=None):
        """Handles notifying Emby host via HTTP API
        Returns:
            Returns True for no issue or False if there was an error
        """
        # fill in omitted parameters
        if not host:
            host = sickbeard.EMBY_HOST
        if not emby_apikey:
            emby_apikey = sickbeard.EMBY_APIKEY
        url = 'http://%s/emby/Notifications/Admin' % (host)
        values = {'Name': 'SickRage', 'Description': message, 'ImageUrl': 'https://raw.githubusercontent.com/SiCKRAGETV/SickRage/master/gui/slick/images/sickrage-shark-mascot.png'}
        data = json.dumps(values)
        try:
            req = urllib2.Request(url, data)
            req.add_header('X-MediaBrowser-Token', emby_apikey)
            req.add_header('Content-Type', 'application/json')
            response = urllib2.urlopen(req)
            result = response.read()
            response.close()
            logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
            return True
        except (urllib2.URLError, IOError), e:
            logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING)
            return False
##############################################################################
# Public functions
##############################################################################
    def test_notify(self, host, emby_apikey):
        return self._notify_emby('This is a test notification from SickRage', host, emby_apikey)
    def update_library(self, show=None):
        """Handles updating the Emby Media Server host via HTTP API
        Returns:
            Returns True for no issue or False if there was an error
        NOTE(review): implicitly returns None (falsy) when USE_EMBY is off.
        """
        if sickbeard.USE_EMBY:
            if not sickbeard.EMBY_HOST:
                logger.log(u'EMBY: No host specified, check your settings', logger.DEBUG)
                return False
            if show:
                # indexer 1 == TheTVDB; 2 was TVRage, which is defunct.
                if show.indexer == 1:
                    provider = 'tvdb'
                elif show.indexer == 2:
                    logger.log(u'EMBY: TVRage Provider no longer valid', logger.WARNING)
                    return False
                else:
                    logger.log(u'EMBY: Provider unknown', logger.WARNING)
                    return False
                query = '?%sid=%s' % (provider, show.indexerid)
            else:
                query = ''
            url = 'http://%s/emby/Library/Series/Updated%s' % (sickbeard.EMBY_HOST, query)
            values = {}
            data = urllib.urlencode(values)
            try:
                req = urllib2.Request(url, data)
                req.add_header('X-MediaBrowser-Token', sickbeard.EMBY_APIKEY)
                response = urllib2.urlopen(req)
                result = response.read()
                response.close()
                logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
                return True
            except (urllib2.URLError, IOError), e:
                logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING)
                return False
# Module-level export: the class itself (not an instance) is bound to
# 'notifier'.
notifier = EMBYNotifier
|
fintech-circle/edx-platform | refs/heads/master | cms/djangoapps/course_creators/migrations/__init__.py | 12133432 | |
zhakui/QMarkdowner | refs/heads/master | app/__init__.py | 12133432 | |
kaoscoach/crits | refs/heads/master | crits/indicators/__init__.py | 12133432 | |
rdandy/django-mailer | refs/heads/master | mailer/management/commands/__init__.py | 12133432 | |
Khan/khan-linter | refs/heads/master | vendor/py2/pkg_resources/_vendor/__init__.py | 12133432 | |
Katello/grinder | refs/heads/master | src/grinder/RHNComm.py | 2 | #
# Copyright (c) 2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
import os
import logging
import httplib
import urllib
import xmlrpclib
import urlparse
from grinder.GrinderExceptions import GetRequestException
LOG = logging.getLogger("grinder.RHNComm")
class RHNComm(object):
    """
    This class is responsible for handling communication to RHN APIs.
    It uses a mixture of XMLRPC calls as well as wrappers around regular 'GET' calls
    """

    def __init__(self, satelliteURL, systemId):
        self.baseURL = satelliteURL
        self.authMap = None  # cached auth headers; populated lazily by login()
        self.systemId = systemId

    def login(self, refresh=False):
        """
        Input: refresh default value is False
               if refresh is True we will force a login call and refresh the
               cached authentication map
        Output: dict of authentication credentials to be placed in header
                for future package fetch 'GET' calls
        Note:
         The authentication data returned is cached, it is only updated on the
         first call, or when "refresh=True" is passed.
        Background:
         If we make too many login calls to RHN we could make the referring
         systemid be flagged as abusive. Current metrics allow ~100 logins a day
        """
        if self.authMap and not refresh:
            return self.authMap
        client = xmlrpclib.Server(self.baseURL + "/SAT/", verbose=0)
        self.authMap = client.authentication.login(self.systemId)
        self.authMap["X-RHN-Satellite-XML-Dump-Version"] = "3.5"
        return self.authMap

    def __getRequest(self, relativeURL, headers=None):
        """
        Input:
            relativeURL - url for request
            headers - dictionary of key/value pairs to add to header
        Output:
            data from response
        Exception:
            GetRequestException is thrown if response is anything other than 200
        """
        # Fix: the old mutable default argument ({}) was shared across calls,
        # so auth headers accumulated and leaked between requests.
        if headers is None:
            headers = {}
        authMap = self.login()
        for key in authMap:
            headers[key] = authMap[key]
        r = urlparse.urlsplit(self.baseURL)
        if hasattr(r, 'netloc'):
            netloc = r.netloc
        else:
            # Older Pythons return a plain 5-tuple from urlsplit().
            netloc = r[1]
        conn = httplib.HTTPConnection(netloc)
        conn.request("GET", relativeURL, headers=headers)
        resp = conn.getresponse()
        if resp.status == 401:
            LOG.warn("Got a response of %s:%s, Will refresh authentication credentials and retry" \
                     % (resp.status, resp.reason))
            # Drain the 401 response body; httplib requires the previous
            # response to be fully read before reusing the connection.
            resp.read()
            authMap = self.login(refresh=True)
            # Fix: the retry previously passed an undefined ``params`` keyword
            # (a NameError, and httplib's request() takes no such argument)
            # and reused the stale credentials. Install the refreshed auth
            # headers, then retry the same GET.
            for key in authMap:
                headers[key] = authMap[key]
            conn.request("GET", relativeURL, headers=headers)
            resp = conn.getresponse()
        if resp.status != 200:
            LOG.critical("ERROR: Response = %s 'GET' %s. Our Authentication Info is : %s" \
                         % (resp.status, relativeURL, authMap))
            conn.close()
            raise GetRequestException(relativeURL, resp.status)
        data = resp.read()
        conn.close()
        return data

    def getRepodata(self, channelLabel, fileName):
        # Fetch a repodata file (e.g. comps.xml) for the given channel label.
        url = "/SAT/$RHN/" + channelLabel + "/repodata/" + fileName
        data = self.__getRequest(url)
        return data
if __name__ == "__main__":
    # Ad-hoc smoke test: requires this machine to be registered with RHN
    # (reads the local systemid) and network access to the satellite.
    systemId = open("/etc/sysconfig/rhn/systemid").read()
    downldr = RHNComm("http://satellite.rhn.redhat.com", systemId)
    d = downldr.getRepodata("rhel-i386-server-5", "comps.xml")
    print d
|
arborh/tensorflow | refs/heads/master | tensorflow/python/profiler/traceme.py | 3 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TraceMe allows the profiler to trace python events.
Usage:
with profiler.TraceMe('name'):
...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.profiler.internal import _pywrap_traceme
class TraceMe(object):
  """Context manager that emits a named trace event for the profiler.

  Keyword arguments are appended to the event name as ``#k=v,...#`` metadata.
  When profiling is disabled, construction and enter/exit are no-ops.
  """

  def __init__(self, name, **kwargs):
    self._traceme = None
    if not _pywrap_traceme.TraceMe.IsEnabled():
      return  # Profiler off: skip building the (potentially costly) name.
    if kwargs:
      metadata = ','.join(
          key + '=' + str(value) for key, value in six.iteritems(kwargs))
      name = name + '#' + metadata + '#'
    self._traceme = _pywrap_traceme.TraceMe(name)

  def __enter__(self):
    if self._traceme:
      self._traceme.Enter()

  def __exit__(self, exc_type, exc_val, exc_tb):
    if self._traceme:
      self._traceme.Exit()
|
msmolens/VTK | refs/heads/slicer-v6.3.0-2015-07-21-426987d | ThirdParty/Twisted/twisted/internet/test/test_posixbase.py | 33 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.posixbase} and supporting code.
"""
from __future__ import division, absolute_import
from twisted.python.compat import _PY3
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import PosixReactorBase, _Waker
from twisted.internet.protocol import ServerFactory
# Reason to skip the AF_UNIX-dependent tests below, or None if they can run.
skipSockets = None
if _PY3:
    skipSockets = "Re-enable when Python 3 port supports AF_UNIX"
else:
    try:
        from twisted.internet import unix
        from twisted.test.test_unix import ClientProto
    except ImportError:
        # The unix module is only importable where AF_UNIX sockets exist.
        skipSockets = "Platform does not support AF_UNIX sockets"

from twisted.internet.tcp import Port
from twisted.internet import reactor
class TrivialReactor(PosixReactorBase):
    """A minimal reactor that only records which descriptors are registered.

    Readers and writers are tracked in plain dicts (used as sets); no actual
    event monitoring takes place.
    """

    def __init__(self):
        self._readers = {}
        self._writers = {}
        PosixReactorBase.__init__(self)

    def addReader(self, reader):
        # The dict value is irrelevant; only key membership matters.
        self._readers[reader] = True

    def removeReader(self, reader):
        # pop() without a default raises KeyError for unknown readers,
        # matching ``del``.
        self._readers.pop(reader)

    def addWriter(self, writer):
        self._writers[writer] = True

    def removeWriter(self, writer):
        self._writers.pop(writer)
class PosixReactorBaseTests(TestCase):
    """
    Tests for L{PosixReactorBase}.
    """

    def _checkWaker(self, reactor):
        # Shared assertions: the waker must be registered both as an
        # internal reader and as an ordinary reader.
        self.assertIsInstance(reactor.waker, _Waker)
        self.assertIn(reactor.waker, reactor._internalReaders)
        self.assertIn(reactor.waker, reactor._readers)

    def test_wakerIsInternalReader(self):
        """
        When L{PosixReactorBase} is instantiated, it creates a waker and adds
        it to its internal readers set.
        """
        reactor = TrivialReactor()
        self._checkWaker(reactor)

    def test_removeAllSkipsInternalReaders(self):
        """
        Any L{IReadDescriptors} in L{PosixReactorBase._internalReaders} are
        left alone by L{PosixReactorBase._removeAll}.
        """
        reactor = TrivialReactor()
        extra = object()
        reactor._internalReaders.add(extra)
        reactor.addReader(extra)
        reactor._removeAll(reactor._readers, reactor._writers)
        # Internal readers (the waker and our extra object) must survive.
        self._checkWaker(reactor)
        self.assertIn(extra, reactor._internalReaders)
        self.assertIn(extra, reactor._readers)

    def test_removeAllReturnsRemovedDescriptors(self):
        """
        L{PosixReactorBase._removeAll} returns a list of removed
        L{IReadDescriptor} and L{IWriteDescriptor} objects.
        """
        reactor = TrivialReactor()
        reader = object()
        writer = object()
        reactor.addReader(reader)
        reactor.addWriter(writer)
        removed = reactor._removeAll(
            reactor._readers, reactor._writers)
        # Compared as sets because the return order is unspecified.
        self.assertEqual(set(removed), set([reader, writer]))
        self.assertNotIn(reader, reactor._readers)
        self.assertNotIn(writer, reactor._writers)
class TCPPortTests(TestCase):
    """
    Tests for L{twisted.internet.tcp.Port}.
    """

    # These tests only make sense against a posixbase-derived reactor;
    # trial skips the whole class otherwise.
    if not isinstance(reactor, PosixReactorBase):
        skip = "Non-posixbase reactor"

    def test_connectionLostFailed(self):
        """
        L{Port.stopListening} returns a L{Deferred} which errbacks if
        L{Port.connectionLost} raises an exception.
        """
        port = Port(12345, ServerFactory())
        port.connected = True
        # Force connectionLost to raise ZeroDivisionError (1 // 0).
        port.connectionLost = lambda reason: 1 // 0
        return self.assertFailure(port.stopListening(), ZeroDivisionError)
class TimeoutReportReactor(PosixReactorBase):
    """
    A reactor which is just barely runnable and which cannot monitor any
    readers or writers, and which fires a L{Deferred} with the timeout
    passed to its C{doIteration} method as soon as that method is invoked.
    """

    def __init__(self):
        PosixReactorBase.__init__(self)
        self.iterationTimeout = Deferred()
        # Deterministic "current time" returned by seconds(); tests advance
        # it directly instead of sleeping.
        self.now = 100

    def addReader(self, reader):
        """
        Ignore the reader. This is necessary because the waker will be
        added. However, we won't actually monitor it for any events.
        """

    def removeAll(self):
        """
        There are no readers or writers, so there is nothing to remove.
        This will be called when the reactor stops, though, so it must be
        implemented.
        """
        return []

    def seconds(self):
        """
        Override the real clock with a deterministic one that can be easily
        controlled in a unit test.
        """
        return self.now

    def doIteration(self, timeout):
        # Fire the Deferred with the timeout exactly once; subsequent
        # iterations are no-ops because the Deferred is cleared.
        d = self.iterationTimeout
        if d is not None:
            self.iterationTimeout = None
            d.callback(timeout)
class IterationTimeoutTests(TestCase):
    """
    Tests for the timeout argument L{PosixReactorBase.run} calls
    L{PosixReactorBase.doIteration} with in the presence of various delayed
    calls.
    """

    def _checkIterationTimeout(self, reactor):
        # Run the reactor until doIteration fires once, then stop it and
        # return the timeout value doIteration was passed.
        timeout = []
        reactor.iterationTimeout.addCallback(timeout.append)
        reactor.iterationTimeout.addCallback(lambda ignored: reactor.stop())
        reactor.run()
        return timeout[0]

    def test_noCalls(self):
        """
        If there are no delayed calls, C{doIteration} is called with a
        timeout of C{None}.
        """
        reactor = TimeoutReportReactor()
        timeout = self._checkIterationTimeout(reactor)
        self.assertEqual(timeout, None)

    def test_delayedCall(self):
        """
        If there is a delayed call, C{doIteration} is called with a timeout
        which is the difference between the current time and the time at
        which that call is to run.
        """
        reactor = TimeoutReportReactor()
        reactor.callLater(100, lambda: None)
        timeout = self._checkIterationTimeout(reactor)
        self.assertEqual(timeout, 100)

    def test_timePasses(self):
        """
        If a delayed call is scheduled and then some time passes, the
        timeout passed to C{doIteration} is reduced by the amount of time
        which passed.
        """
        reactor = TimeoutReportReactor()
        reactor.callLater(100, lambda: None)
        reactor.now += 25
        timeout = self._checkIterationTimeout(reactor)
        self.assertEqual(timeout, 75)

    def test_multipleDelayedCalls(self):
        """
        If there are several delayed calls, C{doIteration} is called with a
        timeout which is the difference between the current time and the
        time at which the earlier of the two calls is to run.
        """
        reactor = TimeoutReportReactor()
        reactor.callLater(50, lambda: None)
        reactor.callLater(10, lambda: None)
        reactor.callLater(100, lambda: None)
        timeout = self._checkIterationTimeout(reactor)
        self.assertEqual(timeout, 10)

    def test_resetDelayedCall(self):
        """
        If a delayed call is reset, the timeout passed to C{doIteration} is
        based on the interval between the time when reset is called and the
        new delay of the call.
        """
        reactor = TimeoutReportReactor()
        call = reactor.callLater(50, lambda: None)
        reactor.now += 25
        call.reset(15)
        timeout = self._checkIterationTimeout(reactor)
        self.assertEqual(timeout, 15)

    def test_delayDelayedCall(self):
        """
        If a delayed call is re-delayed, the timeout passed to
        C{doIteration} is based on the remaining time before the call would
        have been made and the additional amount of time passed to the delay
        method.
        """
        reactor = TimeoutReportReactor()
        call = reactor.callLater(50, lambda: None)
        reactor.now += 10
        call.delay(20)
        # 50 scheduled - 10 elapsed + 20 extra delay = 60 remaining.
        timeout = self._checkIterationTimeout(reactor)
        self.assertEqual(timeout, 60)

    def test_cancelDelayedCall(self):
        """
        If the only delayed call is canceled, C{None} is the timeout passed
        to C{doIteration}.
        """
        reactor = TimeoutReportReactor()
        call = reactor.callLater(50, lambda: None)
        call.cancel()
        timeout = self._checkIterationTimeout(reactor)
        self.assertEqual(timeout, None)
class ConnectedDatagramPortTestCase(TestCase):
    """
    Test connected datagram UNIX sockets.
    """
    # Skipped wholesale where AF_UNIX is unavailable (see skipSockets above).
    if skipSockets is not None:
        skip = skipSockets

    def test_connectionFailedDoesntCallLoseConnection(self):
        """
        L{ConnectedDatagramPort} does not call the deprecated C{loseConnection}
        in L{ConnectedDatagramPort.connectionFailed}.
        """
        def loseConnection():
            """
            Dummy C{loseConnection} method. C{loseConnection} is deprecated and
            should not get called.
            """
            self.fail("loseConnection is deprecated and should not get called.")

        port = unix.ConnectedDatagramPort(None, ClientProto())
        port.loseConnection = loseConnection
        port.connectionFailed("goodbye")

    def test_connectionFailedCallsStopListening(self):
        """
        L{ConnectedDatagramPort} calls L{ConnectedDatagramPort.stopListening}
        instead of the deprecated C{loseConnection} in
        L{ConnectedDatagramPort.connectionFailed}.
        """
        self.called = False

        def stopListening():
            """
            Dummy C{stopListening} method.
            """
            self.called = True

        port = unix.ConnectedDatagramPort(None, ClientProto())
        port.stopListening = stopListening
        port.connectionFailed("goodbye")
        self.assertEqual(self.called, True)
|
SnappleCap/oh-mainline | refs/heads/master | vendor/packages/sphinx/sphinx/ext/inheritance_diagram.py | 15 | # -*- coding: utf-8 -*-
r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import inspect
import __builtin__ as __builtin__ # as __builtin__ is for lib2to3 compatibility
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.ext.graphviz import render_dot_html, render_dot_latex, \
render_dot_texinfo
from sphinx.util.compat import Directive
# Matches a dotted name like "pkg.module.ClassName" and splits it into the
# dotted prefix (group 1, may be None) and the final component (group 2).
class_sig_re = re.compile(r'''^([\w.]*\.)?    # module names
                          (\w+)  \s* $        # class/final module name
                          ''', re.VERBOSE)


class InheritanceException(Exception):
    """Raised when an inheritance diagram cannot be built from its input."""
    pass
class InheritanceGraph(object):
    """
    Given a list of classes, determines the set of classes that they inherit
    from all the way to the root "object", and then is able to generate a
    graphviz dot graph from them.
    """
    def __init__(self, class_names, currmodule, show_builtins=False,
                 private_bases=False, parts=0):
        """*class_names* is a list of child classes to show bases from.

        If *show_builtins* is True, then Python builtins will be shown
        in the graph.
        """
        self.class_names = class_names
        classes = self._import_classes(class_names, currmodule)
        self.class_info = self._class_info(classes, show_builtins,
                                           private_bases, parts)
        if not self.class_info:
            raise InheritanceException('No classes found for '
                                       'inheritance diagram')

    def _import_class_or_module(self, name, currmodule):
        """Import a class using its fully-qualified *name*."""
        try:
            path, base = class_sig_re.match(name).groups()
        except (AttributeError, ValueError):
            # AttributeError: the regex did not match at all (match() is None).
            raise InheritanceException('Invalid class or module %r specified '
                                       'for inheritance diagram' % name)
        fullname = (path or '') + base
        path = (path and path.rstrip('.') or '')
        # two possibilities: either it is a module, then import it
        try:
            __import__(fullname)
            todoc = sys.modules[fullname]
        except ImportError:
            # else it is a class, then import the module
            if not path:
                if currmodule:
                    # try the current module
                    path = currmodule
                else:
                    raise InheritanceException(
                        'Could not import class %r specified for '
                        'inheritance diagram' % base)
            try:
                __import__(path)
                todoc = getattr(sys.modules[path], base)
            except (ImportError, AttributeError):
                raise InheritanceException(
                    'Could not import class or module %r specified for '
                    'inheritance diagram' % (path + '.' + base))
        # If a class, just return it
        if inspect.isclass(todoc):
            return [todoc]
        elif inspect.ismodule(todoc):
            # A module: collect every class *defined* there (not merely
            # imported into it).
            classes = []
            for cls in todoc.__dict__.values():
                if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
                    classes.append(cls)
            return classes
        raise InheritanceException('%r specified for inheritance diagram is '
                                   'not a class or module' % name)

    def _import_classes(self, class_names, currmodule):
        """Import a list of classes."""
        classes = []
        for name in class_names:
            classes.extend(self._import_class_or_module(name, currmodule))
        return classes

    def _class_info(self, classes, show_builtins, private_bases, parts):
        """Return name and bases for all classes that are ancestors of
        *classes*.

        *parts* gives the number of dotted name parts that is removed from the
        displayed node names.
        """
        all_classes = {}
        builtins = vars(__builtin__).values()

        def recurse(cls):
            # Walk up the inheritance tree, honoring the builtin/private
            # filters at every level.
            if not show_builtins and cls in builtins:
                return
            if not private_bases and cls.__name__.startswith('_'):
                return
            nodename = self.class_name(cls, parts)
            fullname = self.class_name(cls, 0)
            baselist = []
            all_classes[cls] = (nodename, fullname, baselist)
            for base in cls.__bases__:
                if not show_builtins and base in builtins:
                    continue
                if not private_bases and base.__name__.startswith('_'):
                    continue
                baselist.append(self.class_name(base, parts))
                if base not in all_classes:
                    recurse(base)

        for cls in classes:
            recurse(cls)
        return all_classes

    def class_name(self, cls, parts=0):
        """Given a class object, return a fully-qualified name.

        This works for things I've tested in matplotlib so far, but may not be
        completely general.
        """
        module = cls.__module__
        if module == '__builtin__':
            # Builtins are shown without the (implicit) module prefix.
            fullname = cls.__name__
        else:
            fullname = '%s.%s' % (module, cls.__name__)
        if parts == 0:
            return fullname
        # Keep only the last *parts* dotted components for display.
        name_parts = fullname.split('.')
        return '.'.join(name_parts[-parts:])

    def get_all_class_names(self):
        """Get all of the class names involved in the graph."""
        return [fullname for (_, fullname, _) in self.class_info.values()]

    # These are the default attrs for graphviz
    default_graph_attrs = {
        'rankdir': 'LR',
        'size': '"8.0, 12.0"',
    }
    default_node_attrs = {
        'shape': 'box',
        'fontsize': 10,
        'height': 0.25,
        'fontname': '"Vera Sans, DejaVu Sans, Liberation Sans, '
                    'Arial, Helvetica, sans"',
        'style': '"setlinewidth(0.5)"',
    }
    default_edge_attrs = {
        'arrowsize': 0.5,
        'style': '"setlinewidth(0.5)"',
    }

    def _format_node_attrs(self, attrs):
        # Render a dict as "k1=v1,k2=v2" for a dot node attribute list.
        return ','.join(['%s=%s' % x for x in attrs.items()])

    def _format_graph_attrs(self, attrs):
        # Render a dict as "k=v;\n" statements for the dot graph header.
        return ''.join(['%s=%s;\n' % x for x in attrs.items()])

    def generate_dot(self, name, urls={}, env=None,
                     graph_attrs={}, node_attrs={}, edge_attrs={}):
        """Generate a graphviz dot graph from the classes that were passed in
        to __init__.

        *name* is the name of the graph.

        *urls* is a dictionary mapping class names to HTTP URLs.

        *graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
        key/value pairs to pass on as graphviz properties.
        """
        g_attrs = self.default_graph_attrs.copy()
        n_attrs = self.default_node_attrs.copy()
        e_attrs = self.default_edge_attrs.copy()
        g_attrs.update(graph_attrs)
        n_attrs.update(node_attrs)
        e_attrs.update(edge_attrs)
        if env:
            # User configuration (conf.py) overrides both defaults and
            # directive-supplied attributes.
            g_attrs.update(env.config.inheritance_graph_attrs)
            n_attrs.update(env.config.inheritance_node_attrs)
            e_attrs.update(env.config.inheritance_edge_attrs)
        res = []
        res.append('digraph %s {\n' % name)
        res.append(self._format_graph_attrs(g_attrs))
        for cls, (name, fullname, bases) in self.class_info.items():
            # Write the node
            this_node_attrs = n_attrs.copy()
            if fullname in urls:
                this_node_attrs['URL'] = '"%s"' % urls[fullname]
            # Use first line of docstring as tooltip, if available
            if cls.__doc__:
                doc = cls.__doc__.strip().split("\n")[0]
                if doc:
                    doc = doc.replace('"', '\\"')
                    this_node_attrs['tooltip'] = '"%s"' % doc
            res.append('  "%s" [%s];\n' %
                       (name, self._format_node_attrs(this_node_attrs)))
            # Write the edges
            for base_name in bases:
                res.append('  "%s" -> "%s" [%s];\n' %
                           (base_name, name,
                            self._format_node_attrs(e_attrs)))
        res.append('}\n')
        return ''.join(res)
class inheritance_diagram(nodes.General, nodes.Element):
    """
    A docutils node to use as a placeholder for the inheritance diagram.

    It carries the parsed class list ('content'), the 'parts' option, and the
    built InheritanceGraph ('graph') until a writer visitor renders it.
    """
    pass
class InheritanceDiagram(Directive):
    """
    Run when the inheritance_diagram directive is first encountered.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {
        'parts': directives.nonnegative_int,
        'private-bases': directives.flag,
    }

    def run(self):
        node = inheritance_diagram()
        node.document = self.state.document
        env = self.state.document.settings.env
        class_names = self.arguments[0].split()
        class_role = env.get_domain('py').role('class')
        # Store the original content for use as a hash
        node['parts'] = self.options.get('parts', 0)
        node['content'] = ', '.join(class_names)

        # Create a graph starting with the list of classes
        try:
            graph = InheritanceGraph(
                class_names, env.temp_data.get('py:module'),
                parts=node['parts'],
                private_bases='private-bases' in self.options)
        except InheritanceException, err:
            # Emit a build warning instead of failing the whole document.
            return [node.document.reporter.warning(err.args[0],
                                                   line=self.lineno)]

        # Create xref nodes for each target of the graph's image map and
        # add them to the doc tree so that Sphinx can resolve the
        # references to real URLs later. These nodes will eventually be
        # removed from the doctree after we're done with them.
        for name in graph.get_all_class_names():
            refnodes, x = class_role(
                'class', ':class:`%s`' % name, name, 0, self.state)
            node.extend(refnodes)
        # Store the graph object so we can use it to generate the
        # dot file later
        node['graph'] = graph
        return [node]
def get_graph_hash(node):
    """Return a short, stable hash identifying a diagram node's inputs."""
    key = '%s%s' % (node['content'], node['parts'])
    digest = md5(key.encode('utf-8')).hexdigest()
    # Ten hex characters are plenty to keep generated file names unique.
    return digest[-10:]
def html_visit_inheritance_diagram(self, node):
    """
    Output the graph for HTML. This will insert a PNG with clickable
    image map.
    """
    graph = node['graph']

    graph_hash = get_graph_hash(node)
    name = 'inheritance%s' % graph_hash

    # Create a mapping from fully-qualified class names to URLs.
    urls = {}
    for child in node:
        # refuri: link to another document; refid: anchor in this document.
        if child.get('refuri') is not None:
            urls[child['reftitle']] = child.get('refuri')
        elif child.get('refid') is not None:
            urls[child['reftitle']] = '#' + child.get('refid')

    dotcode = graph.generate_dot(name, urls, env=self.builder.env)
    render_dot_html(self, node, dotcode, [], 'inheritance', 'inheritance',
                    alt='Inheritance diagram of ' + node['content'])
    # SkipNode: the xref children were only scaffolding for URL resolution
    # and must not be rendered as regular output.
    raise nodes.SkipNode
def latex_visit_inheritance_diagram(self, node):
    """
    Output the graph for LaTeX. This will insert a PDF.
    """
    graph = node['graph']

    graph_hash = get_graph_hash(node)
    name = 'inheritance%s' % graph_hash

    # No URL map: LaTeX output has no clickable image map.
    dotcode = graph.generate_dot(name, env=self.builder.env,
                                 graph_attrs={'size': '"6.0,6.0"'})
    render_dot_latex(self, node, dotcode, [], 'inheritance')
    raise nodes.SkipNode
def texinfo_visit_inheritance_diagram(self, node):
    """
    Output the graph for Texinfo. This will insert a PNG.
    """
    graph = node['graph']

    graph_hash = get_graph_hash(node)
    name = 'inheritance%s' % graph_hash

    dotcode = graph.generate_dot(name, env=self.builder.env,
                                 graph_attrs={'size': '"6.0,6.0"'})
    render_dot_texinfo(self, node, dotcode, [], 'inheritance')
    raise nodes.SkipNode
def skip(self, node):
    # Visitor for output formats (text, man) that cannot render the diagram:
    # drop the node and all of its children.
    raise nodes.SkipNode
def setup(app):
    """Register the inheritance-diagram extension with the Sphinx *app*.

    Sets up the graphviz dependency, the placeholder node with one visitor
    per output format, the directive itself, and the three config values
    that let conf.py override graphviz attributes.
    """
    app.setup_extension('sphinx.ext.graphviz')
    app.add_node(
        inheritance_diagram,
        latex=(latex_visit_inheritance_diagram, None),
        html=(html_visit_inheritance_diagram, None),
        text=(skip, None),
        man=(skip, None),
        texinfo=(texinfo_visit_inheritance_diagram, None))
    app.add_directive('inheritance-diagram', InheritanceDiagram)
    # Fix: these three calls previously ended with a stray trailing comma,
    # turning each statement into a pointless one-element tuple expression.
    app.add_config_value('inheritance_graph_attrs', {}, False)
    app.add_config_value('inheritance_node_attrs', {}, False)
    app.add_config_value('inheritance_edge_attrs', {}, False)
|
ThiagoGarciaAlves/intellij-community | refs/heads/master | python/testData/quickFixes/PyMakeFunctionFromMethodQuickFixTest/removeQualifiers.py | 44 | class C:
def me<caret>thod(self, x):
test = 1
inst = C()
inst . method(1)
C . method(inst, 42)
|
RayMick/scikit-learn | refs/heads/master | sklearn/cluster/affinity_propagation_.py | 224 | """ Algorithms for clustering : Meanshift, Affinity propagation and spectral
clustering.
"""
# Author: Alexandre Gramfort alexandre.gramfort@inria.fr
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_array
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
                         damping=0.5, copy=True, verbose=False,
                         return_n_iter=False):
    """Perform Affinity Propagation Clustering of data

    Read more in the :ref:`User Guide <affinity_propagation>`.

    Parameters
    ----------
    S : array-like, shape (n_samples, n_samples)
        Matrix of similarities between points

    preference : array-like, shape (n_samples,) or float, optional
        Preferences for each point - points with larger values of
        preferences are more likely to be chosen as exemplars. The number of
        exemplars, i.e. of clusters, is influenced by the input preferences
        value. If the preferences are not passed as arguments, they will be
        set to the median of the input similarities (resulting in a moderate
        number of clusters). For a smaller amount of clusters, this can be set
        to the minimum value of the similarities.

    convergence_iter : int, optional, default: 15
        Number of iterations with no change in the number
        of estimated clusters that stops the convergence.

    max_iter : int, optional, default: 200
        Maximum number of iterations

    damping : float, optional, default: 0.5
        Damping factor between 0.5 and 1.

    copy : boolean, optional, default: True
        If copy is False, the affinity matrix is modified inplace by the
        algorithm, for memory efficiency

    verbose : boolean, optional, default: False
        The verbosity level

    return_n_iter : bool, default False
        Whether or not to return the number of iterations.

    Returns
    -------
    cluster_centers_indices : array, shape (n_clusters,)
        index of clusters centers

    labels : array, shape (n_samples,)
        cluster labels for each point

    n_iter : int
        number of iterations run. Returned only if `return_n_iter` is
        set to True.

    Notes
    -----
    See examples/cluster/plot_affinity_propagation.py for an example.

    References
    ----------
    Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
    Between Data Points", Science Feb. 2007
    """
    S = as_float_array(S, copy=copy)
    n_samples = S.shape[0]

    if S.shape[0] != S.shape[1]:
        raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))

    if preference is None:
        preference = np.median(S)
    if damping < 0.5 or damping >= 1:
        raise ValueError('damping must be >= 0.5 and < 1')

    # Fixed seed: the tiny degeneracy-breaking jitter added below must be
    # reproducible across runs.
    random_state = np.random.RandomState(0)

    # Place preference on the diagonal of S
    S.flat[::(n_samples + 1)] = preference

    A = np.zeros((n_samples, n_samples))
    R = np.zeros((n_samples, n_samples))  # Initialize messages
    # Intermediate results
    tmp = np.zeros((n_samples, n_samples))

    # Remove degeneracies
    S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
          random_state.randn(n_samples, n_samples))

    # Execute parallel affinity propagation updates
    e = np.zeros((n_samples, convergence_iter))

    ind = np.arange(n_samples)

    for it in range(max_iter):
        # tmp = A + S; compute responsibilities
        np.add(A, S, tmp)
        I = np.argmax(tmp, axis=1)
        Y = tmp[ind, I]  # np.max(A + S, axis=1)
        tmp[ind, I] = -np.inf
        Y2 = np.max(tmp, axis=1)

        # tmp = Rnew
        np.subtract(S, Y[:, None], tmp)
        tmp[ind, I] = S[ind, I] - Y2

        # Damping
        tmp *= 1 - damping
        R *= damping
        R += tmp

        # tmp = Rp; compute availabilities
        np.maximum(R, 0, tmp)
        tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]

        # tmp = -Anew
        tmp -= np.sum(tmp, axis=0)
        dA = np.diag(tmp).copy()
        tmp.clip(0, np.inf, tmp)
        tmp.flat[::n_samples + 1] = dA

        # Damping
        tmp *= 1 - damping
        A *= damping
        A -= tmp

        # Check for convergence: a point is an exemplar candidate when its
        # self-availability plus self-responsibility is positive.
        E = (np.diag(A) + np.diag(R)) > 0
        e[:, it % convergence_iter] = E
        K = np.sum(E, axis=0)

        if it >= convergence_iter:
            se = np.sum(e, axis=1)
            unconverged = (np.sum((se == convergence_iter) + (se == 0))
                           != n_samples)
            # NOTE(review): ``it`` ranges over range(max_iter) and so never
            # equals ``max_iter``; the second operand is always False and
            # non-convergence is reported by the for-else below instead.
            # Confirm whether ``it == max_iter - 1`` was intended upstream.
            if (not unconverged and (K > 0)) or (it == max_iter):
                if verbose:
                    print("Converged after %d iterations." % it)
                break
    else:
        if verbose:
            print("Did not converge")

    I = np.where(np.diag(A + R) > 0)[0]
    K = I.size  # Identify exemplars

    if K > 0:
        c = np.argmax(S[:, I], axis=1)
        c[I] = np.arange(K)  # Identify clusters
        # Refine the final set of exemplars and clusters and return results
        for k in range(K):
            ii = np.where(c == k)[0]
            j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
            I[k] = ii[j]

        c = np.argmax(S[:, I], axis=1)
        c[I] = np.arange(K)
        labels = I[c]
        # Reduce labels to a sorted, gapless, list
        cluster_centers_indices = np.unique(labels)
        labels = np.searchsorted(cluster_centers_indices, labels)
    else:
        # No exemplars found: every label is NaN and there are no centers.
        labels = np.empty((n_samples, 1))
        cluster_centers_indices = None
        labels.fill(np.nan)

    if return_n_iter:
        return cluster_centers_indices, labels, it + 1
    else:
        return cluster_centers_indices, labels
###############################################################################
class AffinityPropagation(BaseEstimator, ClusterMixin):
    """Perform Affinity Propagation Clustering of data.

    Read more in the :ref:`User Guide <affinity_propagation>`.

    Parameters
    ----------
    damping : float, optional, default: 0.5
        Damping factor between 0.5 and 1.

    convergence_iter : int, optional, default: 15
        Number of iterations with no change in the number
        of estimated clusters that stops the convergence.

    max_iter : int, optional, default: 200
        Maximum number of iterations.

    copy : boolean, optional, default: True
        Make a copy of input data.

    preference : array-like, shape (n_samples,) or float, optional
        Preferences for each point - points with larger values of
        preferences are more likely to be chosen as exemplars. The number
        of exemplars, ie of clusters, is influenced by the input
        preferences value. If the preferences are not passed as arguments,
        they will be set to the median of the input similarities.

    affinity : string, optional, default=``euclidean``
        Which affinity to use. At the moment ``precomputed`` and
        ``euclidean`` are supported. ``euclidean`` uses the
        negative squared euclidean distance between points.

    verbose : boolean, optional, default: False
        Whether to be verbose.

    Attributes
    ----------
    cluster_centers_indices_ : array, shape (n_clusters,)
        Indices of cluster centers

    cluster_centers_ : array, shape (n_clusters, n_features)
        Cluster centers (if affinity != ``precomputed``).

    labels_ : array, shape (n_samples,)
        Labels of each point

    affinity_matrix_ : array, shape (n_samples, n_samples)
        Stores the affinity matrix used in ``fit``.

    n_iter_ : int
        Number of iterations taken to converge.

    Notes
    -----
    See examples/cluster/plot_affinity_propagation.py for an example.

    The algorithmic complexity of affinity propagation is quadratic
    in the number of points.

    References
    ----------
    Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
    Between Data Points", Science Feb. 2007
    """

    def __init__(self, damping=.5, max_iter=200, convergence_iter=15,
                 copy=True, preference=None, affinity='euclidean',
                 verbose=False):
        # Per sklearn convention, __init__ only stores parameters verbatim;
        # all validation happens in fit().
        self.damping = damping
        self.max_iter = max_iter
        self.convergence_iter = convergence_iter
        self.copy = copy
        self.verbose = verbose
        self.preference = preference
        self.affinity = affinity

    @property
    def _pairwise(self):
        # Tells sklearn utilities that X is a square (dis)similarity matrix,
        # not a feature matrix, when affinity is precomputed.
        return self.affinity == "precomputed"

    def fit(self, X, y=None):
        """ Create affinity matrix from negative euclidean distances, then
        apply affinity propagation clustering.

        Parameters
        ----------
        X: array-like, shape (n_samples, n_features) or (n_samples, n_samples)
            Data matrix or, if affinity is ``precomputed``, matrix of
            similarities / affinities.
        """
        X = check_array(X, accept_sparse='csr')
        if self.affinity == "precomputed":
            self.affinity_matrix_ = X
        elif self.affinity == "euclidean":
            # Negated squared distances: larger similarity = closer points.
            self.affinity_matrix_ = -euclidean_distances(X, squared=True)
        else:
            raise ValueError("Affinity must be 'precomputed' or "
                             "'euclidean'. Got %s instead"
                             % str(self.affinity))

        self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
            affinity_propagation(
                self.affinity_matrix_, self.preference, max_iter=self.max_iter,
                convergence_iter=self.convergence_iter, damping=self.damping,
                copy=self.copy, verbose=self.verbose, return_n_iter=True)

        if self.affinity != "precomputed":
            self.cluster_centers_ = X[self.cluster_centers_indices_].copy()

        return self

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data to predict.

        Returns
        -------
        labels : array, shape (n_samples,)
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, "cluster_centers_indices_")
        if not hasattr(self, "cluster_centers_"):
            # With a precomputed affinity there are no feature-space centers
            # to compare new samples against.
            raise ValueError("Predict method is not supported when "
                             "affinity='precomputed'.")

        return pairwise_distances_argmin(X, self.cluster_centers_)
|
xfxf/veyepar | refs/heads/master | dj/scripts/ck_invalid.py | 3 | #!/usr/bin/python
# ck_invalid.py
# looks for a big "INVALID" video
# which is what melt does when things are broken
# todo: look for silence too.
# not sure what that algorithem will look like,
# so this can wait till I have a problem
import os
import gslevels
from . import gsocr
from process import process
class ckbroke(process):
    """Detect episodes whose encoded video is melt's big "INVALID" frame.

    Runs OCR (via gsocr/gocr) on a frame of each uploaded file; if the
    word INVALID is recognized, the episode is renamed with an "INVALID "
    prefix and its state set to -1 so it is pulled from the pipeline.
    """

    # Episodes in state 3 are eligible for this check.
    ready_state = 3

    def process_ep(self, ep):
        """OCR-check every upload format of one episode.

        Returns False when INVALID was detected, otherwise the value of
        --push (truthy to bump the episode's state).
        """
        exts = self.options.upload_formats
        for ext in exts:
            # e.g. <show_dir>/<ext>/<slug>.<ext>
            src_pathname = os.path.join( self.show_dir, ext,
                "%s.%s"%(ep.slug,ext))

            p=gsocr.Main(src_pathname)

            # gocr -s 40 -C A-Z ~/shot0001.png INVALID
            # restrict gocr to upper-case glyphs at a large size so only
            # the big INVALID banner can match
            p.gocr_cmd = ['gocr', '-', '-s', '40', '-C', 'A-Z']
            dictionary = ["INVALID"]
            p.dictionaries=[dictionary]

            # p.frame=30*5 # start 5 seconds into it (past the title)
            p.seek_sec = 1

            if self.options.verbose: print("checking ", ext)
            # NOTE(review): gsocr.gtk.main() runs the GTK main loop;
            # presumably it returns after p has grabbed and OCR'd the
            # frame, leaving results in p.words — confirm in gsocr.
            gsocr.gtk.main()

            print(p.words)

            if p.words: ## ["INVALID"] is kinda the only thing it can be
                print(ep.id, ep.name)
                print(p.words)
                # Mark the episode broken and drop it out of the pipeline.
                ep.name = "INVALID " + ep.name
                ep.state = -1
                ep.save()
                ret=False
            else:
                # return True to bump state
                # assuming we are not --force-ing the check
                ret=self.options.push
        # NOTE(review): returns the result of the *last* ext only; an
        # earlier INVALID hit already mutated ep, so the state bump is
        # gated by the final format checked.
        return ret

    def add_more_options(self, parser):
        # NOTE(review): no action="store_true" here, so optparse treats
        # --push as taking a required value; self.options.push is then
        # that string (truthy). Confirm whether a bare flag was intended.
        parser.add_option('--push',
                help="Push episode past review step if it passes check.")
if __name__ == '__main__':
p=ckbroke()
p.main()
|
ukanga/SickRage | refs/heads/master | sickbeard/databases/mainDB.py | 2 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=line-too-long
from __future__ import print_function, unicode_literals
import datetime
import warnings
import sickbeard
import os.path
from sickbeard import db, common, helpers, logger
from sickbeard.name_parser.parser import NameParser, InvalidNameException, InvalidShowException
from sickrage.helper.common import dateTimeFormat, episode_num
from sickrage.helper.encoding import ek
from sickbeard import subtitles
import six
MIN_DB_VERSION = 9 # oldest db version we support migrating from
MAX_DB_VERSION = 44
class MainSanityCheck(db.DBSanityCheck):
    """Repair/cleanup passes run against the main SickRage database.

    Each ``fix_*`` / ``convert_*`` method is independent; ``check`` runs
    them in a safe order (indexes first so the later scans are fast).
    Row values are accessed with byte-string keys (``row[b'...']``) for
    Python 2/3 compatibility with the sqlite row wrapper.
    """

    def check(self):
        """Run every enabled sanity fixer."""
        self.fix_missing_table_indexes()
        self.fix_duplicate_shows()
        self.fix_duplicate_episodes()
        self.fix_orphan_episodes()
        self.fix_unaired_episodes()
        self.fix_tvrage_show_statues()
        self.fix_episode_statuses()
        self.fix_invalid_airdates()
        # self.fix_subtitles_codes()
        self.fix_show_nfo_lang()
        self.convert_tvrage_to_tvdb()
        self.convert_archived_to_compound()

    def convert_archived_to_compound(self):
        """Upgrade bare ARCHIVED statuses to compound (status, quality) values."""
        logger.log('Checking for archived episodes not qualified', logger.DEBUG)

        query = "SELECT episode_id, showid, status, location, season, episode " + \
                "FROM tv_episodes WHERE status = {0}".format(common.ARCHIVED)

        sql_results = self.connection.select(query)
        if sql_results:
            logger.log("Found {0:d} shows with bare archived status, attempting automatic conversion...".format(len(sql_results)), logger.WARNING)

        for archivedEp in sql_results:
            fixedStatus = common.Quality.compositeStatus(common.ARCHIVED, common.Quality.UNKNOWN)
            existing = archivedEp[b'location'] and ek(os.path.exists, archivedEp[b'location'])
            if existing:
                # Prefer the quality parsed from the on-disk filename.
                quality = common.Quality.nameQuality(archivedEp[b'location'])
                fixedStatus = common.Quality.compositeStatus(common.ARCHIVED, quality)

            logger.log('Changing status from {old_status} to {new_status} for {id}: {ep} at {location} (File {result})'.format
                       (old_status=common.statusStrings[common.ARCHIVED], new_status=common.statusStrings[fixedStatus],
                        id=archivedEp[b'showid'], ep=episode_num(archivedEp[b'season'], archivedEp[b'episode']),
                        location=archivedEp[b'location'] if archivedEp[b'location'] else 'unknown location',
                        result=('NOT FOUND', 'EXISTS')[bool(existing)]))

            self.connection.action("UPDATE tv_episodes SET status = {0:d} WHERE episode_id = {1:d}".format(fixedStatus, archivedEp[b'episode_id']))

    def convert_tvrage_to_tvdb(self):
        """Remap shows still keyed to the defunct TVRage indexer onto TVDB.

        Shows with no usable mapping, or whose TVDB id collides with an
        existing show, are removed (the colliding case pauses the TVDB
        show and tells the user how to merge manually).
        """
        logger.log("Checking for shows with tvrage id's, since tvrage is gone", logger.DEBUG)
        from sickbeard.indexers.indexer_config import INDEXER_TVRAGE
        from sickbeard.indexers.indexer_config import INDEXER_TVDB

        sql_results = self.connection.select("SELECT indexer_id, show_name, location FROM tv_shows WHERE indexer = {0:d}".format(INDEXER_TVRAGE))

        if sql_results:
            logger.log("Found {0:d} shows with TVRage ID's, attempting automatic conversion...".format(len(sql_results)), logger.WARNING)

        for tvrage_show in sql_results:
            logger.log("Processing {0} at {1}".format(tvrage_show[b'show_name'], tvrage_show[b'location']))
            mapping = self.connection.select("SELECT mindexer_id FROM indexer_mapping WHERE indexer_id={0:d} AND indexer={1:d} AND mindexer={2:d}".format
                                             (tvrage_show[b'indexer_id'], INDEXER_TVRAGE, INDEXER_TVDB))

            if len(mapping) != 1:
                # No (or ambiguous) mapping: drop the show so the user can re-add it.
                logger.log("Error mapping show from tvrage to tvdb for {0} ({1}), found {2:d} mapping results. Cannot convert automatically!".format
                           (tvrage_show[b'show_name'], tvrage_show[b'location'], len(mapping)), logger.WARNING)
                logger.log("Removing the TVRage show and it's episodes from the DB, use 'addExistingShow'", logger.WARNING)
                self.connection.action("DELETE FROM tv_shows WHERE indexer_id = {0:d} AND indexer = {1:d}".format(tvrage_show[b'indexer_id'], INDEXER_TVRAGE))
                self.connection.action("DELETE FROM tv_episodes WHERE showid = {0:d}".format(tvrage_show[b'indexer_id']))
                continue

            # FIX: the original call passed no argument for %i, so the
            # literal placeholder was logged instead of the mapped id.
            logger.log('Checking if there is already a show with id:%i in the show list' % mapping[0][b'mindexer_id'])
            duplicate = self.connection.select("SELECT show_name, indexer_id, location FROM tv_shows WHERE indexer_id = {0:d} AND indexer = {1:d}".format(
                mapping[0][b'mindexer_id'], INDEXER_TVDB))
            if duplicate:
                # TVDB id already present: pause the existing show, remove
                # the TVRage copy, and leave the merge to the user.
                logger.log('Found {0} which has the same id as {1}, cannot convert automatically so I am pausing {2}'.format(
                    duplicate[0][b'show_name'], tvrage_show[b'show_name'], duplicate[0][b'show_name']), logger.WARNING
                )
                self.connection.action("UPDATE tv_shows SET paused=1 WHERE indexer={0:d} AND indexer_id={1:d}".format(
                    INDEXER_TVDB, duplicate[0][b'indexer_id'])
                )

                logger.log("Removing {0} and it's episodes from the DB".format(tvrage_show[b'show_name']), logger.WARNING)
                self.connection.action("DELETE FROM tv_shows WHERE indexer_id = {0:d} AND indexer = {1:d}".format(tvrage_show[b'indexer_id'], INDEXER_TVRAGE))
                self.connection.action("DELETE FROM tv_episodes WHERE showid = {0:d}".format(tvrage_show[b'indexer_id']))
                logger.log('Manually move the season folders from {0} into {1}, and delete {2} before rescanning {3} and unpausing it'.format(
                    tvrage_show[b'location'], duplicate[0][b'location'], tvrage_show[b'location'], duplicate[0][b'show_name']), logger.WARNING
                )
                continue

            logger.log('Mapping {0} to tvdb id {1:d}'.format(tvrage_show[b'show_name'], mapping[0][b'mindexer_id']))

            self.connection.action(
                "UPDATE tv_shows SET indexer={0:d}, indexer_id={1:d} WHERE indexer_id={2:d}".format(
                    INDEXER_TVDB, mapping[0][b'mindexer_id'], tvrage_show[b'indexer_id']
                )
            )

            logger.log('Relinking episodes to show')
            # indexerid is reset to 0 so the episodes get re-fetched from TVDB.
            self.connection.action(
                "UPDATE tv_episodes SET indexer={0:d}, showid={1:d}, indexerid=0 WHERE showid={2:d}".format(
                    INDEXER_TVDB, mapping[0][b'mindexer_id'], tvrage_show[b'indexer_id']
                )
            )

            logger.log('Please perform a full update on {0}'.format(tvrage_show[b'show_name']), logger.WARNING)

    def fix_duplicate_shows(self, column=b'indexer_id'):
        """Delete all but one row for shows duplicated on ``column``.

        NOTE(review): the byte-string default works because str+bytes
        concatenation is legal under Python 2; confirm if this ever runs
        on Python 3 only.
        """
        sql_results = self.connection.select(
            "SELECT show_id, " + column + ", COUNT(" + column + ") as count FROM tv_shows GROUP BY " + column + " HAVING count > 1")

        for cur_duplicate in sql_results:

            logger.log("Duplicate show detected! " + column + ": " + str(cur_duplicate[column]) + " count: " + str(
                cur_duplicate[b"count"]), logger.DEBUG)

            # Keep one row: delete count-1 of the duplicates.
            cur_dupe_results = self.connection.select(
                "SELECT show_id, " + column + " FROM tv_shows WHERE " + column + " = ? LIMIT ?",
                [cur_duplicate[column], int(cur_duplicate[b"count"]) - 1]
            )

            for cur_dupe_id in cur_dupe_results:
                logger.log(
                    "Deleting duplicate show with " + column + ": " + str(cur_dupe_id[column]) + " show_id: " + str(
                        cur_dupe_id[b"show_id"]))
                self.connection.action("DELETE FROM tv_shows WHERE show_id = ?", [cur_dupe_id[b"show_id"]])

    def fix_duplicate_episodes(self):
        """Delete all but the newest row for episodes duplicated on (showid, season, episode)."""
        sql_results = self.connection.select(
            "SELECT showid, season, episode, COUNT(showid) as count FROM tv_episodes GROUP BY showid, season, episode HAVING count > 1")

        for cur_duplicate in sql_results:

            logger.log("Duplicate episode detected! showid: {dupe_id} season: {dupe_season} episode {dupe_episode} count: {dupe_count}".format
                       (dupe_id=str(cur_duplicate[b"showid"]), dupe_season=str(cur_duplicate[b"season"]), dupe_episode=str(cur_duplicate[b"episode"]),
                        dupe_count=str(cur_duplicate[b"count"])),
                       logger.DEBUG)

            # ORDER BY episode_id DESC + LIMIT count-1 keeps the oldest row.
            cur_dupe_results = self.connection.select(
                "SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? and episode = ? ORDER BY episode_id DESC LIMIT ?",
                [cur_duplicate[b"showid"], cur_duplicate[b"season"], cur_duplicate[b"episode"],
                 int(cur_duplicate[b"count"]) - 1]
            )

            for cur_dupe_id in cur_dupe_results:
                logger.log("Deleting duplicate episode with episode_id: " + str(cur_dupe_id[b"episode_id"]))
                self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [cur_dupe_id[b"episode_id"]])

    def fix_orphan_episodes(self):
        """Delete episodes whose show no longer exists in tv_shows."""
        sql_results = self.connection.select(
            "SELECT episode_id, showid, tv_shows.indexer_id FROM tv_episodes LEFT JOIN tv_shows ON tv_episodes.showid=tv_shows.indexer_id WHERE tv_shows.indexer_id is NULL")

        for cur_orphan in sql_results:
            logger.log("Orphan episode detected! episode_id: " + str(cur_orphan[b"episode_id"]) + " showid: " + str(
                cur_orphan[b"showid"]), logger.DEBUG)
            logger.log("Deleting orphan episode with episode_id: " + str(cur_orphan[b"episode_id"]))
            self.connection.action("DELETE FROM tv_episodes WHERE episode_id = ?", [cur_orphan[b"episode_id"]])

    def fix_missing_table_indexes(self):
        """Recreate any of the expected sqlite indexes that are missing."""
        if not self.connection.select("PRAGMA index_info('idx_indexer_id')"):
            logger.log("Missing idx_indexer_id for TV Shows table detected!, fixing...")
            self.connection.action("CREATE UNIQUE INDEX idx_indexer_id ON tv_shows(indexer_id);")

        if not self.connection.select("PRAGMA index_info('idx_tv_episodes_showid_airdate')"):
            logger.log("Missing idx_tv_episodes_showid_airdate for TV Episodes table detected!, fixing...")
            self.connection.action("CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(showid, airdate);")

        if not self.connection.select("PRAGMA index_info('idx_showid')"):
            logger.log("Missing idx_showid for TV Episodes table detected!, fixing...")
            self.connection.action("CREATE INDEX idx_showid ON tv_episodes (showid);")

        if not self.connection.select("PRAGMA index_info('idx_status')"):
            logger.log("Missing idx_status for TV Episodes table detected!, fixing...")
            self.connection.action("CREATE INDEX idx_status ON tv_episodes (status, season, episode, airdate)")

        if not self.connection.select("PRAGMA index_info('idx_sta_epi_air')"):
            logger.log("Missing idx_sta_epi_air for TV Episodes table detected!, fixing...")
            self.connection.action("CREATE INDEX idx_sta_epi_air ON tv_episodes (status, episode, airdate)")

        if not self.connection.select("PRAGMA index_info('idx_sta_epi_sta_air')"):
            logger.log("Missing idx_sta_epi_sta_air for TV Episodes table detected!, fixing...")
            self.connection.action("CREATE INDEX idx_sta_epi_sta_air ON tv_episodes (season, episode, status, airdate)")

    def fix_unaired_episodes(self):
        """Reset future/undated SKIPPED or WANTED episodes back to UNAIRED."""
        curDate = datetime.date.today()

        sql_results = self.connection.select(
            "SELECT episode_id FROM tv_episodes WHERE (airdate > ? or airdate = 1) AND status in (?,?) AND season > 0",
            [curDate.toordinal(), common.SKIPPED, common.WANTED])

        for cur_unaired in sql_results:
            logger.log("Fixing unaired episode status for episode_id: {0}".format(cur_unaired[b"episode_id"]))
            self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ?",
                                   [common.UNAIRED, cur_unaired[b"episode_id"]])

    def fix_tvrage_show_statues(self):
        """Normalize legacy TVRage status strings to Continuing/Ended/Unknown."""
        status_map = {
            'returning series': 'Continuing',
            'canceled/ended': 'Ended',
            'tbd/on the bubble': 'Continuing',
            'in development': 'Continuing',
            'new series': 'Continuing',
            'never aired': 'Ended',
            'final season': 'Continuing',
            'on hiatus': 'Continuing',
            'pilot ordered': 'Continuing',
            'pilot rejected': 'Ended',
            'canceled': 'Ended',
            'ended': 'Ended',
            '': 'Unknown',
        }

        for old_status, new_status in six.iteritems(status_map):
            self.connection.action("UPDATE tv_shows SET status = ? WHERE LOWER(status) = ?", [new_status, old_status])

    def fix_episode_statuses(self):
        """Replace NULL episode statuses with UNKNOWN."""
        sql_results = self.connection.select("SELECT episode_id, showid FROM tv_episodes WHERE status IS NULL")

        for cur_ep in sql_results:
            logger.log("MALFORMED episode status detected! episode_id: " + str(cur_ep[b"episode_id"]) + " showid: " + str(
                cur_ep[b"showid"]), logger.DEBUG)
            logger.log("Fixing malformed episode status with episode_id: " + str(cur_ep[b"episode_id"]))
            self.connection.action("UPDATE tv_episodes SET status = ? WHERE episode_id = ?",
                                   [common.UNKNOWN, cur_ep[b"episode_id"]])

    def fix_invalid_airdates(self):
        """Clamp out-of-range airdates (>= date.max or < 1) to the sentinel 1."""
        sql_results = self.connection.select(
            "SELECT episode_id, showid FROM tv_episodes WHERE airdate >= ? OR airdate < 1",
            [datetime.date.max.toordinal()])

        for bad_airdate in sql_results:
            logger.log("Bad episode airdate detected! episode_id: " + str(bad_airdate[b"episode_id"]) + " showid: " + str(
                bad_airdate[b"showid"]), logger.DEBUG)
            logger.log("Fixing bad episode airdate for episode_id: " + str(bad_airdate[b"episode_id"]))
            self.connection.action("UPDATE tv_episodes SET airdate = '1' WHERE episode_id = ?", [bad_airdate[b"episode_id"]])

    def fix_subtitles_codes(self):
        """Strip invalid language codes from episode subtitle lists.

        Currently disabled in ``check`` (see the commented-out call there).
        """
        sql_results = self.connection.select(
            "SELECT subtitles, episode_id FROM tv_episodes WHERE subtitles != '' AND subtitles_lastsearch < ?;",
            [datetime.datetime(2015, 7, 15, 17, 20, 44, 326380).strftime(dateTimeFormat)]
        )

        for sql_result in sql_results:
            langs = []

            logger.log("Checking subtitle codes for episode_id: {0}, codes: {1}".format(sql_result[b'episode_id'], sql_result[b'subtitles']), logger.DEBUG)

            for subcode in sql_result[b'subtitles'].split(','):
                if not len(subcode) == 3 or subcode not in subtitles.subtitle_code_filter():
                    logger.log("Fixing subtitle codes for episode_id: {0}, invalid code: {1}".format(sql_result[b'episode_id'], subcode), logger.DEBUG)
                    continue

                langs.append(subcode)

            self.connection.action("UPDATE tv_episodes SET subtitles = ?, subtitles_lastsearch = ? WHERE episode_id = ?;",
                                   [','.join(langs), datetime.datetime.now().strftime(dateTimeFormat), sql_result[b'episode_id']])

    def fix_show_nfo_lang(self):
        """Replace a numeric-zero NFO language with an empty string."""
        self.connection.action("UPDATE tv_shows SET lang = '' WHERE lang = 0 or lang = '0'")
def backupDatabase(version):
    """Back up the main DB file before a schema upgrade; abort on failure.

    Parameters
    ----------
    version : int
        Current schema version, used by the helper to name the backup file.
    """
    logger.log("Backing up database before upgrade")
    backed_up = helpers.backupVersionedFile(db.dbFilename(), version)
    if backed_up:
        logger.log("Proceeding with upgrade")
    else:
        # Upgrading without a backup is unrecoverable; bail out hard.
        logger.log_error_and_exit("Database backup failed, abort upgrading database")
# ======================
# = Main DB Migrations =
# ======================
# Add new migrations at the bottom of the list; subclass the previous migration.
class InitialSchema(db.SchemaUpgrade):
    """Create the full schema for a brand-new database.

    Also the gatekeeper for existing databases: refuses to proceed when
    the on-disk db_version is outside [MIN_DB_VERSION, MAX_DB_VERSION].
    """

    def test(self):
        # A db_version table means the schema (of some version) exists.
        return self.hasTable("db_version")

    def execute(self):
        if not self.hasTable("tv_shows") and not self.hasTable("db_version"):
            # Fresh database: create every table and index, then stamp
            # the schema version (43).
            queries = [
                "CREATE TABLE db_version(db_version INTEGER);",
                "CREATE TABLE history(action NUMERIC, date NUMERIC, showid NUMERIC, season NUMERIC, episode NUMERIC, quality NUMERIC, resource TEXT, provider TEXT, version NUMERIC DEFAULT -1);",
                "CREATE TABLE imdb_info(indexer_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC);",
                "CREATE TABLE info(last_backlog NUMERIC, last_indexer NUMERIC, last_proper_search NUMERIC);",
                "CREATE TABLE scene_numbering(indexer TEXT, indexer_id INTEGER, season INTEGER, episode INTEGER, scene_season INTEGER, scene_episode INTEGER, absolute_number NUMERIC, scene_absolute_number NUMERIC, PRIMARY KEY(indexer_id, season, episode));",
                "CREATE TABLE tv_shows(show_id INTEGER PRIMARY KEY, indexer_id NUMERIC, indexer NUMERIC, show_name TEXT, location TEXT, network TEXT, genre TEXT, classification TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, flatten_folders NUMERIC, paused NUMERIC, startyear NUMERIC, air_by_date NUMERIC, lang TEXT, subtitles NUMERIC, notify_list TEXT, imdb_id TEXT, last_update_indexer NUMERIC, dvdorder NUMERIC, archive_firstmatch NUMERIC, rls_require_words TEXT, rls_ignore_words TEXT, sports NUMERIC, anime NUMERIC, scene NUMERIC, default_ep_status NUMERIC DEFAULT -1);",
                "CREATE TABLE tv_episodes(episode_id INTEGER PRIMARY KEY, showid NUMERIC, indexerid NUMERIC, indexer TEXT, name TEXT, season NUMERIC, episode NUMERIC, description TEXT, airdate NUMERIC, hasnfo NUMERIC, hastbn NUMERIC, status NUMERIC, location TEXT, file_size NUMERIC, release_name TEXT, subtitles TEXT, subtitles_searchcount NUMERIC, subtitles_lastsearch TIMESTAMP, is_proper NUMERIC, scene_season NUMERIC, scene_episode NUMERIC, absolute_number NUMERIC, scene_absolute_number NUMERIC, version NUMERIC DEFAULT -1, release_group TEXT);",
                "CREATE TABLE blacklist (show_id INTEGER, range TEXT, keyword TEXT);",
                "CREATE TABLE whitelist (show_id INTEGER, range TEXT, keyword TEXT);",
                "CREATE TABLE xem_refresh (indexer TEXT, indexer_id INTEGER PRIMARY KEY, last_refreshed INTEGER);",
                "CREATE TABLE indexer_mapping (indexer_id INTEGER, indexer NUMERIC, mindexer_id INTEGER, mindexer NUMERIC, PRIMARY KEY (indexer_id, indexer));",
                "CREATE UNIQUE INDEX idx_indexer_id ON tv_shows(indexer_id);",
                "CREATE INDEX idx_showid ON tv_episodes(showid);",
                "CREATE INDEX idx_sta_epi_air ON tv_episodes(status, episode, airdate);",
                "CREATE INDEX idx_sta_epi_sta_air ON tv_episodes(season, episode, status, airdate);",
                "CREATE INDEX idx_status ON tv_episodes(status,season,episode,airdate);",
                "CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(showid, airdate);",
                "INSERT INTO db_version(db_version) VALUES (43);"
            ]
            for query in queries:
                self.connection.action(query)

        else:
            # Existing database: only validate that its version is one
            # this build knows how to migrate.
            cur_db_version = self.checkDBVersion()

            if cur_db_version < MIN_DB_VERSION:
                logger.log_error_and_exit(
                    "Your database version ({cur_db_version}) is too old to migrate from what this version of SickRage supports ({min_db_version}).\n"
                    "Upgrade using a previous version (tag) build 496 to build 501 of SickRage first or remove database file to begin fresh.".format
                    (cur_db_version=str(cur_db_version), min_db_version=str(MIN_DB_VERSION)))

            if cur_db_version > MAX_DB_VERSION:
                logger.log_error_and_exit(
                    "Your database version ({cur_db_version}) has been incremented past what this version of SickRage supports ({max_db_version}).\n"
                    "If you have used other forks of SickRage, your database may be unusable due to their modifications.".format
                    (cur_db_version=str(cur_db_version), max_db_version=str(MAX_DB_VERSION)))
class AddSizeAndSceneNameFields(InitialSchema):
    """Migration 10: add tv_episodes.file_size and tv_episodes.release_name,
    then back-fill both from disk and from the snatch/download history.
    """

    def test(self):
        return self.checkDBVersion() >= 10

    def execute(self):
        backupDatabase(self.checkDBVersion())

        if not self.hasColumn("tv_episodes", "file_size"):
            self.addColumn("tv_episodes", "file_size")

        if not self.hasColumn("tv_episodes", "release_name"):
            self.addColumn("tv_episodes", "release_name", "TEXT", "")

        ep_results = self.connection.select("SELECT episode_id, location, file_size FROM tv_episodes")

        logger.log("Adding file size to all episodes in DB, please be patient")
        for cur_ep in ep_results:
            if not cur_ep[b"location"]:
                continue

            # if there is no size yet then populate it for us
            if (not cur_ep[b"file_size"] or not int(cur_ep[b"file_size"])) and ek(os.path.isfile, cur_ep[b"location"]):
                cur_size = ek(os.path.getsize, cur_ep[b"location"])
                self.connection.action("UPDATE tv_episodes SET file_size = ? WHERE episode_id = ?",
                                       [cur_size, int(cur_ep[b"episode_id"])])

        # check each snatch to see if we can use it to get a release name from
        # (provider = -1 marks a download row; anything else is a snatch)
        history_results = self.connection.select("SELECT * FROM history WHERE provider != -1 ORDER BY date ASC")

        logger.log("Adding release name to all episodes still in history")
        for cur_result in history_results:
            # find the associated download, if there isn't one then ignore it
            download_results = self.connection.select(
                "SELECT resource FROM history WHERE provider = -1 AND showid = ? AND season = ? AND episode = ? AND date > ?",
                [cur_result[b"showid"], cur_result[b"season"], cur_result[b"episode"], cur_result[b"date"]])
            if not download_results:
                logger.log(
                    "Found a snatch in the history for " + cur_result[b"resource"] + " but couldn't find the associated download, skipping it",
                    logger.DEBUG
                )
                continue

            nzb_name = cur_result[b"resource"]
            file_name = ek(os.path.basename, download_results[0][b"resource"])

            # take the extension off the filename, it's not needed
            if '.' in file_name:
                file_name = file_name.rpartition('.')[0]

            # find the associated episode on disk
            ep_results = self.connection.select(
                "SELECT episode_id, status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? AND location != ''",
                [cur_result[b"showid"], cur_result[b"season"], cur_result[b"episode"]])
            if not ep_results:
                logger.log(
                    "The episode " + nzb_name + " was found in history but doesn't exist on disk anymore, skipping",
                    logger.DEBUG)
                continue

            # get the status/quality of the existing ep and make sure it's what we expect
            ep_status, ep_quality = common.Quality.splitCompositeStatus(int(ep_results[0][b"status"]))
            if ep_status != common.DOWNLOADED:
                continue

            if ep_quality != int(cur_result[b"quality"]):
                continue

            # make sure this is actually a real release name and not a season pack or something
            # (prefer the NZB name, fall back to the downloaded file name)
            for cur_name in (nzb_name, file_name):
                logger.log("Checking if " + cur_name + " is actually a good release name", logger.DEBUG)
                try:
                    parse_result = NameParser(False).parse(cur_name)
                except (InvalidNameException, InvalidShowException):
                    continue

                if parse_result.series_name and parse_result.season_number is not None and parse_result.episode_numbers and parse_result.release_group:
                    # if all is well by this point we'll just put the release name into the database
                    self.connection.action("UPDATE tv_episodes SET release_name = ? WHERE episode_id = ?",
                                           [cur_name, ep_results[0][b"episode_id"]])
                    break

        # check each snatch to see if we can use it to get a release name from
        # (second pass: derive names directly from scene-style filenames)
        empty_results = self.connection.select("SELECT episode_id, location FROM tv_episodes WHERE release_name = ''")

        logger.log("Adding release name to all episodes with obvious scene filenames")
        for cur_result in empty_results:

            ep_file_name = ek(os.path.basename, cur_result[b"location"])
            ep_file_name = ek(os.path.splitext, ep_file_name)[0]

            # only want to find real scene names here so anything with a space in it is out
            if ' ' in ep_file_name:
                continue

            try:
                parse_result = NameParser(False).parse(ep_file_name)
            except (InvalidNameException, InvalidShowException):
                continue

            if not parse_result.release_group:
                continue

            logger.log(
                "Name " + ep_file_name + " gave release group of " + parse_result.release_group + ", seems valid",
                logger.DEBUG)
            self.connection.action("UPDATE tv_episodes SET release_name = ? WHERE episode_id = ?",
                                   [ep_file_name, cur_result[b"episode_id"]])

        self.incDBVersion()
class RenameSeasonFolders(AddSizeAndSceneNameFields):
    """Migration 11: rebuild tv_shows under its new layout and invert the
    flatten_folders flag (the old seasonfolders semantics were reversed).
    """

    def test(self):
        return self.checkDBVersion() >= 11

    def execute(self):
        backupDatabase(self.checkDBVersion())

        # A fixed, order-sensitive sequence: recreate the table, copy the
        # rows across, then flip flatten_folders 0<->1 via 2 as a scratch
        # value so the two updates don't collide.
        for statement in (
            "DROP TABLE IF EXISTS tmp_tv_shows",
            "ALTER TABLE tv_shows RENAME TO tmp_tv_shows",
            "CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, location TEXT, show_name TEXT, tvdb_id NUMERIC, network TEXT, genre TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, flatten_folders NUMERIC, paused NUMERIC, startyear NUMERIC, tvr_id NUMERIC, tvr_name TEXT, air_by_date NUMERIC, lang TEXT)",
            "INSERT INTO tv_shows SELECT * FROM tmp_tv_shows",
            "UPDATE tv_shows SET flatten_folders = 2 WHERE flatten_folders = 1",
            "UPDATE tv_shows SET flatten_folders = 1 WHERE flatten_folders = 0",
            "UPDATE tv_shows SET flatten_folders = 0 WHERE flatten_folders = 2",
            "DROP TABLE tmp_tv_shows",
        ):
            self.connection.action(statement)

        self.incDBVersion()
class Add1080pAndRawHDQualities(RenameSeasonFolders):
    """Add support for 1080p related qualities along with RawHD

    Quick overview of what the upgrade needs to do:

           quality   | old  | new
        --------------------------
        hdwebdl      | 1<<3 | 1<<5
        hdbluray     | 1<<4 | 1<<7
        fullhdbluray | 1<<5 | 1<<8
        --------------------------
        rawhdtv      |      | 1<<3
        fullhdtv     |      | 1<<4
        fullhdwebdl  |      | 1<<6
    """

    def test(self):
        return self.checkDBVersion() >= 12

    def _update_status(self, old_status):
        # Split the composite value, remap only its quality half, and
        # recombine with the unchanged status half.
        (status, quality) = common.Quality.splitCompositeStatus(old_status)
        return common.Quality.compositeStatus(status, self._update_quality(quality))

    def _update_quality(self, old_quality):
        """Update bitwise flags to reflect new quality values

        Check flag bits (clear old then set their new locations) starting
        with the highest bits so we dont overwrite data we need later on
        """
        result = old_quality
        # move fullhdbluray from 1<<5 to 1<<8 if set
        if result & (1 << 5):
            result &= ~(1 << 5)
            result |= 1 << 8
        # move hdbluray from 1<<4 to 1<<7 if set
        if result & (1 << 4):
            result &= ~(1 << 4)
            result |= 1 << 7
        # move hdwebdl from 1<<3 to 1<<5 if set
        if result & (1 << 3):
            result &= ~(1 << 3)
            result |= 1 << 5

        return result

    def _update_composite_qualities(self, status):
        """Unpack, Update, Return new quality values

        Unpack the composite archive/initial values.
        Update either qualities if needed.
        Then return the new compsite quality value.
        """
        # High 16 bits = "best"/archive qualities, low 16 = initial.
        best = (status & (0xffff << 16)) >> 16
        initial = status & 0xffff

        best = self._update_quality(best)
        initial = self._update_quality(initial)

        result = ((best << 16) | initial)
        return result

    def execute(self):
        backupDatabase(self.checkDBVersion())

        # update the default quality so we dont grab the wrong qualities after migration
        sickbeard.QUALITY_DEFAULT = self._update_composite_qualities(sickbeard.QUALITY_DEFAULT)
        sickbeard.save_config()

        # upgrade previous HD to HD720p -- shift previous qualities to new placevalues
        old_hd = common.Quality.combineQualities(
            [common.Quality.HDTV, common.Quality.HDWEBDL >> 2, common.Quality.HDBLURAY >> 3], [])
        new_hd = common.Quality.combineQualities([common.Quality.HDTV, common.Quality.HDWEBDL, common.Quality.HDBLURAY],
                                                 [])

        # update ANY -- shift existing qualities and add new 1080p qualities, note that rawHD was not added to the ANY template
        old_any = common.Quality.combineQualities(
            [common.Quality.SDTV, common.Quality.SDDVD, common.Quality.HDTV, common.Quality.HDWEBDL >> 2,
             common.Quality.HDBLURAY >> 3, common.Quality.UNKNOWN], [])
        new_any = common.Quality.combineQualities(
            [common.Quality.SDTV, common.Quality.SDDVD, common.Quality.HDTV, common.Quality.FULLHDTV,
             common.Quality.HDWEBDL, common.Quality.FULLHDWEBDL, common.Quality.HDBLURAY, common.Quality.FULLHDBLURAY,
             common.Quality.UNKNOWN], [])

        # update qualities (including templates)
        logger.log("[1/4] Updating pre-defined templates and the quality for each show...", logger.INFO)
        cl = []
        shows = self.connection.select("SELECT * FROM tv_shows")
        for cur_show in shows:
            if cur_show[b"quality"] == old_hd:
                new_quality = new_hd
            elif cur_show[b"quality"] == old_any:
                new_quality = new_any
            else:
                new_quality = self._update_composite_qualities(cur_show[b"quality"])
            cl.append(["UPDATE tv_shows SET quality = ? WHERE show_id = ?", [new_quality, cur_show[b"show_id"]]])
        self.connection.mass_action(cl)

        # update status that are are within the old hdwebdl (1<<3 which is 8) and better -- exclude unknown (1<<15 which is 32768)
        logger.log("[2/4] Updating the status for the episodes within each show...", logger.INFO)
        cl = []
        episodes = self.connection.select("SELECT * FROM tv_episodes WHERE status < 3276800 AND status >= 800")
        for cur_episode in episodes:
            cl.append(["UPDATE tv_episodes SET status = ? WHERE episode_id = ?",
                       [self._update_status(cur_episode[b"status"]), cur_episode[b"episode_id"]]])
        self.connection.mass_action(cl)

        # make two seperate passes through the history since snatched and downloaded (action & quality) may not always coordinate together

        # update previous history so it shows the correct action
        logger.log("[3/4] Updating history to reflect the correct action...", logger.INFO)
        cl = []
        historyAction = self.connection.select("SELECT * FROM history WHERE action < 3276800 AND action >= 800")
        for cur_entry in historyAction:
            cl.append(["UPDATE history SET action = ? WHERE showid = ? AND date = ?",
                       [self._update_status(cur_entry[b"action"]), cur_entry[b"showid"], cur_entry[b"date"]]])
        self.connection.mass_action(cl)

        # update previous history so it shows the correct quality
        logger.log("[4/4] Updating history to reflect the correct quality...", logger.INFO)
        cl = []
        historyQuality = self.connection.select("SELECT * FROM history WHERE quality < 32768 AND quality >= 8")
        for cur_entry in historyQuality:
            cl.append(["UPDATE history SET quality = ? WHERE showid = ? AND date = ?",
                       [self._update_quality(cur_entry[b"quality"]), cur_entry[b"showid"], cur_entry[b"date"]]])
        self.connection.mass_action(cl)

        self.incDBVersion()

        # cleanup and reduce db if any previous data was removed
        logger.log("Performing a vacuum on the database.", logger.DEBUG)
        self.connection.action("VACUUM")
class AddShowidTvdbidIndex(Add1080pAndRawHDQualities):
    """Migration 13: index tvdb_id (tv_shows) and showid (tv_episodes) to
    speed up searches/queries.
    """

    def test(self):
        return self.checkDBVersion() >= 13

    def execute(self):
        backupDatabase(self.checkDBVersion())

        logger.log("Check for duplicate shows before adding unique index.")
        # The unique index below would fail to build on duplicate tvdb_ids.
        MainSanityCheck(self.connection).fix_duplicate_shows(b'tvdb_id')

        logger.log("Adding index on tvdb_id (tv_shows) and showid (tv_episodes) to speed up searches/queries.")
        # hasTable() doubles as a sqlite_master existence probe for indexes.
        missing = [name for name in ("idx_showid", "idx_tvdb_id") if not self.hasTable(name)]
        if "idx_showid" in missing:
            self.connection.action("CREATE INDEX idx_showid ON tv_episodes (showid);")
        if "idx_tvdb_id" in missing:
            self.connection.action("CREATE UNIQUE INDEX idx_tvdb_id ON tv_shows (tvdb_id);")

        self.incDBVersion()
class AddLastUpdateTVDB(AddShowidTvdbidIndex):
""" Adding column last_update_tvdb to tv_shows for controlling nightly updates """
def test(self):
return self.checkDBVersion() >= 14
def execute(self):
backupDatabase(self.checkDBVersion())
logger.log("Adding column last_update_tvdb to tvshows")
if not self.hasColumn("tv_shows", "last_update_tvdb"):
self.addColumn("tv_shows", "last_update_tvdb", default=1)
self.incDBVersion()
class AddDBIncreaseTo15(AddLastUpdateTVDB):
    """Schema migration to v15: no schema change, version bump only."""

    def test(self):
        return self.checkDBVersion() >= 15

    def execute(self):
        backupDatabase(self.checkDBVersion())
        self.incDBVersion()


class AddIMDbInfo(AddDBIncreaseTo15):
    """Schema migration to v16: create the imdb_info table and tv_shows.imdb_id."""

    def test(self):
        return self.checkDBVersion() >= 16

    def execute(self):
        backupDatabase(self.checkDBVersion())
        self.connection.action(
            "CREATE TABLE imdb_info (tvdb_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC)")
        if not self.hasColumn("tv_shows", "imdb_id"):
            self.addColumn("tv_shows", "imdb_id")
        self.incDBVersion()


class AddProperNamingSupport(AddIMDbInfo):
    """Schema migration to v17: add tv_episodes.is_proper."""

    def test(self):
        return self.checkDBVersion() >= 17

    def execute(self):
        backupDatabase(self.checkDBVersion())
        self.addColumn("tv_episodes", "is_proper")
        self.incDBVersion()


class AddEmailSubscriptionTable(AddProperNamingSupport):
    """Schema migration to v18: add tv_shows.notify_list (notification address list)."""

    def test(self):
        return self.checkDBVersion() >= 18

    def execute(self):
        backupDatabase(self.checkDBVersion())
        # Explicit None default rather than addColumn's built-in default.
        self.addColumn('tv_shows', 'notify_list', 'TEXT', None)
        self.incDBVersion()


class AddProperSearch(AddEmailSubscriptionTable):
    """Schema migration to v19: add info.last_proper_search timestamp."""

    def test(self):
        return self.checkDBVersion() >= 19

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column last_proper_search to info")
        if not self.hasColumn("info", "last_proper_search"):
            self.addColumn("info", "last_proper_search", default=1)
        self.incDBVersion()


class AddDvdOrderOption(AddProperSearch):
    """Schema migration to v20: add tv_shows.dvdorder flag."""

    def test(self):
        return self.checkDBVersion() >= 20

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column dvdorder to tvshows")
        if not self.hasColumn("tv_shows", "dvdorder"):
            self.addColumn("tv_shows", "dvdorder", "NUMERIC", "0")
        self.incDBVersion()


class AddSubtitlesSupport(AddDvdOrderOption):
    """Schema migration to v21: add subtitle tracking columns to tv_shows/tv_episodes."""

    def test(self):
        return self.checkDBVersion() >= 21

    def execute(self):
        backupDatabase(self.checkDBVersion())
        # NOTE(review): original indentation lost in extraction; the guard is
        # assumed to cover only the tv_shows column, matching the one-statement
        # guards used by the sibling migrations -- confirm against upstream.
        if not self.hasColumn("tv_shows", "subtitles"):
            self.addColumn("tv_shows", "subtitles")
        self.addColumn("tv_episodes", "subtitles", "TEXT", "")
        self.addColumn("tv_episodes", "subtitles_searchcount")
        self.addColumn("tv_episodes", "subtitles_lastsearch", "TIMESTAMP", str(datetime.datetime.min))
        self.incDBVersion()
class ConvertTVShowsToIndexerScheme(AddSubtitlesSupport):
    """Schema migration to v22: rebuild tv_shows with generic indexer columns
    (tvdb_id becomes indexer_id; adds indexer/classification columns)."""

    def test(self):
        return self.checkDBVersion() >= 22

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Converting TV Shows table to Indexer Scheme...")
        # SQLite cannot restructure a table in place: rename, recreate, copy, drop.
        self.connection.action("DROP TABLE IF EXISTS tmp_tv_shows")
        self.connection.action("ALTER TABLE tv_shows RENAME TO tmp_tv_shows")
        self.connection.action("CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, indexer_id NUMERIC, indexer NUMERIC, show_name TEXT, location TEXT, network TEXT, genre TEXT, classification TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, flatten_folders NUMERIC, paused NUMERIC, startyear NUMERIC, air_by_date NUMERIC, lang TEXT, subtitles NUMERIC, notify_list TEXT, imdb_id TEXT, last_update_indexer NUMERIC, dvdorder NUMERIC)")
        self.connection.action(
            "INSERT INTO tv_shows (show_id, indexer_id, show_name, location, network, genre, runtime, quality, airs, status, flatten_folders, paused, startyear, air_by_date, lang, subtitles, dvdorder) " +
            "SELECT show_id, tvdb_id as indexer_id, show_name, location, network, genre, runtime, quality, airs, status, flatten_folders, paused, startyear, air_by_date, lang, subtitles, dvdorder FROM tmp_tv_shows"
        )
        self.connection.action("DROP TABLE tmp_tv_shows")
        self.connection.action("CREATE UNIQUE INDEX idx_indexer_id ON tv_shows (indexer_id);")
        # Backfill the new columns: everything so far came from TVDB (indexer 1).
        self.connection.action("UPDATE tv_shows SET classification = 'Scripted'")
        self.connection.action("UPDATE tv_shows SET indexer = 1")
        self.incDBVersion()


class ConvertTVEpisodesToIndexerScheme(ConvertTVShowsToIndexerScheme):
    """Schema migration to v23: rebuild tv_episodes with generic indexer columns
    (tvdbid becomes indexerid) and recreate the query indexes."""

    def test(self):
        return self.checkDBVersion() >= 23

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Converting TV Episodes table to Indexer Scheme...")
        self.connection.action("DROP TABLE IF EXISTS tmp_tv_episodes")
        self.connection.action("ALTER TABLE tv_episodes RENAME TO tmp_tv_episodes")
        self.connection.action(
            "CREATE TABLE tv_episodes (episode_id INTEGER PRIMARY KEY, showid NUMERIC, indexerid NUMERIC, indexer NUMERIC, name TEXT, season NUMERIC, episode NUMERIC, description TEXT, airdate NUMERIC, hasnfo NUMERIC, hastbn NUMERIC, status NUMERIC, location TEXT, file_size NUMERIC, release_name TEXT, subtitles TEXT, subtitles_searchcount NUMERIC, subtitles_lastsearch TIMESTAMP, is_proper NUMERIC)")
        self.connection.action(
            "INSERT INTO tv_episodes (episode_id, showid, indexerid, name, season, episode, description, airdate, hasnfo, hastbn, status, location, file_size, release_name, subtitles, subtitles_searchcount, subtitles_lastsearch) " +
            "SELECT episode_id, showid, tvdbid as indexerid, name, season, episode, description, airdate, hasnfo, hastbn, status, location, file_size, release_name, subtitles, subtitles_searchcount, subtitles_lastsearch FROM tmp_tv_episodes"
        )
        self.connection.action("DROP TABLE tmp_tv_episodes")
        self.connection.action("CREATE INDEX idx_tv_episodes_showid_airdate ON tv_episodes(showid,airdate);")
        self.connection.action("CREATE INDEX idx_showid ON tv_episodes (showid);")
        self.connection.action("CREATE INDEX idx_status ON tv_episodes (status,season,episode,airdate)")
        self.connection.action("CREATE INDEX idx_sta_epi_air ON tv_episodes (status,episode, airdate)")
        self.connection.action("CREATE INDEX idx_sta_epi_sta_air ON tv_episodes (season,episode, status, airdate)")
        self.connection.action("UPDATE tv_episodes SET indexer = 1, is_proper = 0")
        self.incDBVersion()


class ConvertIMDBInfoToIndexerScheme(ConvertTVEpisodesToIndexerScheme):
    """Schema migration to v24: rename imdb_info's tvdb_id key column to indexer_id."""

    def test(self):
        return self.checkDBVersion() >= 24

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Converting IMDB Info table to Indexer Scheme...")
        self.connection.action("DROP TABLE IF EXISTS tmp_imdb_info")
        # The table may not exist on very old databases, so every step is guarded.
        if self.hasTable("imdb_info"):
            self.connection.action("ALTER TABLE imdb_info RENAME TO tmp_imdb_info")
        self.connection.action(
            "CREATE TABLE imdb_info (indexer_id INTEGER PRIMARY KEY, imdb_id TEXT, title TEXT, year NUMERIC, akas TEXT, runtimes NUMERIC, genres TEXT, countries TEXT, country_codes TEXT, certificates TEXT, rating TEXT, votes INTEGER, last_update NUMERIC)")
        if self.hasTable("tmp_imdb_info"):
            self.connection.action("INSERT INTO imdb_info SELECT * FROM tmp_imdb_info")
        self.connection.action("DROP TABLE IF EXISTS tmp_imdb_info")
        self.incDBVersion()


class ConvertInfoToIndexerScheme(ConvertIMDBInfoToIndexerScheme):
    """Schema migration to v25: rename info.last_tvdb to last_indexer."""

    def test(self):
        return self.checkDBVersion() >= 25

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Converting Info table to Indexer Scheme...")
        self.connection.action("DROP TABLE IF EXISTS tmp_info")
        self.connection.action("ALTER TABLE info RENAME TO tmp_info")
        self.connection.action(
            "CREATE TABLE info (last_backlog NUMERIC, last_indexer NUMERIC, last_proper_search NUMERIC)")
        self.connection.action(
            "INSERT INTO info SELECT * FROM tmp_info")
        self.connection.action("DROP TABLE tmp_info")
        self.incDBVersion()
class AddArchiveFirstMatchOption(ConvertInfoToIndexerScheme):
    """Schema migration to v26: add tv_shows.archive_firstmatch."""

    def test(self):
        return self.checkDBVersion() >= 26

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column archive_firstmatch to tvshows")
        if not self.hasColumn("tv_shows", "archive_firstmatch"):
            self.addColumn("tv_shows", "archive_firstmatch", "NUMERIC", "0")
        self.incDBVersion()


class AddSceneNumbering(AddArchiveFirstMatchOption):
    """Schema migration to v27: (re)create the scene_numbering mapping table."""

    def test(self):
        return self.checkDBVersion() >= 27

    def execute(self):
        backupDatabase(self.checkDBVersion())
        # Drop and recreate unconditionally: any existing table used an older layout.
        if self.hasTable("scene_numbering"):
            self.connection.action("DROP TABLE scene_numbering")
        self.connection.action(
            "CREATE TABLE scene_numbering (indexer TEXT, indexer_id INTEGER, season INTEGER, episode INTEGER, scene_season INTEGER, scene_episode INTEGER, PRIMARY KEY (indexer_id, season, episode, scene_season, scene_episode))")
        self.incDBVersion()


class ConvertIndexerToInteger(AddSceneNumbering):
    """Schema migration to v28: store indexer names as integers (1=tvdb, 2=tvrage)."""

    def test(self):
        return self.checkDBVersion() >= 28

    def execute(self):
        backupDatabase(self.checkDBVersion())
        cl = []
        logger.log("Converting Indexer to Integer ...", logger.INFO)
        cl.append(["UPDATE tv_shows SET indexer = ? WHERE LOWER(indexer) = ?", ["1", "tvdb"]])
        cl.append(["UPDATE tv_shows SET indexer = ? WHERE LOWER(indexer) = ?", ["2", "tvrage"]])
        cl.append(["UPDATE tv_episodes SET indexer = ? WHERE LOWER(indexer) = ?", ["1", "tvdb"]])
        cl.append(["UPDATE tv_episodes SET indexer = ? WHERE LOWER(indexer) = ?", ["2", "tvrage"]])
        cl.append(["UPDATE scene_numbering SET indexer = ? WHERE LOWER(indexer) = ?", ["1", "tvdb"]])
        cl.append(["UPDATE scene_numbering SET indexer = ? WHERE LOWER(indexer) = ?", ["2", "tvrage"]])
        self.connection.mass_action(cl)
        self.incDBVersion()


class AddRequireAndIgnoreWords(ConvertIndexerToInteger):
    """ Adding column rls_require_words and rls_ignore_words to tv_shows """

    def test(self):
        return self.checkDBVersion() >= 29

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column rls_require_words to tvshows")
        if not self.hasColumn("tv_shows", "rls_require_words"):
            self.addColumn("tv_shows", "rls_require_words", "TEXT", "")
        logger.log("Adding column rls_ignore_words to tvshows")
        if not self.hasColumn("tv_shows", "rls_ignore_words"):
            self.addColumn("tv_shows", "rls_ignore_words", "TEXT", "")
        self.incDBVersion()


class AddSportsOption(AddRequireAndIgnoreWords):
    """Schema migration to v30: add tv_shows.sports and migrate sports shows
    that were previously flagged as air_by_date."""

    def test(self):
        return self.checkDBVersion() >= 30

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column sports to tvshows")
        if not self.hasColumn("tv_shows", "sports"):
            self.addColumn("tv_shows", "sports", "NUMERIC", "0")
        if self.hasColumn("tv_shows", "air_by_date") and self.hasColumn("tv_shows", "sports"):
            # update sports column
            # NOTE(review): "[4/4]" looks copy-pasted from an earlier migration's
            # multi-step log numbering; this migration has no numbered steps.
            logger.log("[4/4] Updating tv_shows to reflect the correct sports value...", logger.INFO)
            cl = []
            historyQuality = self.connection.select(
                "SELECT * FROM tv_shows WHERE LOWER(classification) = 'sports' AND air_by_date = 1 AND sports = 0")
            for cur_entry in historyQuality:
                cl.append(["UPDATE tv_shows SET sports = ? WHERE show_id = ?",
                           [cur_entry[b"air_by_date"], cur_entry[b"show_id"]]])
                cl.append(["UPDATE tv_shows SET air_by_date = 0 WHERE show_id = ?", [cur_entry[b"show_id"]]])
            self.connection.mass_action(cl)
        self.incDBVersion()
class AddSceneNumberingToTvEpisodes(AddSportsOption):
    """Schema migration to v31: add scene_season/scene_episode to tv_episodes."""

    def test(self):
        return self.checkDBVersion() >= 31

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column scene_season and scene_episode to tvepisodes")
        # NULL default: "no scene mapping known" is distinct from season/episode 0.
        self.addColumn("tv_episodes", "scene_season", "NUMERIC", "NULL")
        self.addColumn("tv_episodes", "scene_episode", "NUMERIC", "NULL")
        self.incDBVersion()
class AddAnimeTVShow(AddSceneNumberingToTvEpisodes):
    """Schema migration to v32: add the tv_shows.anime flag."""

    def test(self):
        # Migration already applied once the schema version is at least 32.
        return self.checkDBVersion() >= 32

    def execute(self):
        backupDatabase(self.checkDBVersion())
        # Fixed log message: the column is added to tv_shows, not tv_episodes
        # (the addColumn call below has always targeted tv_shows).
        logger.log("Adding column anime to tv_shows")
        self.addColumn("tv_shows", "anime", "NUMERIC", "0")
        self.incDBVersion()
class AddAbsoluteNumbering(AddAnimeTVShow):
    """Schema migration to v33: add tv_episodes.absolute_number (anime numbering)."""

    def test(self):
        return self.checkDBVersion() >= 33

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column absolute_number to tv_episodes")
        self.addColumn("tv_episodes", "absolute_number", "NUMERIC", "0")
        self.incDBVersion()


class AddSceneAbsoluteNumbering(AddAbsoluteNumbering):
    """Schema migration to v34: add absolute numbering columns to scene_numbering."""

    def test(self):
        return self.checkDBVersion() >= 34

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column absolute_number and scene_absolute_number to scene_numbering")
        self.addColumn("scene_numbering", "absolute_number", "NUMERIC", "0")
        self.addColumn("scene_numbering", "scene_absolute_number", "NUMERIC", "0")
        self.incDBVersion()


class AddAnimeBlacklistWhitelist(AddSceneAbsoluteNumbering):
    """Schema migration to v35: create per-show release-group black/white lists."""

    def test(self):
        return self.checkDBVersion() >= 35

    def execute(self):
        backupDatabase(self.checkDBVersion())
        cl = [
            ["CREATE TABLE blacklist (show_id INTEGER, range TEXT, keyword TEXT)"],
            ["CREATE TABLE whitelist (show_id INTEGER, range TEXT, keyword TEXT)"]
        ]
        self.connection.mass_action(cl)
        self.incDBVersion()


class AddSceneAbsoluteNumbering2(AddAnimeBlacklistWhitelist):
    """Schema migration to v36: add tv_episodes.scene_absolute_number."""

    def test(self):
        return self.checkDBVersion() >= 36

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column scene_absolute_number to tv_episodes")
        self.addColumn("tv_episodes", "scene_absolute_number", "NUMERIC", "0")
        self.incDBVersion()


class AddXemRefresh(AddSceneAbsoluteNumbering2):
    """Schema migration to v37: create the xem_refresh bookkeeping table."""

    def test(self):
        return self.checkDBVersion() >= 37

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Creating table xem_refresh")
        self.connection.action(
            "CREATE TABLE xem_refresh (indexer TEXT, indexer_id INTEGER PRIMARY KEY, last_refreshed INTEGER)")
        self.incDBVersion()


class AddSceneToTvShows(AddXemRefresh):
    """Schema migration to v38: add tv_shows.scene (scene-numbering toggle)."""

    def test(self):
        return self.checkDBVersion() >= 38

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column scene to tv_shows")
        self.addColumn("tv_shows", "scene", "NUMERIC", "0")
        self.incDBVersion()


class AddIndexerMapping(AddSceneToTvShows):
    """Schema migration to v39: (re)create the cross-indexer id mapping table."""

    def test(self):
        return self.checkDBVersion() >= 39

    def execute(self):
        backupDatabase(self.checkDBVersion())
        # Any pre-existing table is discarded; mappings can be rebuilt on demand.
        if self.hasTable("indexer_mapping"):
            self.connection.action("DROP TABLE indexer_mapping")
        logger.log("Adding table indexer_mapping")
        self.connection.action(
            "CREATE TABLE indexer_mapping (indexer_id INTEGER, indexer NUMERIC, mindexer_id INTEGER, mindexer NUMERIC, PRIMARY KEY (indexer_id, indexer))")
        self.incDBVersion()
class AddVersionToTvEpisodes(AddIndexerMapping):
    """Schema migration to v40: add release version/group tracking columns."""

    def test(self):
        return self.checkDBVersion() >= 40

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column version to tv_episodes and history")
        # -1 means "version unknown" for previously stored episodes/history rows.
        self.addColumn("tv_episodes", "version", "NUMERIC", "-1")
        self.addColumn("tv_episodes", "release_group", "TEXT", "")
        self.addColumn("history", "version", "NUMERIC", "-1")
        self.incDBVersion()


class AddDefaultEpStatusToTvShows(AddVersionToTvEpisodes):
    """Schema migration to v41: add per-show default episode status."""

    def test(self):
        return self.checkDBVersion() >= 41

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Adding column default_ep_status to tv_shows")
        self.addColumn("tv_shows", "default_ep_status", "NUMERIC", "-1")
        self.incDBVersion()


class AlterTVShowsFieldTypes(AddDefaultEpStatusToTvShows):
    """Schema migration to v42: rebuild tv_shows so every column has its
    intended declared type (indexer/default_ep_status become NUMERIC)."""

    def test(self):
        return self.checkDBVersion() >= 42

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Converting column indexer and default_ep_status field types to numeric")
        # SQLite cannot alter column types in place: rename, recreate, copy, drop.
        self.connection.action("DROP TABLE IF EXISTS tmp_tv_shows")
        self.connection.action("ALTER TABLE tv_shows RENAME TO tmp_tv_shows")
        self.connection.action("CREATE TABLE tv_shows (show_id INTEGER PRIMARY KEY, indexer_id NUMERIC, indexer NUMERIC, show_name TEXT, location TEXT, network TEXT, genre TEXT, classification TEXT, runtime NUMERIC, quality NUMERIC, airs TEXT, status TEXT, flatten_folders NUMERIC, paused NUMERIC, startyear NUMERIC, air_by_date NUMERIC, lang TEXT, subtitles NUMERIC, notify_list TEXT, imdb_id TEXT, last_update_indexer NUMERIC, dvdorder NUMERIC, archive_firstmatch NUMERIC, rls_require_words TEXT, rls_ignore_words TEXT, sports NUMERIC, anime NUMERIC, scene NUMERIC, default_ep_status NUMERIC)")
        self.connection.action("INSERT INTO tv_shows SELECT * FROM tmp_tv_shows")
        self.connection.action("DROP TABLE tmp_tv_shows")
        self.incDBVersion()
class AddMinorVersion(AlterTVShowsFieldTypes):
    """Migration introducing major.minor schema versioning: adds the
    db_version.db_minor_version column and the inc_* helpers."""

    def test(self):
        # Applied once we're at v42+ AND the minor-version column exists.
        return self.checkDBVersion() >= 42 and self.hasColumn(b'db_version', b'db_minor_version')

    def incDBVersion(self):
        # Major-only bumps are ambiguous once minor versions exist.
        # NOTE(review): this override returns None and performs no update.
        warnings.warn("Deprecated: Use inc_major_version or inc_minor_version instead", DeprecationWarning)

    def inc_major_version(self):
        """Bump the major version, resetting the minor version to 0."""
        major_version, minor_version = self.connection.version
        major_version += 1
        minor_version = 0
        self.connection.action("UPDATE db_version SET db_version = ?, db_minor_version = ?", [major_version, minor_version])
        return self.connection.version

    def inc_minor_version(self):
        """Bump only the minor version."""
        major_version, minor_version = self.connection.version
        minor_version += 1
        self.connection.action("UPDATE db_version SET db_version = ?, db_minor_version = ?", [major_version, minor_version])
        return self.connection.version

    def execute(self):
        backupDatabase(self.checkDBVersion())
        logger.log("Add minor version numbers to database")
        self.addColumn(b'db_version', b'db_minor_version')
        self.inc_minor_version()
        logger.log('Updated to: {0:d}.{1:d}'.format(*self.connection.version))
class UseSickRageMetadataForSubtitle(AlterTVShowsFieldTypes):
    """
    Add a minor version for adding a show setting to use SR metadata for subtitles
    """

    def test(self):
        # Column-based detection: no version number is bumped in execute(),
        # so the column's presence is the only "already applied" marker.
        return self.hasColumn('tv_shows', 'sub_use_sr_metadata')

    def execute(self):
        backupDatabase(self.checkDBVersion())
        self.addColumn('tv_shows', 'sub_use_sr_metadata', "NUMERIC", "0")


class ResetDBVersion(UseSickRageMetadataForSubtitle):
    """Force the stored schema version back to MAX_DB_VERSION.0."""

    def test(self):
        # Always False so execute() runs on every upgrade pass.
        return False

    def execute(self):
        self.connection.action("UPDATE db_version SET db_version = ?, db_minor_version = ?", [MAX_DB_VERSION, 0])
|
yaojingwu1992/XlsxWriter | refs/heads/master | xlsxwriter/test/comparison/test_data_validation05.py | 8 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        # NOTE(review): this module is test_data_validation05 but compares
        # against data_validation02.xlsx -- presumably the over-long second
        # validation is expected to be dropped, leaving output identical to
        # test 02; confirm against the other data_validation tests.
        filename = 'data_validation02.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        # '_test_5' prefix keeps the generated file distinct per test module.
        self.got_filename = test_dir + '_test_5' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of an XlsxWriter file data validation."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # A valid list validation with in-range title/message lengths.
        worksheet.data_validation(
            'C2', {'validate': 'list',
                   'value': ['Foo', 'Bar', 'Baz'],
                   'input_title': 'This is the input title',
                   'input_message': 'This is the input message',
                   }
        )

        # The following should be rejected because the input message is too long.
        input_title = 'This is the longest input title1'
        input_message = 'This is the longest input message ' + ('a' * 222)
        values = [
            "Foobar", "Foobas", "Foobat", "Foobau", "Foobav", "Foobaw",
            "Foobax", "Foobay", "Foobaz", "Foobba", "Foobbb", "Foobbc",
            "Foobbd", "Foobbe", "Foobbf", "Foobbg", "Foobbh", "Foobbi",
            "Foobbj", "Foobbk", "Foobbl", "Foobbm", "Foobbn", "Foobbo",
            "Foobbp", "Foobbq", "Foobbr", "Foobbs", "Foobbt", "Foobbu",
            "Foobbv", "Foobbw", "Foobbx", "Foobby", "Foobbz", "Foobca",
            "End"
        ]

        # Ignore the warnings raised by data_validation().
        # NOTE(review): filterwarnings('ignore') mutates global warning state
        # for the remainder of the test run; a catch_warnings() context would
        # scope it to this test.
        import warnings
        warnings.filterwarnings('ignore')

        worksheet.data_validation(
            'D6', {'validate': 'list',
                   'value': values,
                   'input_title': input_title,
                   'input_message': input_message,
                   }
        )

        workbook.close()

        self.assertExcelEqual()
|
mdworks2016/work_development | refs/heads/master | Python/20_Third_Certification/venv/lib/python3.7/site-packages/pip/_vendor/idna/core.py | 12 | from . import idnadata
import bisect
import unicodedata
import re
import sys
from .intranges import intranges_contain
# Canonical combining class assigned to virama characters in Unicode.
_virama_combining_class = 9
# ACE prefix marking a Punycode-encoded (A-) label.
_alabel_prefix = b'xn--'
# All code points IDNA treats as label separators: FULL STOP plus its
# ideographic, fullwidth and halfwidth variants.
_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')

# Python 2/3 compatibility shims: 'unicode' and 'unichr' don't exist on 3.x.
if sys.version_info[0] >= 3:
    unicode = str
    unichr = chr
# Exception hierarchy: everything derives from IDNAError, which itself
# subclasses UnicodeError so callers can catch codec-style failures broadly.
class IDNAError(UnicodeError):
    """ Base exception for all IDNA-encoding related problems """
    pass


class IDNABidiError(IDNAError):
    """ Exception when bidirectional requirements are not satisfied """
    pass


class InvalidCodepoint(IDNAError):
    """ Exception when a disallowed or unallocated codepoint is used """
    pass


class InvalidCodepointContext(IDNAError):
    """ Exception when the codepoint is not valid in the context it is used """
    pass
def _combining_class(cp):
    """Return the canonical combining class of code point *cp*.

    Raises ValueError for characters unknown to the unicodedata database
    (unicodedata.name() itself raises for unnamed code points).
    """
    char = unichr(cp)
    combining = unicodedata.combining(char)
    if combining != 0:
        return combining
    # A combining class of 0 can also mean "character unknown to the
    # database"; probing the name distinguishes the two cases.
    if not unicodedata.name(char):
        raise ValueError("Unknown character in unicodedata")
    return combining
def _is_script(cp, script):
return intranges_contain(ord(cp), idnadata.scripts[script])
def _punycode(s):
return s.encode('punycode')
def _unot(s):
return 'U+{0:04X}'.format(s)
def valid_label_length(label):
    """Return True if *label* fits within the DNS 63-octet label limit."""
    return len(label) <= 63


def valid_string_length(label, trailing_dot):
    """Return True if the whole domain fits the DNS name-length limit.

    The limit is 254 when a trailing root dot is present, 253 otherwise.
    """
    limit = 254 if trailing_dot else 253
    return len(label) <= limit
def check_bidi(label, check_ltr=False):
    """Validate *label* against the IDNA Bidi rule (RFC 5893, section 2).

    If the label contains no right-to-left characters the check is skipped,
    unless *check_ltr* forces validation of left-to-right labels as well.
    Raises IDNABidiError on violation; returns True otherwise.
    """
    # Bidi rules should only be applied if string contains RTL characters
    bidi_label = False
    for (idx, cp) in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)
        if direction == '':
            # String likely comes from a newer version of Unicode
            raise IDNABidiError('Unknown directionality in label {0} at position {1}'.format(repr(label), idx))
        if direction in ['R', 'AL', 'AN']:
            bidi_label = True
    if not bidi_label and not check_ltr:
        return True

    # Bidi rule 1: the first character fixes the label's direction.
    direction = unicodedata.bidirectional(label[0])
    if direction in ['R', 'AL']:
        rtl = True
    elif direction == 'L':
        rtl = False
    else:
        raise IDNABidiError('First codepoint in label {0} must be directionality L, R or AL'.format(repr(label)))

    valid_ending = False
    number_type = False  # tracks which digit class ('AN' or 'EN') was seen first
    for (idx, cp) in enumerate(label, 1):
        direction = unicodedata.bidirectional(cp)

        if rtl:
            # Bidi rule 2: characters allowed in an RTL label.
            if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
                raise IDNABidiError('Invalid direction for codepoint at position {0} in a right-to-left label'.format(idx))
            # Bidi rule 3: label must end with R/AL/EN/AN (NSM may trail).
            if direction in ['R', 'AL', 'EN', 'AN']:
                valid_ending = True
            elif direction != 'NSM':
                valid_ending = False
            # Bidi rule 4: AN and EN digits must not be mixed.
            if direction in ['AN', 'EN']:
                if not number_type:
                    number_type = direction
                else:
                    if number_type != direction:
                        raise IDNABidiError('Can not mix numeral types in a right-to-left label')
        else:
            # Bidi rule 5: characters allowed in an LTR label.
            if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
                raise IDNABidiError('Invalid direction for codepoint at position {0} in a left-to-right label'.format(idx))
            # Bidi rule 6: label must end with L or EN (NSM may trail).
            if direction in ['L', 'EN']:
                valid_ending = True
            elif direction != 'NSM':
                valid_ending = False

    if not valid_ending:
        raise IDNABidiError('Label ends with illegal codepoint directionality')

    return True
def check_initial_combiner(label):
    """Reject labels whose first character is a combining mark (category M*)."""
    first_category = unicodedata.category(label[0])
    if first_category[0] == 'M':
        raise IDNAError('Label begins with an illegal combining character')
    return True


def check_hyphen_ok(label):
    """Enforce the RFC 5891 hyphen restrictions on *label*."""
    # Positions 3-4 are reserved for the ACE prefix marker ('xn--').
    has_reserved_hyphens = label[2:4] == '--'
    if has_reserved_hyphens:
        raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
    if label[0] == '-' or label[-1] == '-':
        raise IDNAError('Label must not start or end with a hyphen')
    return True


def check_nfc(label):
    """Require *label* to already be in Unicode Normalization Form C."""
    normalized = unicodedata.normalize('NFC', label)
    if normalized != label:
        raise IDNAError('Label must be in Normalization Form C')
def valid_contextj(label, pos):
    """Check a CONTEXTJ code point at *pos* in *label* (RFC 5892 Appendix A).

    Applies to ZERO WIDTH NON-JOINER (U+200C) and ZERO WIDTH JOINER (U+200D).
    Returns True if the joiner is permitted in its context; may propagate
    ValueError from _combining_class() for unknown adjacent characters.
    """
    cp_value = ord(label[pos])

    if cp_value == 0x200c:
        # ZWNJ (rule A.1): allowed immediately after a virama...
        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True

        # ...or between joining-type L/D and R/D characters, skipping any
        # transparent (T) characters on either side.
        ok = False
        for i in range(pos-1, -1, -1):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord('T'):
                continue
            if joining_type in [ord('L'), ord('D')]:
                ok = True
                break

        if not ok:
            return False

        ok = False
        for i in range(pos+1, len(label)):
            joining_type = idnadata.joining_types.get(ord(label[i]))
            if joining_type == ord('T'):
                continue
            if joining_type in [ord('R'), ord('D')]:
                ok = True
                break
        return ok

    if cp_value == 0x200d:
        # ZWJ (rule A.2): only allowed immediately after a virama.
        if pos > 0:
            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
                return True
        return False

    else:
        # Not a CONTEXTJ code point at all.
        return False
def valid_contexto(label, pos, exception=False):
    """Check a CONTEXTO code point at *pos* in *label* (RFC 5892 Appendix A).

    Covers MIDDLE DOT, GREEK LOWER NUMERAL SIGN, HEBREW GERESH/GERSHAYIM,
    KATAKANA MIDDLE DOT and the two Arabic-Indic digit ranges.
    NOTE(review): the *exception* parameter is accepted but never used, and
    code points outside the handled set fall off the end, returning None
    (falsy) implicitly -- callers only pass registered CONTEXTO code points.
    """
    cp_value = ord(label[pos])

    if cp_value == 0x00b7:
        # MIDDLE DOT (A.3): only between two 'l' characters (Catalan "l·l").
        if 0 < pos < len(label)-1:
            if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
                return True
        return False

    elif cp_value == 0x0375:
        # GREEK LOWER NUMERAL SIGN (A.4): must precede a Greek character.
        if pos < len(label)-1 and len(label) > 1:
            return _is_script(label[pos + 1], 'Greek')
        return False

    elif cp_value == 0x05f3 or cp_value == 0x05f4:
        # HEBREW GERESH / GERSHAYIM (A.5/A.6): must follow a Hebrew character.
        if pos > 0:
            return _is_script(label[pos - 1], 'Hebrew')
        return False

    elif cp_value == 0x30fb:
        # KATAKANA MIDDLE DOT (A.7): label must contain at least one
        # Hiragana, Katakana or Han character.
        for cp in label:
            if cp == u'\u30fb':
                continue
            if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
                return True
        return False

    elif 0x660 <= cp_value <= 0x669:
        # ARABIC-INDIC DIGITS (A.8): must not mix with EXTENDED ARABIC-INDIC.
        for cp in label:
            if 0x6f0 <= ord(cp) <= 0x06f9:
                return False
        return True

    elif 0x6f0 <= cp_value <= 0x6f9:
        # EXTENDED ARABIC-INDIC DIGITS (A.9): must not mix with ARABIC-INDIC.
        for cp in label:
            if 0x660 <= ord(cp) <= 0x0669:
                return False
        return True
def check_label(label):
    """Validate a single IDNA label (RFC 5891 section 4.2 checks).

    Accepts str or UTF-8 encoded bytes. Raises an IDNAError subclass on any
    violation: empty label, NFC form, hyphen placement, leading combining
    mark, per-code-point PVALID/CONTEXTJ/CONTEXTO status, and the Bidi rule.
    """
    if isinstance(label, (bytes, bytearray)):
        label = label.decode('utf-8')
    if len(label) == 0:
        raise IDNAError('Empty Label')

    check_nfc(label)
    check_hyphen_ok(label)
    check_initial_combiner(label)

    for (pos, cp) in enumerate(label):
        cp_value = ord(cp)
        if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
            continue
        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
            try:
                if not valid_contextj(label, pos):
                    raise InvalidCodepointContext('Joiner {0} not allowed at position {1} in {2}'.format(
                        _unot(cp_value), pos+1, repr(label)))
            except ValueError:
                # _combining_class() raises ValueError for characters unknown
                # to the unicodedata database.
                raise IDNAError('Unknown codepoint adjacent to joiner {0} at position {1} in {2}'.format(
                    _unot(cp_value), pos+1, repr(label)))
        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
            if not valid_contexto(label, pos):
                raise InvalidCodepointContext('Codepoint {0} not allowed at position {1} in {2}'.format(_unot(cp_value), pos+1, repr(label)))
        else:
            raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label)))

    check_bidi(label)
def alabel(label):
    """Convert a single label to its ASCII (A-label) form, returned as bytes.

    Labels that are already pure ASCII are validated via ulabel() and
    returned unchanged; otherwise the label is checked and Punycode-encoded
    with the 'xn--' ACE prefix. Raises IDNAError for invalid, empty or
    over-long labels.
    """
    try:
        label = label.encode('ascii')
        # Validates the (possibly already ACE-encoded) label; result discarded.
        ulabel(label)
        if not valid_label_length(label):
            raise IDNAError('Label too long')
        return label
    except UnicodeEncodeError:
        # Non-ASCII label: fall through to IDNA encoding below.
        pass

    if not label:
        raise IDNAError('No Input')

    label = unicode(label)
    check_label(label)
    label = _punycode(label)
    label = _alabel_prefix + label

    if not valid_label_length(label):
        raise IDNAError('Label too long')

    return label
def ulabel(label):
    """Convert a single label to its Unicode (U-label) form, returned as str.

    Accepts str or bytes. A-labels (with the 'xn--' prefix) are Punycode-
    decoded and then validated; plain labels are validated as-is.
    """
    if not isinstance(label, (bytes, bytearray)):
        try:
            label = label.encode('ascii')
        except UnicodeEncodeError:
            # Already a non-ASCII unicode label; just validate and return it.
            check_label(label)
            return label

    label = label.lower()
    if label.startswith(_alabel_prefix):
        label = label[len(_alabel_prefix):]
        # Punycode decoding would silently accept a trailing hyphen; reject it.
        if label.decode('ascii')[-1] == '-':
            raise IDNAError('A-label must not end with a hyphen')
    else:
        check_label(label)
        return label.decode('ascii')

    label = label.decode('punycode')
    check_label(label)
    return label
def uts46_remap(domain, std3_rules=True, transitional=False):
    """Re-map the characters in the string according to UTS46 processing.

    Each character is looked up in the uts46data table and kept, replaced,
    dropped or rejected according to its status; the result is NFC-normalized.
    Raises InvalidCodepoint for disallowed characters.
    """
    # Imported lazily: the table is large and only needed when uts46 is requested.
    from .uts46data import uts46data
    output = u""
    try:
        for pos, char in enumerate(domain):
            code_point = ord(char)
            # Code points below 256 index the table directly; others are found
            # by bisecting on (code_point, status) tuples.
            uts46row = uts46data[code_point if code_point < 256 else
                bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
            status = uts46row[1]
            replacement = uts46row[2] if len(uts46row) == 3 else None
            # Status codes: V=valid, D=deviation, 3=valid under non-STD3 rules,
            # M=mapped, I=ignored; anything else is disallowed.
            if (status == "V" or
                    (status == "D" and not transitional) or
                    (status == "3" and not std3_rules and replacement is None)):
                output += char
            elif replacement is not None and (status == "M" or
                    (status == "3" and not std3_rules) or
                    (status == "D" and transitional)):
                output += replacement
            elif status != "I":
                # Disallowed: reuse the IndexError path below for reporting.
                raise IndexError()
            # status == "I": the character is silently dropped.
        return unicodedata.normalize("NFC", output)
    except IndexError:
        raise InvalidCodepoint(
            "Codepoint {0} not allowed at position {1} in {2}".format(
                _unot(code_point), pos + 1, repr(domain)))
def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False):
    """Encode a domain name to its IDNA ASCII form, returned as bytes.

    strict: split labels on '.' only instead of all IDNA dot variants.
    uts46: apply UTS46 remapping first; std3_rules/transitional tune it.
    Raises IDNAError for empty domains/labels and over-long results.
    """
    if isinstance(s, (bytes, bytearray)):
        s = s.decode("ascii")
    if uts46:
        s = uts46_remap(s, std3_rules, transitional)
    trailing_dot = False
    result = []
    if strict:
        labels = s.split('.')
    else:
        labels = _unicode_dots_re.split(s)
    if not labels or labels == ['']:
        raise IDNAError('Empty domain')
    if labels[-1] == '':
        # A trailing dot (root label) is remembered and restored on output.
        del labels[-1]
        trailing_dot = True
    for label in labels:
        s = alabel(label)
        if s:
            result.append(s)
        else:
            raise IDNAError('Empty label')
    if trailing_dot:
        result.append(b'')
    s = b'.'.join(result)
    if not valid_string_length(s, trailing_dot):
        raise IDNAError('Domain too long')
    return s
def decode(s, strict=False, uts46=False, std3_rules=False):
    """Decode a domain name from IDNA form to Unicode text (str).

    strict: split labels on '.' only instead of all IDNA dot variants.
    uts46: apply UTS46 remapping first (always non-transitional here).
    Raises IDNAError for empty domains or labels.
    """
    if isinstance(s, (bytes, bytearray)):
        s = s.decode("ascii")
    if uts46:
        s = uts46_remap(s, std3_rules, False)
    trailing_dot = False
    result = []
    if not strict:
        labels = _unicode_dots_re.split(s)
    else:
        labels = s.split(u'.')
    if not labels or labels == ['']:
        raise IDNAError('Empty domain')
    if not labels[-1]:
        # A trailing dot (root label) is remembered and restored on output.
        del labels[-1]
        trailing_dot = True
    for label in labels:
        s = ulabel(label)
        if s:
            result.append(s)
        else:
            raise IDNAError('Empty label')
    if trailing_dot:
        result.append(u'')
    return u'.'.join(result)
|
smishenk/blink-crosswalk | refs/heads/master | Tools/Scripts/webkitpy/common/system/systemhost_mock.py | 46 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from StringIO import StringIO
from webkitpy.common.system.environment import Environment
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system.user_mock import MockUser
from webkitpy.common.system.workspace_mock import MockWorkspace
class MockSystemHost(object):
    """In-memory stand-in for SystemHost used in tests: mock executive,
    filesystem, user, platform and workspace, with StringIO-backed streams."""

    def __init__(self, log_executive=False, executive_throws_when_run=None, os_name=None, os_version=None, executive=None, filesystem=None):
        self.executable = 'python'
        # Callers may inject pre-configured mocks; otherwise defaults are built.
        self.executive = executive or MockExecutive(should_log=log_executive, should_throw_when_run=executive_throws_when_run)
        self.filesystem = filesystem or MockFileSystem()
        self.user = MockUser()
        self.platform = MockPlatformInfo()
        if os_name:
            self.platform.os_name = os_name
        if os_version:
            self.platform.os_version = os_version

        # FIXME: Should this take pointers to the filesystem and the executive?
        self.workspace = MockWorkspace()

        # Standard streams are StringIO so tests can inspect what was written.
        self.stdin = StringIO()
        self.stdout = StringIO()
        self.stderr = StringIO()

    def copy_current_environment(self):
        """Return a fresh Environment carrying a marker variable for tests."""
        return Environment({"MOCK_ENVIRON_COPY": '1'})

    def print_(self, *args, **kwargs):
        """print()-compatible helper; accepts sep/end/stream keyword arguments
        and writes to self.stdout by default."""
        sep = kwargs.get('sep', ' ')
        end = kwargs.get('end', '\n')
        stream = kwargs.get('stream', self.stdout)
        stream.write(sep.join([str(arg) for arg in args]) + end)
|
NetDBNCKU/GAE-Conference-Web-App | refs/heads/master | django/conf/locale/ro/__init__.py | 12133432 | |
ArcherSys/ArcherSys | refs/heads/master | Lib/site-packages/cms/tests/apphooks.py | 6 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import sys
from django.core.urlresolvers import clear_url_caches, reverse
from django.utils import six
from django.utils.timezone import now
from cms.api import create_page, create_title
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from cms.appresolver import applications_page_check, clear_app_resolvers, get_app_patterns
from cms.cms_toolbar import PlaceholderToolbar
from cms.models import Title
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.testcases import CMSTestCase, SettingsOverrideTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from cms.tests.menu_utils import DumbPageLanguageUrl
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.compat.dj import get_user_model
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import force_language
from menus.utils import DefaultLanguageChanger
# Names of the sample apphook classes used throughout these tests, and the
# dotted path of the module that registers them (imported via INSTALLED_APPS
# discovery); APP_MODULE is evicted from sys.modules between tests.
APP_NAME = 'SampleApp'
NS_APP_NAME = 'NamespacedApp'
APP_MODULE = "cms.test_utils.project.sampleapp.cms_app"
class ApphooksTestCase(CMSTestCase):
    """Integration tests for CMS apphooks.

    Covers apphook registration (explicit CMS_APPHOOKS setting vs implicit
    INSTALLED_APPS discovery), URL resolution for plain, namespaced and
    per-instance apphooks, permission handling, i18n behaviour and toolbar
    integration.

    NOTE(review): apphook URLconfs are cached at module level, so every test
    must aggressively clear the resolver/url caches and re-import the url
    modules -- see setUp/tearDown/reload_urls.
    """
    def setUp(self):
        # Start from a clean slate: cached resolvers, Django's URL cache and
        # an already-imported sample apphook module would otherwise leak
        # state between tests.
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        self.reload_urls()
        apphook_pool.clear()
    def tearDown(self):
        # Mirror setUp so the next test (possibly outside this class) also
        # starts clean.
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        self.reload_urls()
        apphook_pool.clear()
    def reload_urls(self):
        """Evict all url modules involved in these tests so Django re-imports
        them (and thus re-evaluates apphook patterns) on next resolution."""
        from django.conf import settings
        url_modules = [
            'cms.urls',
            # TODO: Add here intermediary modules which may
            #       include() the 'cms.urls' if it isn't included
            #       directly in the root urlconf.
            # '...',
            'cms.test_utils.project.second_cms_urls_for_apphook_tests',
            'cms.test_utils.project.urls_for_apphook_tests',
            settings.ROOT_URLCONF,
        ]
        clear_app_resolvers()
        clear_url_caches()
        for module in url_modules:
            if module in sys.modules:
                del sys.modules[module]
    def create_base_structure(self, apphook, title_langs, namespace=None):
        """Create home -> child_page -> child_child_page in 'en' and 'de',
        attaching `apphook` (optionally under `namespace`) to the deepest page.

        Returns the public Title for `title_langs` when it is a single
        language code, otherwise a list of public Titles (one per language).
        Also stores the created superuser on self.superuser.
        """
        apphook_pool.clear()
        superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
        self.superuser = superuser
        page = create_page("home", "nav_playground.html", "en",
                           created_by=superuser, published=True)
        create_title('de', page.get_title(), page)
        page.publish('de')
        child_page = create_page("child_page", "nav_playground.html", "en",
                                 created_by=superuser, published=True, parent=page)
        create_title('de', child_page.get_title(), child_page)
        child_page.publish('de')
        child_child_page = create_page("child_child_page", "nav_playground.html",
                                       "en", created_by=superuser, published=True, parent=child_page, apphook=apphook,
                                       apphook_namespace=namespace)
        create_title("de", child_child_page.get_title(), child_child_page)
        child_child_page.publish('de')
        # publisher_public is set to draft on publish, issue with onetoone reverse
        child_child_page = self.reload(child_child_page)
        if isinstance(title_langs, six.string_types):
            titles = child_child_page.publisher_public.get_title_obj(title_langs)
        else:
            titles = [child_child_page.publisher_public.get_title_obj(l) for l in title_langs]
        self.reload_urls()
        return titles
    def test_explicit_apphooks(self):
        """
        Test explicit apphook loading with the CMS_APPHOOKS setting.
        """
        apphooks = (
            '%s.%s' % (APP_MODULE, APP_NAME),
        )
        with SettingsOverride(CMS_APPHOOKS=apphooks):
            apphook_pool.clear()
            hooks = apphook_pool.get_apphooks()
            app_names = [hook[0] for hook in hooks]
            self.assertEqual(len(hooks), 1)
            self.assertEqual(app_names, [APP_NAME])
            apphook_pool.clear()
    def test_implicit_apphooks(self):
        """
        Test implicit apphook loading with INSTALLED_APPS cms_app.py
        """
        apps = ['cms.test_utils.project.sampleapp']
        with SettingsOverride(INSTALLED_APPS=apps, ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            apphook_pool.clear()
            hooks = apphook_pool.get_apphooks()
            app_names = [hook[0] for hook in hooks]
            self.assertEqual(len(hooks), 4)
            self.assertIn(NS_APP_NAME, app_names)
            self.assertIn(APP_NAME, app_names)
            apphook_pool.clear()
    def test_apphook_on_root(self):
        """An apphook attached to the root page serves its own template there,
        while sibling pages without an apphook render normally."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = create_page("apphooked-page", "nav_playground.html", "en",
                               created_by=superuser, published=True, apphook="SampleApp")
            blank_page = create_page("not-apphooked-page", "nav_playground.html", "en",
                                     created_by=superuser, published=True, apphook="", slug='blankapp')
            english_title = page.title_set.all()[0]
            self.assertEqual(english_title.language, 'en')
            create_title("de", "aphooked-page-de", page)
            self.assertTrue(page.publish('en'))
            self.assertTrue(page.publish('de'))
            self.assertTrue(blank_page.publish('en'))
            with force_language("en"):
                response = self.client.get(self.get_pages_root())
                self.assertTemplateUsed(response, 'sampleapp/home.html')
                self.assertContains(response, '<--noplaceholder-->')
                response = self.client.get('/en/blankapp/')
                self.assertTemplateUsed(response, 'nav_playground.html')
            apphook_pool.clear()
    def test_apphook_on_root_reverse(self):
        """reverse() for an apphook view on the root page must not produce a
        URL starting with a double slash."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = create_page("apphooked-page", "nav_playground.html", "en",
                               created_by=superuser, published=True, apphook="SampleApp")
            create_title("de", "aphooked-page-de", page)
            self.assertTrue(page.publish('de'))
            self.assertTrue(page.publish('en'))
            self.reload_urls()
            self.assertFalse(reverse('sample-settings').startswith('//'))
            apphook_pool.clear()
    def test_get_page_for_apphook(self):
        """applications_page_check() resolves an apphook URL back to the page
        carrying the apphook, per language."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title, de_title = self.create_base_structure(APP_NAME, ['en', 'de'])
            with force_language("en"):
                path = reverse('sample-settings')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page.pk)
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/home.html')
            self.assertContains(response, en_title.title)
            with force_language("de"):
                path = reverse('sample-settings')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'de'
            attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash and language prefix
            self.assertEqual(attached_to_page.pk, de_title.page.pk)
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/home.html')
            self.assertContains(response, de_title.title)
            apphook_pool.clear()
    def test_apphook_permissions(self):
        """Apphook views honour the login_required flag of the page they are
        attached to (anonymous requests get redirected)."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title, de_title = self.create_base_structure(APP_NAME, ['en', 'de'])
            with force_language("en"):
                path = reverse('sample-settings')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            page = en_title.page.publisher_public
            page.login_required = True
            page.save()
            page.publish('en')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 302)
            apphook_pool.clear()
    def test_apphooks_with_excluded_permissions(self):
        """URLs an apphook marks as permission-exempt stay accessible even on
        a login_required page; the rest redirect."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title = self.create_base_structure('SampleAppWithExcludedPermissions', 'en')
            with force_language("en"):
                excluded_path = reverse('excluded:example')
                not_excluded_path = reverse('not_excluded:example')
            page = en_title.page.publisher_public
            page.login_required = True
            page.save()
            page.publish('en')
            excluded_response = self.client.get(excluded_path)
            not_excluded_response = self.client.get(not_excluded_path)
            self.assertEqual(excluded_response.status_code, 200)
            self.assertEqual(not_excluded_response.status_code, 302)
            apphook_pool.clear()
    def test_get_page_for_apphook_on_preview_or_edit(self):
        """Page resolution for apphook URLs also works in edit/preview mode
        (toolbar edit-on parameter present)."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls_3'):
            if get_user_model().USERNAME_FIELD == 'email':
                superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin@admin.com')
            else:
                superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
            page = create_page("home", "nav_playground.html", "en",
                               created_by=superuser, published=True, apphook=APP_NAME)
            create_title('de', page.get_title(), page)
            page.publish('en')
            page.publish('de')
            page.save()
            public_page = page.get_public_object()
            with self.login_user_context(superuser):
                with force_language("en"):
                    path = reverse('sample-settings')
                    request = self.get_request(path + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
                    request.LANGUAGE_CODE = 'en'
                    attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
                    response = self.client.get(path+"?edit")
                    self.assertContains(response, '?redirect=')
                with force_language("de"):
                    path = reverse('sample-settings')
                    request = self.get_request(path + '?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
                    request.LANGUAGE_CODE = 'de'
                    attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
                    self.assertEqual(attached_to_page.pk, public_page.pk)
    def test_get_root_page_for_apphook_with_instance_namespace(self):
        """A namespaced apphook's root resolves identically through the app
        namespace and the instance namespace, back to the hooked page."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
            self.reload_urls()
            with force_language("en"):
                reverse("example_app:example")
                reverse("example1:example")
                reverse("example2:example")
                path = reverse('namespaced_app_ns:sample-root')
                path_instance = reverse('instance_ns:sample-root')
            self.assertEqual(path, path_instance)
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page.pk)
            apphook_pool.clear()
    def test_get_child_page_for_apphook_with_instance_namespace(self):
        """Sub-URLs of a namespaced apphook reverse the same through app
        namespace, instance namespace, and current_app."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
            with force_language("en"):
                path = reverse('namespaced_app_ns:sample-settings')
                path_instance1 = reverse('instance_ns:sample-settings')
                path_instance2 = reverse('namespaced_app_ns:sample-settings', current_app='instance_ns')
            self.assertEqual(path, path_instance1)
            self.assertEqual(path, path_instance2)
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page_id)
            apphook_pool.clear()
    def test_get_sub_page_for_apphook_with_implicit_current_app(self):
        """When namespace equals the app namespace, the current-app view
        renders and reports that namespace."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title = self.create_base_structure(NS_APP_NAME, 'en', 'namespaced_app_ns')
            with force_language("en"):
                path = reverse('namespaced_app_ns:current-app')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page.pk)
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/app.html')
            self.assertContains(response, 'namespaced_app_ns')
            self.assertContains(response, path)
            apphook_pool.clear()
    def test_default_language_changer_with_implicit_current_app(self):
        """DefaultLanguageChanger maps an apphook URL to its translated
        counterpart ('/page' slug becomes '/Seite' in German)."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            titles = self.create_base_structure(NS_APP_NAME, ['en', 'de'], 'namespaced_app_ns') # nopyflakes
            self.reload_urls()
            with force_language("en"):
                path = reverse('namespaced_app_ns:translated-url')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            url = DefaultLanguageChanger(request)('en')
            self.assertEqual(url, path)
            url = DefaultLanguageChanger(request)('de')
            self.assertEqual(url, '/de%s' % path[3:].replace('/page', '/Seite'))
            apphook_pool.clear()
    def test_get_i18n_apphook_with_explicit_current_app(self):
        """Two instances of the same namespaced apphook can be reversed with
        explicit current_app in both languages without raising."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            titles = self.create_base_structure(NS_APP_NAME, ['en', 'de'], 'instance_1')
            public_de_title = titles[1]
            de_title = Title.objects.get(page=public_de_title.page.publisher_draft, language="de")
            de_title.slug = "de"
            de_title.save()
            de_title.page.publish('de')
            page2 = create_page("page2", "nav_playground.html",
                                "en", created_by=self.superuser, published=True, parent=de_title.page.parent,
                                apphook=NS_APP_NAME,
                                apphook_namespace="instance_2")
            create_title("de", "de_title", page2, slug="slug")
            page2.publish('de')
            clear_app_resolvers()
            clear_url_caches()
            if APP_MODULE in sys.modules:
                del sys.modules[APP_MODULE]
            self.reload_urls()
            with force_language("de"):
                reverse('namespaced_app_ns:current-app', current_app="instance_1")
                reverse('namespaced_app_ns:current-app', current_app="instance_2")
                reverse('namespaced_app_ns:current-app')
            with force_language("en"):
                reverse('namespaced_app_ns:current-app', current_app="instance_1")
                reverse('namespaced_app_ns:current-app', current_app="instance_2")
                reverse('namespaced_app_ns:current-app')
    def test_apphook_include_extra_parameters(self):
        """Extra options passed through the apphook's include() reach the
        rendered view."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            self.create_base_structure(NS_APP_NAME, ['en', 'de'], 'instance_1')
            with force_language("en"):
                path = reverse('namespaced_app_ns:extra_second')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, 'someopts')
    def test_get_sub_page_for_apphook_with_explicit_current_app(self):
        """With an explicit instance namespace, the current-app view reports
        that instance namespace."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns')
            with force_language("en"):
                path = reverse('namespaced_app_ns:current-app')
            request = self.get_request(path)
            request.LANGUAGE_CODE = 'en'
            attached_to_page = applications_page_check(request, path=path[1:]) # strip leading slash
            self.assertEqual(attached_to_page.pk, en_title.page.pk)
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/app.html')
            self.assertContains(response, 'instance_ns')
            self.assertContains(response, path)
            apphook_pool.clear()
    def test_include_urlconf(self):
        """URLs from both the apphook's own urlconf and an include()d urlconf
        resolve and render, in both languages."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            self.create_base_structure(APP_NAME, 'en')
            path = reverse('extra_second')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test included urlconf")
            path = reverse('extra_first')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test urlconf")
            with force_language("de"):
                path = reverse('extra_first')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test urlconf")
            with force_language("de"):
                path = reverse('extra_second')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test included urlconf")
            apphook_pool.clear()
    def test_apphook_breaking_under_home_with_new_path_caching(self):
        with SettingsOverride(CMS_PERMISSION=False, ROOT_URLCONF='cms.test_utils.project.urls_2'):
            home = create_page("home", "nav_playground.html", "en", published=True)
            child = create_page("child", "nav_playground.html", "en", published=True, parent=home)
            # not-home is what breaks stuff, because it contains the slug of the home page
            not_home = create_page("not-home", "nav_playground.html", "en", published=True, parent=child)
            create_page("subchild", "nav_playground.html", "en", published=True, parent=not_home, apphook='SampleApp')
            with force_language("en"):
                self.reload_urls()
                urlpatterns = get_app_patterns()
                resolver = urlpatterns[0]
                url = resolver.reverse('sample-root')
                self.assertEqual(url, 'child/not-home/subchild/')
    def test_apphook_urlpattern_order(self):
        # this one includes the actual cms.urls, so it can be tested if
        # they are loaded in the correct order (the cms page pattern must be last)
        # (the other testcases replicate the inclusion code and thus don't test this)
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls'):
            self.create_base_structure(APP_NAME, 'en')
            path = reverse('extra_second')
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/extra.html')
            self.assertContains(response, "test included urlconf")
    def test_apphooks_receive_url_params(self):
        # make sure that urlparams actually reach the apphook views
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.urls'):
            self.create_base_structure(APP_NAME, 'en')
            path = reverse('sample-params', kwargs=dict(my_params='is-my-param-really-in-the-context-QUESTIONMARK'))
            response = self.client.get(path)
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'sampleapp/home.html')
            self.assertContains(response, 'my_params: is-my-param-really-in-the-context-QUESTIONMARK')
    def test_multiple_apphooks(self):
        # test for #1538
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.third_urls_for_apphook_tests'):
            apphook_pool.clear()
            superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
            create_page("home", "nav_playground.html", "en", created_by=superuser, published=True, )
            create_page("apphook1-page", "nav_playground.html", "en",
                        created_by=superuser, published=True, apphook="SampleApp")
            create_page("apphook2-page", "nav_playground.html", "en",
                        created_by=superuser, published=True, apphook="SampleApp2")
            reverse('sample-root')
            reverse('sample2-root')
            apphook_pool.clear()
    def test_apphook_pool_register_returns_apphook(self):
        """apphook_pool.register must return the class it decorates, on both
        the normal and the early-return code paths."""
        @apphook_pool.register
        class TestApp(CMSApp):
            name = "Test App"
        self.assertIsNotNone(TestApp)
        # Now test the quick return codepath, when apphooks is not empty
        apphook_pool.apphooks.append("foo")
        @apphook_pool.register
        class TestApp2(CMSApp):
            name = "Test App 2"
        self.assertIsNotNone(TestApp2)
    def test_toolbar_current_app_namespace(self):
        """Toolbars attached to the apphook's app are marked is_current_app
        when the request targets that apphook (including decorated views)."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title = self.create_base_structure(NS_APP_NAME, 'en', 'instance_ns') # nopyflakes
            with force_language("en"):
                path = reverse('namespaced_app_ns:sample-settings')
            request = self.get_request(path)
            toolbar = CMSToolbar(request)
            self.assertTrue(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbar.CategoryToolbar'].is_current_app)
            self.assertFalse(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbar.MyTitleExtensionToolbar'].is_current_app)
            # Testing a decorated view
            with force_language("en"):
                path = reverse('namespaced_app_ns:sample-exempt')
            request = self.get_request(path)
            toolbar = CMSToolbar(request)
            self.assertEqual(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbar.CategoryToolbar'].app_path,
                             'cms.test_utils.project.sampleapp')
            self.assertTrue(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbar.CategoryToolbar'].is_current_app)
            self.assertEqual(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbar.MyTitleExtensionToolbar'].app_path,
                             'cms.test_utils.project.sampleapp')
            self.assertFalse(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbar.MyTitleExtensionToolbar'].is_current_app)
    def test_toolbar_current_app_apphook_with_implicit_current_app(self):
        """Same as above but for an apphook using its implicit (app-equal)
        namespace."""
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.second_urls_for_apphook_tests'):
            en_title = self.create_base_structure(NS_APP_NAME, 'en', 'namespaced_app_ns') # nopyflakes
            with force_language("en"):
                path = reverse('namespaced_app_ns:current-app')
            request = self.get_request(path)
            toolbar = CMSToolbar(request)
            self.assertEqual(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbar.CategoryToolbar'].app_path,
                             'cms.test_utils.project.sampleapp')
            self.assertTrue(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbar.CategoryToolbar'].is_current_app)
            self.assertEqual(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbar.MyTitleExtensionToolbar'].app_path,
                             'cms.test_utils.project.sampleapp')
            self.assertFalse(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbar.MyTitleExtensionToolbar'].is_current_app)
    def test_toolbar_no_namespace(self):
        # Test with a basic application with no defined app_name and no namespace
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.placeholderapp_urls'):
            self.create_base_structure(APP_NAME, 'en')
            path = reverse('detail', kwargs={'id': 20})
            request = self.get_request(path)
            toolbar = CMSToolbar(request)
            self.assertFalse(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbar.CategoryToolbar'].is_current_app)
            self.assertFalse(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbar.MyTitleExtensionToolbar'].is_current_app)
            self.assertTrue(toolbar.toolbars['cms.test_utils.project.placeholderapp.cms_toolbar.Example1Toolbar'].is_current_app)
    def test_toolbar_multiple_supported_apps(self):
        # Test with a basic application with no defined app_name and no namespace
        with SettingsOverride(ROOT_URLCONF='cms.test_utils.project.placeholderapp_urls'):
            self.create_base_structure(APP_NAME, 'en')
            path = reverse('detail', kwargs={'id': 20})
            request = self.get_request(path)
            toolbar = CMSToolbar(request)
            self.assertEqual(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbar.CategoryToolbar'].app_path,
                             'cms.test_utils.project.placeholderapp')
            self.assertFalse(toolbar.toolbars['cms.test_utils.project.sampleapp.cms_toolbar.CategoryToolbar'].is_current_app)
            self.assertEqual(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbar.MyTitleExtensionToolbar'].app_path,
                             'cms.test_utils.project.placeholderapp')
            self.assertFalse(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbar.MyTitleExtensionToolbar'].is_current_app)
            self.assertEqual(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbar.MyPageExtensionToolbar'].app_path,
                             'cms.test_utils.project.placeholderapp')
            self.assertTrue(toolbar.toolbars['cms.test_utils.project.extensionapp.cms_toolbar.MyPageExtensionToolbar'].is_current_app)
            self.assertEqual(toolbar.toolbars['cms.test_utils.project.placeholderapp.cms_toolbar.Example1Toolbar'].app_path,
                             'cms.test_utils.project.placeholderapp')
            self.assertTrue(toolbar.toolbars['cms.test_utils.project.placeholderapp.cms_toolbar.Example1Toolbar'].is_current_app)
    def test_toolbar_staff(self):
        # Test that the toolbar contains the edit mode switcher if placeholders are available
        apphooks = (
            'cms.test_utils.project.placeholderapp.cms_app.Example1App',
        )
        with SettingsOverride(CMS_APPHOOKS=apphooks, ROOT_URLCONF='cms.test_utils.project.placeholderapp_urls'):
            self.create_base_structure('Example1App', 'en')
            ex1 = Example1.objects.create(char_1='1', char_2='2', char_3='3', char_4='4', date_field=now())
            path = reverse('example_detail', kwargs={'pk': ex1.pk})
            self.user = self._create_user('admin_staff', True, True)
            response = self.client.get(path+"?edit")
            toolbar = CMSToolbar(response.context['request'])
            toolbar.populate()
            placeholder_toolbar = PlaceholderToolbar(response.context['request'], toolbar, True, path)
            placeholder_toolbar.populate()
            placeholder_toolbar.init_placeholders_from_request()
            placeholder_toolbar.add_structure_mode()
            self.assertEqual(len(placeholder_toolbar.toolbar.get_right_items()), 1)
            self.user = self._create_user('staff', True, False)
            response = self.client.get(path+"?edit")
            toolbar = CMSToolbar(response.context['request'])
            toolbar.populate()
            placeholder_toolbar = PlaceholderToolbar(response.context['request'], toolbar, True, path)
            placeholder_toolbar.populate()
            placeholder_toolbar.init_placeholders_from_request()
            placeholder_toolbar.add_structure_mode()
            self.assertEqual(len(placeholder_toolbar.toolbar.get_right_items()), 1)
            self.user = None
class ApphooksPageLanguageUrlTestCase(SettingsOverrideTestCase):
    """Tests for the page_language_url template tag on apphook pages.

    Uses a fixed ROOT_URLCONF override; like ApphooksTestCase it must clear
    resolver/url caches around every test because apphook patterns are cached.
    """
    settings_overrides = {'ROOT_URLCONF': 'cms.test_utils.project.second_urls_for_apphook_tests'}
    def setUp(self):
        # Drop cached resolvers/urls and the sample apphook module so this
        # test sees a freshly built URL configuration.
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        self.reload_urls()
    def tearDown(self):
        clear_app_resolvers()
        clear_url_caches()
        if APP_MODULE in sys.modules:
            del sys.modules[APP_MODULE]
        apphook_pool.clear()
    def reload_urls(self):
        """Evict the relevant url modules so they are re-imported on the next
        URL resolution."""
        from django.conf import settings
        url_modules = [
            'cms.urls',
            'cms.test_utils.project.second_cms_urls_for_apphook_tests',
            settings.ROOT_URLCONF,
        ]
        clear_app_resolvers()
        clear_url_caches()
        for module in url_modules:
            if module in sys.modules:
                del sys.modules[module]
    def test_page_language_url_for_apphook(self):
        """page_language_url must translate the page-slug part of an apphook
        URL while keeping the apphook's own path suffix, and fall back to the
        default slug for languages without a translated title ('fr')."""
        apphook_pool.clear()
        superuser = get_user_model().objects.create_superuser('admin', 'admin@admin.com', 'admin')
        page = create_page("home", "nav_playground.html", "en",
                           created_by=superuser)
        create_title('de', page.get_title(), page)
        page.publish('en')
        page.publish('de')
        child_page = create_page("child_page", "nav_playground.html", "en",
                                 created_by=superuser, parent=page)
        create_title('de', child_page.get_title(), child_page)
        child_page.publish('en')
        child_page.publish('de')
        child_child_page = create_page("child_child_page", "nav_playground.html",
                                       "en", created_by=superuser, parent=child_page, apphook='SampleApp')
        create_title("de", '%s_de' % child_child_page.get_title(), child_child_page)
        child_child_page.publish('en')
        child_child_page.publish('de')
        # publisher_public is set to draft on publish, issue with one to one reverse
        child_child_page = self.reload(child_child_page)
        with force_language("en"):
            path = reverse('extra_first')
        request = self.get_request(path)
        request.LANGUAGE_CODE = 'en'
        request.current_page = child_child_page
        fake_context = {'request': request}
        tag = DumbPageLanguageUrl()
        output = tag.get_context(fake_context, 'en')
        url = output['content']
        self.assertEqual(url, '/en/child_page/child_child_page/extra_1/')
        output = tag.get_context(fake_context, 'de')
        url = output['content']
        # look the extra "_de"
        self.assertEqual(url, '/de/child_page/child_child_page_de/extra_1/')
        output = tag.get_context(fake_context, 'fr')
        url = output['content']
        self.assertEqual(url, '/fr/child_page/child_child_page/extra_1/')
        apphook_pool.clear()
|
windyuuy/opera | refs/heads/master | chromium/src/tools/grit/grit/format/policy_templates/writers/xml_formatted_writer.py | 10 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from grit.format.policy_templates.writers import template_writer
class XMLFormattedWriter(template_writer.TemplateWriter):
  '''Helper class for generating XML-based templates.

  Provides small wrappers around xml.dom node construction plus a
  pretty-printer that, unlike minidom's toprettyxml(), never inserts
  whitespace inside text nodes.  (Python 2 codebase: dict.iteritems is
  used deliberately.)
  '''

  def AddElement(self, parent, name, attrs=None, text=None):
    '''
    Adds a new XML Element as a child to an existing element or the Document.

    Args:
      parent: An XML element or the document, where the new element will be
        added.
      name: The name of the new element.
      attrs: A dictionary of the attributes' names and values for the new
        element.  Defaults to no attributes.
      text: Text content for the new element.

    Returns:
      The created new element.
    '''
    # Idiom fix: compare against None with 'is', and avoid a mutable
    # default argument.
    if attrs is None:
      attrs = {}
    doc = parent.ownerDocument
    element = doc.createElement(name)
    for key, value in attrs.iteritems():
      element.setAttribute(key, value)
    if text:
      element.appendChild(doc.createTextNode(text))
    parent.appendChild(element)
    return element

  def AddText(self, parent, text):
    '''Adds text to a parent node.
    '''
    doc = parent.ownerDocument
    parent.appendChild(doc.createTextNode(text))

  def AddAttribute(self, parent, name, value):
    '''Adds a new attribute to the parent Element. If an attribute with the
    given name already exists then it will be replaced.
    '''
    doc = parent.ownerDocument
    attribute = doc.createAttribute(name)
    attribute.value = value
    parent.setAttributeNode(attribute)

  def ToPrettyXml(self, doc):
    '''Returns the document serialized with one tag per line and two-space
    indentation, without adding whitespace around text content.
    '''
    # return doc.toprettyxml(indent='  ')
    # The above pretty-printer does not print the doctype and adds spaces
    # around texts, e.g.:
    #  <string>
    #       value of the string
    #  </string>
    # This is problematic both for the OSX Workgroup Manager (plist files) and
    # the Windows Group Policy Editor (admx files). What they need instead:
    #  <string>value of string</string>
    # So we use the poor man's pretty printer here. It assumes that there are
    # no mixed-content nodes.
    # Get all the XML content in a one-line string.
    xml = doc.toxml()
    # Determine where the line breaks will be. (They will only be between tags.)
    lines = xml[1:len(xml) - 1].split('><')
    indent = ''
    res = ''
    # Determine indent for each line.
    for i, line in enumerate(lines):
      if line[0] == '/':
        # If the current line starts with a closing tag, decrease indent before
        # printing.
        indent = indent[2:]
      lines[i] = indent + '<' + line + '>'
      if (line[0] not in ['/', '?', '!'] and '</' not in line and
          line[-1] != '/'):
        # If the current line starts with an opening tag and does not contain a
        # closing tag, increase indent after the line is printed.
        indent += '  '
    # Reconstruct XML text from the lines.
    return '\n'.join(lines)
|
Liangjianghao/powerline | refs/heads/develop | tests/test_selectors.py | 27 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import sys
from functools import partial
import tests.vim as vim_module
from tests.lib import Pl
from tests import TestCase
class TestVim(TestCase):
    """Tests for the vim segment selectors (powerline.selectors.vim)."""
    def test_single_tab(self):
        pl = Pl()
        single_tab = partial(self.vim.single_tab, pl=pl, segment_info=None, mode=None)
        with vim_module._with('tabpage'):
            # Inside the context an extra tab page exists, so the selector
            # reports that more than one tab is open.
            self.assertEqual(single_tab(), False)
        # After the context manager tears the tab page down again, only a
        # single tab remains.
        self.assertEqual(single_tab(), True)
    @classmethod
    def setUpClass(cls):
        # The fake 'vim' module lives under tests/path and must be importable
        # before powerline.selectors.vim is imported -- hence the deferred
        # import here instead of a module-level one.
        sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'path')))
        from powerline.selectors import vim
        cls.vim = vim
    @classmethod
    def tearDownClass(cls):
        # Undo the sys.path manipulation done in setUpClass.
        sys.path.pop(0)
if __name__ == '__main__':
    # Allow running this test module directly via the shared entry point.
    from tests import main
    main()
|
recursix/spearmint-salad | refs/heads/master | spearmint_salad/example/__init__.py | 1 | # -*- coding: utf-8 -*-
'''
Created on Mar 31, 2014
@author: alex
'''
|
klabit87/SCH-I545_NA_LL_VZW | refs/heads/master | scripts/rt-tester/rt-tester.py | 11005 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0     # -q: suppress progress() output
test = 0      # -t: syntax-check mode; print sysfs paths instead of writing
comments = 0  # -c: echo test-file comments after the first command

# sysfs interface of the in-kernel rt-mutex tester: one directory per test
# thread, each exposing a status and a command file.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes
# Maps the symbolic command names used in test files to the numeric opcodes
# written to the kernel tester's /command file.
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
    }

# Test opcodes: symbolic check name -> [status field, relation, fixed value].
# The field letter selects how analyse() interprets the status value:
# "M" extracts one decimal digit (mutex state), "O" compares against a
# (possibly symbolic) command opcode; the remaining letters ("P", "N", "E" --
# presumably priority, normal priority and event counter; confirm against the
# kernel rttest documentation) compare the raw integer.  A fixed value of
# None means the expected value comes from the test line's argument instead.
test_opcodes = {
    "prioeq"        : ["P" , "eq" , None],
    "priolt"        : ["P" , "lt" , None],
    "priogt"        : ["P" , "gt" , None],
    "nprioeq"       : ["N" , "eq" , None],
    "npriolt"       : ["N" , "lt" , None],
    "npriogt"       : ["N" , "gt" , None],
    "unlocked"      : ["M" , "eq" , 0],
    "trylock"       : ["M" , "eq" , 1],
    "blocked"       : ["M" , "eq" , 2],
    "blockedwake"   : ["M" , "eq" , 3],
    "locked"        : ["M" , "eq" , 4],
    "opcodeeq"      : ["O" , "eq" , None],
    "opcodelt"      : ["O" , "lt" , None],
    "opcodegt"      : ["O" , "gt" , None],
    "eventeq"       : ["E" , "eq" , None],
    "eventlt"       : ["E" , "lt" , None],
    "eventgt"       : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    # NOTE: Python 2 print statements; this script targets Python 2 only.
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c    display comments after first command"
    print " -h    help"
    print " -q    quiet mode"
    print " -t    test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    # NOTE: the parameter shadows the builtin 'str'; kept for compatibility.
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare one status-line field against an expected value.

    val -- raw field value read from the status file (string)
    top -- [field-letter, relation, preset] entry from test_opcodes
    arg -- argument column from the test line
    Returns 1 when the relation holds, 0 otherwise.
    """
    field, relation, preset = top[0], top[1], top[2]
    actual = int(val)
    if field == "M":
        # Mutex state: pick out the decimal digit selected by 'arg'.
        # NOTE: relies on integer division; this is a Python 2 script.
        actual = (actual / (10 ** int(arg))) % 10
        expected = preset
    elif field == "O":
        # Opcode: 'arg' may be a symbolic command name or a plain number.
        expected = int(cmd_opcodes.get(arg, arg))
    else:
        expected = int(arg)
    # progress("%d %s %d" %(actual, relation, expected))
    if relation == "eq":
        return 1 if actual == expected else 0
    if relation == "lt":
        return 1 if actual < expected else 0
    if relation == "gt":
        return 1 if actual > expected else 0
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    # Unknown option: show usage and bail out.
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source: the named test file, or stdin when none given.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin
linenr = 0

# Read the test patterns.  Each line is "cmd:opcode:threadid:data".
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        # EOF
        break

    line = line.strip()
    parts = line.split(":")

    if not parts or len(parts) < 1:
        continue

    if len(parts[0]) == 0:
        continue

    if parts[0].startswith("#"):
        # Comment line: echo it only once '-c' mode is armed (comments == 2).
        if comments > 1:
            progress(line)
        continue

    # First real command arms comment echoing when '-c' was given.
    if comments == 1:
        comments = 2

    progress(line)

    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()

    try:
        # Test or wait for a status value ('t' checks once, 'w' polls)
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]

            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue

            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                # 't' tests exactly once; 'w' keeps polling until it matches.
                if query or cmd == "t":
                    break

                progress(" " + status)

            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)

        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()

    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
|
gangadharkadam/smrterp | refs/heads/develop | erpnext/setup/doctype/sms_parameter/sms_parameter.py | 41 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SMSParameter(Document):
    """Document controller for the SMS Parameter DocType; no custom behaviour."""
    pass
EliotBerriot/django | refs/heads/master | tests/migrations/migrations_test_apps/lookuperror_c/migrations/__init__.py | 12133432 | |
martinbuc/missionplanner | refs/heads/master | Lib/email/mime/__init__.py | 12133432 | |
dennis-sheil/commandergenius | refs/heads/sdl_android | project/jni/python/src/Lib/ctypes/test/test_callbacks.py | 50 | import unittest
from ctypes import *
import _ctypes_test
class Callbacks(unittest.TestCase):
    """Round-trip each fundamental C type through a ctypes callback.

    NOTE: this is Python 2 code ('im_func', 'failUnless*' assertions).
    """
    functype = CFUNCTYPE

##    def tearDown(self):
##        import gc
##        gc.collect()

    def callback(self, *args):
        # Remember the arguments for later inspection and echo the last
        # one back as the callback's return value.
        self.got_args = args
        return args[-1]

    def check_type(self, typ, arg):
        # 'functype.im_func' unwraps the factory from the class attribute so
        # it is not treated as a bound method (Python 2 idiom).
        PROTO = self.functype.im_func(typ, typ)
        result = PROTO(self.callback)(arg)
        if typ == c_float:
            # double -> float -> double round trip loses precision.
            self.failUnlessAlmostEqual(result, arg, places=5)
        else:
            self.failUnlessEqual(self.got_args, (arg,))
            self.failUnlessEqual(result, arg)

        # Same again with an extra leading c_byte argument.
        PROTO = self.functype.im_func(typ, c_byte, typ)
        result = PROTO(self.callback)(-3, arg)
        if typ == c_float:
            self.failUnlessAlmostEqual(result, arg, places=5)
        else:
            self.failUnlessEqual(self.got_args, (-3, arg))
            self.failUnlessEqual(result, arg)

    ################

    def test_byte(self):
        self.check_type(c_byte, 42)
        self.check_type(c_byte, -42)

    def test_ubyte(self):
        self.check_type(c_ubyte, 42)

    def test_short(self):
        self.check_type(c_short, 42)
        self.check_type(c_short, -42)

    def test_ushort(self):
        self.check_type(c_ushort, 42)

    def test_int(self):
        self.check_type(c_int, 42)
        self.check_type(c_int, -42)

    def test_uint(self):
        self.check_type(c_uint, 42)

    def test_long(self):
        self.check_type(c_long, 42)
        self.check_type(c_long, -42)

    def test_ulong(self):
        self.check_type(c_ulong, 42)

    def test_longlong(self):
        self.check_type(c_longlong, 42)
        self.check_type(c_longlong, -42)

    def test_ulonglong(self):
        self.check_type(c_ulonglong, 42)

    def test_float(self):
        # only almost equal: double -> float -> double
        import math
        self.check_type(c_float, math.e)
        self.check_type(c_float, -math.e)

    def test_double(self):
        self.check_type(c_double, 3.14)
        self.check_type(c_double, -3.14)

    def test_longdouble(self):
        self.check_type(c_longdouble, 3.14)
        self.check_type(c_longdouble, -3.14)

    def test_char(self):
        self.check_type(c_char, "x")
        self.check_type(c_char, "a")

    # disabled: would now (correctly) raise a RuntimeWarning about
    # a memory leak.  A callback function cannot return a non-integral
    # C type without causing a memory leak.
##    def test_char_p(self):
##        self.check_type(c_char_p, "abc")
##        self.check_type(c_char_p, "def")

    def test_pyobject(self):
        o = ()
        from sys import getrefcount as grc
        for o in (), [], object():
            initial = grc(o)
            # This call leaks a reference to 'o'...
            self.check_type(py_object, o)
            before = grc(o)
            # ...but this call doesn't leak any more.  Where is the refcount?
            self.check_type(py_object, o)
            after = grc(o)
            self.failUnlessEqual((after, o), (before, o))

    def test_unsupported_restype_1(self):
        # Only "fundamental" result types are supported for callback
        # functions, the type must have a non-NULL stgdict->setfunc.
        # POINTER(c_double), for example, is not supported.
        prototype = self.functype.im_func(POINTER(c_double))
        # The type is checked when the prototype is called
        self.assertRaises(TypeError, prototype, lambda: None)

    def test_unsupported_restype_2(self):
        prototype = self.functype.im_func(object)
        self.assertRaises(TypeError, prototype, lambda: None)
try:
    WINFUNCTYPE
except NameError:
    # WINFUNCTYPE is only provided by ctypes on Windows; skip the stdcall
    # variant of the test suite elsewhere.
    pass
else:
    class StdcallCallbacks(Callbacks):
        # Re-run the whole Callbacks suite with the stdcall calling convention.
        functype = WINFUNCTYPE
################################################################
class SampleCallbacksTestCase(unittest.TestCase):
    """Integration test: pass a Python callback into native code."""

    def test_integrate(self):
        # Derived from some then non-working code, posted by David Foster
        dll = CDLL(_ctypes_test.__file__)

        # The function prototype called by 'integrate': double func(double);
        CALLBACK = CFUNCTYPE(c_double, c_double)

        # The integrate function itself, exposed from the _ctypes_test dll
        integrate = dll.integrate
        integrate.argtypes = (c_double, c_double, CALLBACK, c_long)
        integrate.restype = c_double

        def func(x):
            return x**2

        # Integrate x^2 over [0, 1]; the exact value is 1/3.
        result = integrate(0.0, 1.0, CALLBACK(func), 10)
        diff = abs(result - 1./3.)

        self.failUnless(diff < 0.01, "%s not less than 0.01" % diff)
################################################################
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
vipullakhani/mi-instrument | refs/heads/master | mi/dataset/parser/fdchp_a_dcl.py | 5 | """
@package mi.dataset.parser
@file mi/dataset/parser/fdchp_a_dcl.py
@author Emily Hahn
@brief A parser for the fdchp series a instrument through a DCL
"""
import re
import ntplib
from mi.core.log import get_logger
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, UnexpectedDataException
from mi.core.instrument.dataset_data_particle import DataParticle, DataParticleKey
from mi.dataset.dataset_parser import SimpleParser
from mi.dataset.parser.utilities import dcl_time_to_ntp
log = get_logger()

__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'

# the common regexes were not used here due to desire to have one group containing the
# full date time rather than each number in a group
DATE_TIME_REGEX = r'(\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2}\.\d{3})'

INSTRUMENT_STARTED_REGEX = r'Instrument Started'
INSTRUMENT_STARTED_MATCHER = re.compile(INSTRUMENT_STARTED_REGEX)

# DCL log line: timestamp, a [fdchp:DLOGP<n>] tag, then the message body
LOG_START_REGEX = DATE_TIME_REGEX + r' \[fdchp:DLOGP\d+\]:,*(.*)'
LOG_START_MATCHER = re.compile(LOG_START_REGEX)

# a log line whose body is itself an 'Instrument Started' log message
INSTRUMENT_START_LOG_REGEX = LOG_START_REGEX + INSTRUMENT_STARTED_REGEX
INSTRUMENT_START_LOG_MATCHER = re.compile(INSTRUMENT_START_LOG_REGEX)

# match the start of the data line
DATA_START_REGEX = DATE_TIME_REGEX + r' FLUXDATA '
DATA_START_MATCHER = re.compile(DATA_START_REGEX)

# number of chars starting a data line through the end of 'FLUXDATA '
START_N_CHARS = 33
# the expected number of comma separated values in the data line
N_FIELDS = 66

# indices into the particle raw_data list built in parse_file
DCL_CONTROLLER_TIMESTAMP = 1
DATA_COLLECTION_TIME = 2
class DataParticleType(BaseEnum):
    """Output stream names for the two delivery paths."""
    TELEMETERED = 'fdchp_a_dcl_instrument'
    RECOVERED = 'fdchp_a_dcl_instrument_recovered'
class FdchpADclCommonParticle(DataParticle):
# dictionary for unpacking float fields which map directly to a parameters (string -> float)
# index 1 is dcl_controller_timestamp strring
# index 2 is data collection time string
UNPACK_DICT = {
'v_num_datacollection': 3,
# index 4 is a string
'wind_u_avg': 5,
'wind_v_avg': 6,
'wind_w_avg': 7,
'speed_of_sound_avg': 8,
'wind_u_std': 9,
'wind_v_std': 10,
'wind_w_std': 11,
'speed_of_sound_std': 12,
'wind_u_max': 13,
'wind_v_max': 14,
'wind_w_max': 15,
'speed_of_sound_max': 16,
'wind_u_min': 17,
'wind_v_min': 18,
'wind_w_min': 19,
'speed_of_sound_min': 20,
'x_accel': 21,
'y_accel': 22,
'z_accel': 23,
'x_accel_std': 24,
'y_accel_std': 25,
'z_accel_std': 26,
'x_accel_max': 27,
'y_accel_max': 28,
'z_accel_max': 29,
'x_accel_min': 30,
'y_accel_min': 31,
'z_accel_min': 32,
'x_ang_rate_avg': 33,
'y_ang_rate_avg': 34,
'z_ang_rate_avg': 35,
'x_ang_rate_std': 36,
'y_ang_rate_std': 37,
'z_ang_rate_std': 38,
'x_ang_rate_max': 39,
'y_ang_rate_max': 40,
'z_ang_rate_max': 41,
'x_ang_rate_min': 42,
'y_ang_rate_min': 43,
'z_ang_rate_min': 44,
'heading': 45,
'pitch': 46,
'roll': 47,
'heading_std': 48,
'pitch_std': 49,
'roll_std': 50,
'heading_max': 51,
'pitch_max': 52,
'roll_max': 53,
'heading_min': 54,
'pitch_min': 55,
'roll_min': 56,
'u_corr': 57,
'v_corr': 58,
'w_corr': 59,
'u_corr_std': 60,
'v_corr_std': 61,
'w_corr_std': 62,
'wind_speed': 63,
'uw_momentum_flux': 64,
'vw_momentum_flux': 65,
'buoyance_flux': 66,
'eng_wave_motion': 67
}
def _build_parsed_values(self):
    """Encode every parameter in UNPACK_DICT from raw_data as a float."""
    # loop through unpack dictionary and encode floats
    parameters = list()
    # NOTE: iteritems() makes this Python 2-only code.
    for name, index in self.UNPACK_DICT.iteritems():
        parameters.append(self._encode_value(name, self.raw_data[index], float))
    return parameters
@staticmethod
def str_or_none(str_val):
    """Convert str_val to str, passing None through unchanged."""
    return None if str_val is None else str(str_val)
class FdchpADclTelemeteredParticle(FdchpADclCommonParticle):
    # Telemetered variant: only the stream name differs.
    _data_particle_type = DataParticleType.TELEMETERED
class FdchpADclRecoveredParticle(FdchpADclCommonParticle):
    # Recovered variant: only the stream name differs.
    _data_particle_type = DataParticleType.RECOVERED
class FdchpADclParser(SimpleParser):
    """Parser for fdchp series-a instrument data recorded through a DCL."""

    def __init__(self,
                 stream_handle,
                 exception_callback,
                 is_telemetered):
        """
        :param stream_handle: open handle to the file to parse
        :param exception_callback: called with recoverable parse exceptions
        :param is_telemetered: True selects the telemetered particle class,
            False the recovered one
        """
        if is_telemetered:
            # this is a telemetered parser
            self.particle_class = FdchpADclTelemeteredParticle
        else:
            # this is a recovered parser
            self.particle_class = FdchpADclRecoveredParticle

        # no config for this parser, pass in empty dict
        super(FdchpADclParser, self).__init__({},
                                              stream_handle,
                                              exception_callback)

    def parse_file(self):
        """
        Entry point into parsing the file, loop over each line and interpret it until the entire file is parsed
        """
        stored_start_timestamp = None

        # read the first line in the file
        line = self._stream_handle.readline()

        while line:
            # data will be at start of line so use match
            data_match = DATA_START_MATCHER.match(line)
            # instrument started may be in middle so use search
            # NOTE(review): comment says 'search' but the code uses match();
            # LOG_START_REGEX is anchored at the start of the line.
            log_match = LOG_START_MATCHER.match(line)

            if data_match:
                # found a data line
                dcl_timestamp = data_match.group(1)

                # Note Bug #10002 found early deployments created data missing commas
                # between some fields.  Replace commas with space and then split to
                # correctly parse files from deployments with either firmware
                fields_set = line[START_N_CHARS:].replace(',', ' ')
                fields = fields_set.split()

                if len(fields) != N_FIELDS:
                    msg = 'Expected %d fields but received %d' % (N_FIELDS, len(fields))
                    log.warn(msg)
                    self._exception_callback(SampleException(msg))
                else:
                    # create an array of the fields to parse in the particle
                    raw_data = [stored_start_timestamp, dcl_timestamp]
                    raw_data.extend(fields)

                    # DCL controller timestamp is the port_timestamp
                    port_timestamp = dcl_time_to_ntp(raw_data[DCL_CONTROLLER_TIMESTAMP])
                    # datacollection time is the internal_timestamp
                    unix_ts = float(raw_data[DATA_COLLECTION_TIME])
                    internal_timestamp = ntplib.system_to_ntp_time(unix_ts)

                    # extract this particle
                    particle = self._extract_sample(self.particle_class,
                                                    None,
                                                    raw_data,
                                                    port_timestamp=port_timestamp,
                                                    internal_timestamp=internal_timestamp,
                                                    preferred_ts=DataParticleKey.PORT_TIMESTAMP)
                    self._record_buffer.append(particle)
                    # start timestamp applies to the first data line only
                    stored_start_timestamp = None

            elif log_match:
                # pull out whatever text is within the log
                log_contents = log_match.group(2)

                # there are two cases, a log message simply contains the 'Instrument Started' text, or it contains
                # an entire other log message which may contain 'Instrument Started'
                instr_log_match = INSTRUMENT_STARTED_MATCHER.match(log_contents)
                full_log_instr_match = INSTRUMENT_START_LOG_MATCHER.match(log_contents)

                # text other than instrument started is ignored within log messages
                if instr_log_match:
                    # found a line containing a single log instrument started, hold on to it until we get a data line
                    stored_start_timestamp = log_match.group(1)
                elif full_log_instr_match:
                    # found a log within a log, use the inner timestamp associated with the instrument start
                    stored_start_timestamp = full_log_instr_match.group(1)

            else:
                msg = 'Data with unexpected format received: %s' % line
                log.warn(msg)
                self._exception_callback(UnexpectedDataException(msg))

            line = self._stream_handle.readline()
|
gjaldon/otp | refs/heads/maint | lib/asn1/test/asn1_SUITE_data/Cho.py | 97 | Cho DEFINITIONS IMPLICIT TAGS ::=
BEGIN
ChoCon ::= CHOICE
{
nested Cho2,
bool0 [0] BOOLEAN,
bool1 [1] BOOLEAN,
int2 [2] INTEGER
}
ChoExp ::= CHOICE
{
int10 [APPLICATION 10] EXPLICIT INTEGER {first(1),last(31)},
bool11 [APPLICATION 11] EXPLICIT BOOLEAN,
enum12 [APPLICATION 12] EXPLICIT ENUMERATED {one(1),two(2),three(3)}
}
Cho2 ::= CHOICE
{
i INTEGER,
b BOOLEAN
}
END
|
ddico/project | refs/heads/8.0 | analytic_hours_block/hours_block.py | 28 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Vincent Renaville, ported by Joel Grand-Guillaume
# Copyright 2010-2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class AccountHoursBlock(orm.Model):
_name = "account.hours.block"
_inherit = ['mail.thread']
def _get_last_action(self, cr, uid, ids, name, arg, context=None):
    """Return the last analytic line date for each block's invoice.

    Function-field getter: maps block id -> latest analytic line date,
    or False when the invoice has no analytic lines.
    """
    res = {}
    for block in self.browse(cr, uid, ids, context=context):
        cr.execute("SELECT max(al.date) FROM account_analytic_line AS al"
                   " WHERE al.invoice_id = %s", (block.invoice_id.id,))
        fetch_res = cr.fetchone()
        # BUGFIX: an aggregate query always returns one row, so fetchone()
        # yields (None,) when no lines match; guard the value as well so
        # the field is False (not None) for invoices without lines.
        res[block.id] = fetch_res[0] if fetch_res and fetch_res[0] else False
    return res
def _compute_hours(self, cr, uid, ids, fields, args, context=None):
    """Return a dict of [id][fields] with hours bought/used per block."""
    # NOTE: 'long' makes this Python 2-only code.
    if isinstance(ids, (int, long)):
        ids = [ids]
    result = {}
    aal_obj = self.pool.get('account.analytic.line')
    for block in self.browse(cr, uid, ids, context=context):
        result[block.id] = {'amount_hours_block': 0.0,
                            'amount_hours_block_done': 0.0}
        # Compute hours bought
        for line in block.invoice_id.invoice_line:
            hours_bought = 0.0
            if line.product_id and line.product_id.is_in_hours_block:
                # We will now calculate the product_quantity
                # (normalized to the base UoM via the factor)
                factor = line.uos_id.factor
                if factor == 0.0:
                    factor = 1.0
                amount = line.quantity
                hours_bought += (amount / factor)
            result[block.id]['amount_hours_block'] += hours_bought
        # Compute hours spent
        hours_used = 0.0
        # Get ids of analytic line generated from
        # timesheet associated to the current block
        cr.execute("SELECT al.id "
                   "FROM account_analytic_line AS al, "
                   " account_analytic_journal AS aj "
                   "WHERE aj.id = al.journal_id "
                   "AND aj.type = 'general' "
                   "AND al.invoice_id = %s", (block.invoice_id.id,))
        res_line_ids = cr.fetchall()
        line_ids = [l[0] for l in res_line_ids] if res_line_ids else []
        for line in aal_obj.browse(cr, uid, line_ids, context=context):
            factor = 1.0
            if line.product_uom_id and line.product_uom_id.factor != 0.0:
                factor = line.product_uom_id.factor
            # discount part of the time according to the invoicing factor
            factor_invoicing = 1.0
            if line.to_invoice and line.to_invoice.factor != 0.0:
                factor_invoicing = 1.0 - line.to_invoice.factor / 100
            hours_used += ((line.unit_amount / factor) * factor_invoicing)
        result[block.id]['amount_hours_block_done'] = hours_used
    return result
def _compute_amount(self, cr, uid, ids, fields, args, context=None):
    """Return a dict of [id][fields] with monetary amounts bought/used per block."""
    if context is None:
        context = {}
    result = {}
    aal_obj = self.pool.get('account.analytic.line')
    pricelist_obj = self.pool.get('product.pricelist')
    for block in self.browse(cr, uid, ids, context=context):
        result[block.id] = {'amount_hours_block': 0.0,
                            'amount_hours_block_done': 0.0}
        # Compute amount bought
        for line in block.invoice_id.invoice_line:
            amount_bought = 0.0
            if line.product_id:
                ## We will now calculate the product_quantity
                factor = line.uos_id.factor
                if factor == 0.0:
                    factor = 1.0
                amount = line.quantity * line.price_unit
                amount_bought += (amount / factor)
            result[block.id]['amount_hours_block'] += amount_bought
        # Compute total amount
        # Get ids of analytic line generated from timesheet associated to current block
        cr.execute("SELECT al.id FROM account_analytic_line AS al,"
                   " account_analytic_journal AS aj"
                   " WHERE aj.id = al.journal_id"
                   " AND aj.type='general'"
                   " AND al.invoice_id = %s", (block.invoice_id.id,))
        res_line_ids = cr.fetchall()
        line_ids = [l[0] for l in res_line_ids] if res_line_ids else []
        total_amount = 0.0
        for line in aal_obj.browse(cr, uid, line_ids, context=context):
            # discount part of the amount according to the invoicing factor
            factor_invoicing = 1.0
            if line.to_invoice and line.to_invoice.factor != 0.0:
                factor_invoicing = 1.0 - line.to_invoice.factor / 100
            # price each line with the analytic account's pricelist
            ctx = dict(context, uom=line.product_uom_id.id)
            amount = pricelist_obj.price_get(
                cr, uid,
                [line.account_id.pricelist_id.id],
                line.product_id.id,
                line.unit_amount or 1.0,
                line.account_id.partner_id.id or False,
                ctx)[line.account_id.pricelist_id.id]
            total_amount += amount * line.unit_amount * factor_invoicing
        result[block.id]['amount_hours_block_done'] += total_amount
    return result
def _compute(self, cr, uid, ids, fields, args, context=None):
    """Dispatch to _compute_hours/_compute_amount per block type, then derive the delta."""
    result = {}
    block_per_types = {}
    # group block ids by their 'type' field ('hours' or 'amount')
    for block in self.browse(cr, uid, ids, context=context):
        block_per_types.setdefault(block.type, []).append(block.id)
    for block_type in block_per_types:
        if block_type:
            # resolves to _compute_hours or _compute_amount
            func = getattr(self, "_compute_%s" % block_type)
            result.update(func(cr, uid, ids, fields, args, context=context))
    # delta = bought - used, in the block's own unit
    for block in result:
        result[block]['amount_hours_block_delta'] = \
            result[block]['amount_hours_block'] - \
            result[block]['amount_hours_block_done']
    return result
def _get_analytic_line(self, cr, uid, ids, context=None):
    # store-trigger helper: map changed analytic line ids to the hours
    # blocks attached to the same invoices, so their fields recompute.
    invoice_ids = []
    an_lines_obj = self.pool.get('account.analytic.line')
    block_obj = self.pool.get('account.hours.block')
    for line in an_lines_obj.browse(cr, uid, ids, context=context):
        if line.invoice_id:
            invoice_ids.append(line.invoice_id.id)
    return block_obj.search(
        cr, uid, [('invoice_id', 'in', invoice_ids)], context=context)
def _get_invoice(self, cr, uid, ids, context=None):
    # store-trigger helper: map changed invoice ids to their hours blocks.
    block_ids = set()
    inv_obj = self.pool.get('account.invoice')
    for invoice in inv_obj.browse(cr, uid, ids, context=context):
        # NOTE: despite its name, 'inv' iterates hours-block records here.
        block_ids.update([inv.id for inv in invoice.account_hours_block_ids])
    return list(block_ids)
def action_send_block(self, cr, uid, ids, context=None):
    """Open a form to send by email. Return an action dict."""
    assert len(ids) == 1, '''\
This option should only be used for a single ID at a time.'''
    ir_model_data = self.pool.get('ir.model.data')
    # look up the mail template shipped with this module; tolerate its absence
    try:
        template_id = ir_model_data.get_object_reference(
            cr, uid, 'analytic_hours_block', 'email_template_hours_block'
        )[1]
    except ValueError:
        template_id = False
    # look up the standard mail composer form view
    try:
        compose_form_id = ir_model_data.get_object_reference(
            cr, uid, 'mail', 'email_compose_message_wizard_form'
        )[1]
    except ValueError:
        compose_form_id = False
    # pre-fill the composer with this block and the template
    ctx = {
        'default_model': self._name,
        'default_res_id': ids[0],
        'default_use_template': bool(template_id),
        'default_template_id': template_id,
        'default_composition_mode': 'comment',
    }
    return {
        'type': 'ir.actions.act_window',
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'mail.compose.message',
        'views': [(compose_form_id, 'form')],
        'view_id': compose_form_id,
        'target': 'new',
        'context': ctx,
    }
_recompute_triggers = {
'account.hours.block': (lambda self, cr, uid, ids, c=None:
ids, ['invoice_id', 'type'], 10),
'account.invoice': (_get_invoice, ['analytic_line_ids'], 10),
'account.analytic.line': (
_get_analytic_line,
['product_uom_id', 'unit_amount', 'to_invoice', 'invoice_id'],
10),
}
_columns = {
'amount_hours_block': fields.function(
_compute,
type='float',
string='Quantity / Amount bought',
store=_recompute_triggers,
multi='amount_hours_block_delta',
help="Amount bought by the customer. "
"This amount is expressed in the base Unit of Measure "
"(factor=1.0)"),
'amount_hours_block_done': fields.function(
_compute,
type='float',
string='Quantity / Amount used',
store=_recompute_triggers,
multi='amount_hours_block_delta',
help="Amount done by the staff. "
"This amount is expressed in the base Unit of Measure "
"(factor=1.0)"),
'amount_hours_block_delta': fields.function(
_compute,
type='float',
string='Difference',
store=_recompute_triggers,
multi='amount_hours_block_delta',
help="Difference between bought and used. "
"This amount is expressed in the base Unit of Measure "
"(factor=1.0)"),
'last_action_date': fields.function(
_get_last_action,
type='date',
string='Last action date',
help="Date of the last analytic line linked to the invoice "
"related to this block hours."),
'close_date': fields.date('Closed Date'),
'invoice_id': fields.many2one(
'account.invoice',
'Invoice',
ondelete='cascade',
required=True),
'type': fields.selection(
[('hours', 'Hours'),
('amount', 'Amount')],
string='Type of Block',
required=True,
help="The block is based on the quantity of hours "
"or on the amount."),
# Invoices related infos
'date_invoice': fields.related(
'invoice_id', 'date_invoice',
type="date",
string="Invoice Date",
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['date_invoice'], 10),
},
readonly=True),
'user_id': fields.related(
'invoice_id', 'user_id',
type="many2one",
relation="res.users",
string="Salesman",
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['user_id'], 10),
},
readonly=True),
'partner_id': fields.related(
'invoice_id', 'partner_id',
type="many2one",
relation="res.partner",
string="Partner",
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['partner_id'], 10),
},
readonly=True),
'name': fields.related(
'invoice_id', 'name',
type="char",
string="Description",
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['name'], 10),
},
readonly=True),
'number': fields.related(
'invoice_id', 'number',
type="char",
string="Number",
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['number'], 10),
},
readonly=True),
'journal_id': fields.related(
'invoice_id', 'journal_id',
type="many2one",
relation="account.journal",
string="Journal",
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['journal_id'], 10),
},
readonly=True),
'period_id': fields.related(
'invoice_id', 'period_id',
type="many2one",
relation="account.period",
string="Period",
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['period_id'], 10),
},
readonly=True),
'company_id': fields.related(
'invoice_id', 'company_id',
type="many2one",
relation="res.company",
string="Company",
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['company_id'], 10),
},
readonly=True),
'currency_id': fields.related(
'invoice_id', 'currency_id',
type="many2one",
relation="res.currency",
string="Currency",
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['currency_id'], 10),
},
readonly=True),
'residual': fields.related(
'invoice_id', 'residual',
type="float",
string="Residual",
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['residual'], 10),
},
readonly=True),
'amount_total': fields.related(
'invoice_id', 'amount_total',
type="float",
string="Total",
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['amount_total'], 10),
},
readonly=True),
'department_id': fields.related(
'invoice_id', 'department_id',
type='many2one',
relation='hr.department',
string='Department',
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['department_id'], 10),
},
readonly=True),
'state': fields.related(
'invoice_id', 'state',
type='selection',
selection=[
('draft', 'Draft'),
('proforma', 'Pro-forma'),
('proforma2', 'Pro-forma'),
('open', 'Open'),
('paid', 'Paid'),
('cancel', 'Cancelled'),
],
string='State',
readonly=True,
store={
'account.hours.block': (lambda self, cr, uid, ids, c=None: ids,
['invoice_id'], 10),
'account.invoice': (_get_invoice, ['state'], 10),
}),
}
############################################################################
## Add hours blocks on invoice
############################################################################
class AccountInvoice(orm.Model):
    """Extend invoices with the list of hours blocks based on them."""
    _inherit = 'account.invoice'

    _columns = {
        'account_hours_block_ids': fields.one2many(
            'account.hours.block',
            'invoice_id',
            string='Hours Block')
    }
|
TerraFERMA/TerraFERMA | refs/heads/master | buckettools/python/buckettools/debug.py | 1 | # (originally part of fluidity, modified for buckettools by Cian Wilson)
# Copyright (C) 2006 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a full list
# of copyright holders.
#
# Prof. C Pain
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# amcgsoftware@imperial.ac.uk
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
# Copyright (C) 2013 Columbia University in the City of New York and others.
#
# Please see the AUTHORS file in the main source directory for a full list
# of contributors.
#
# This file is part of TerraFERMA.
#
# TerraFERMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TerraFERMA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with TerraFERMA. If not, see <http://www.gnu.org/licenses/>.
def shell():
    '''
    shell()

    Return an IPython shell object.  To actually start the shell, invoke
    the callable returned by this function.

    This is particularly useful for debugging embedded
    python or for crawling over the data when something has gone wrong.
    '''
    import sys
    # Embedded interpreters may not define sys.argv, which IPython expects.
    if not hasattr(sys, "argv"):
        sys.argv = []

    banner = """
 This is an IPython shell embedded in buckettools.  You can use it to examine
 or even set variables.  Press CTRL+d to exit and return to your program.
"""

    # Prefer the modern embedding API (IPython >= 0.11); the instance it
    # returns is callable, matching the legacy IPShellEmbed interface.
    try:
        from IPython.terminal.embed import InteractiveShellEmbed
        return InteractiveShellEmbed(banner1=banner)
    except ImportError:
        pass

    # Fall back to the legacy (pre-0.11) IPython embedding API.
    try:
        from IPython.Shell import IPShellEmbed
    except ImportError:
        sys.stderr.write(
"""
*****************************************************
*** Failed to import IPython. This probably means ***
*** you don't have it installed. Please install   ***
*** IPython and try again.                        ***
*****************************************************
""")
        raise

    return IPShellEmbed(banner=banner)
scorpilix/Golemtest | refs/heads/develop | golem/resource/__init__.py | 12133432 | |
JTarball/docker-django-polymer | refs/heads/master | docker/app/app/backend/apps/_archive/content_2/__init__.py | 12133432 | |
letuananh/visualkopasu | refs/heads/main | visko/web/migrations/__init__.py | 12133432 | |
aakash-cr7/zulip | refs/heads/master | analytics/__init__.py | 12133432 | |
wli/django-allauth | refs/heads/master | allauth/socialaccount/providers/stripe/tests.py | 10 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import StripeProvider
class StripeTests(OAuth2TestsMixin, TestCase):
provider_id = StripeProvider.id
def get_mocked_response(self):
return MockedResponse(200, """{
"id": "acct_sometestid",
"object": "account",
"business_logo": null,
"business_name": null,
"business_url": "test.com",
"charges_enabled": true,
"country": "SE",
"currencies_supported": [
"usd",
"eur",
"sek"
],
"default_currency": "eur",
"details_submitted": true,
"display_name": "Test",
"email": "test@test.com",
"managed": false,
"metadata": {},
"statement_descriptor": "TEST.COM",
"support_phone": "+460123456789",
"timezone": "Europe/Stockholm",
"transfers_enabled": true
}""")
def get_login_response_json(self, with_refresh_token=True):
rt = ''
if with_refresh_token:
rt = ',"refresh_token": "testrf"'
return """{
"uid":"weibo",
"access_token":"testac",
"livemode": false,
"token_type": "bearer",
"stripe_publishable_key": "pk_test_someteskey",
"stripe_user_id": "acct_sometestid",
"scope": "read_write"
%s }""" % rt
|
UnionGospelMission/UGM-Database | refs/heads/master | truckmanagement/admin.py | 2 | from django.contrib import admin
# Register your models here.
from truckmanagement.models import Truck,Driver,Donor,Donation
class DonorAdmin(admin.ModelAdmin):
    """Admin configuration for Donor records.

    Groups the many Donor fields into collapsible sections on the change
    form, and configures the list page columns, filters and search.
    """
    # Change-form layout: every section except 'Name' starts collapsed.
    fieldsets=[
        ('Name', {'fields':['first_name','last_name','company_name'],}),
        ('Street Address', {'fields':['address1','address2','city','state','zip_code'], 'classes': ['collapse']}),
        ('Mailing Address', {'fields':['mailing_address1','mailing_address2','mailing_city','mailing_state','mailing_zip_code'], 'classes': ['collapse']}),
        ('Contact Information', {'fields':['home_phone','business_phone','business_fax','mobile_phone','email','contact_preference'], 'classes': ['collapse']}),
        ('Administration', {'fields':['nearest_facility','exported_to_donor_perfect','thankyou','mailing_list'], 'classes': ['collapse']}),
        ('Notes', {'fields':['first_contact_date','referral','comments'], 'classes': ['collapse']}),
        ]
    # Columns on the changelist page, sidebar filters, and searchable fields.
    list_display = ('id','last_name','first_name','company_name','address1','address2','city','state','zip_code','comments')
    list_filter = ['city','state','zip_code']
    search_fields = ['first_name','last_name','company_name','address1','address2','city','state','zip_code']
# Truck and Driver use the default ModelAdmin; Donor gets the custom
# configuration above. Donation registration is currently disabled.
admin.site.register(Truck)
admin.site.register(Driver)
admin.site.register(Donor,DonorAdmin)
#admin.site.register(Donation)
|
ryandougherty/mwa-capstone | refs/heads/heroku | MWA_Tools/build/matplotlib/examples/pylab_examples/centered_ticklabels.py | 3 | # sometimes it is nice to have ticklabels centered. mpl currently
# associates a label with a tick, and the label can be aligned
# 'center', 'left', or 'right' using the horizontal alignment property:
#
#
# for label in ax.xaxis.get_xticklabels():
# label.set_horizontalalignment('right')
#
#
# but this doesn't help center the label between ticks. One solution
# is to "fake it".  Use the minor ticks to place a tick centered
# between the major ticks. Here is an example that labels the months,
# centered between the ticks
import datetime
import numpy as np
import matplotlib
import matplotlib.cbook as cbook
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
# Load some financial data: Apple's stock price over the last 250 days.
fh = cbook.get_sample_data('aapl.npy')
r = np.load(fh)
fh.close()
r = r[-250:]  # keep only the last 250 trading days

fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(r.date, r.adj_close)

# Major ticks mark month boundaries but carry no labels; minor ticks sit on
# the 15th of each month and carry the month-name labels, so every label is
# visually centered within its month.
ax.xaxis.set_major_locator(dates.MonthLocator())
ax.xaxis.set_minor_locator(dates.MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(dates.DateFormatter('%b'))

# Hide the minor tick marks themselves; keep only their (centered) labels.
for tick in ax.xaxis.get_minor_ticks():
    tick.tick1line.set_markersize(0)
    tick.tick2line.set_markersize(0)
    tick.label1.set_horizontalalignment('center')

# Use floor division: len(r)/2 yields a float under Python 3 and would fail
# as an array index ('//' behaves identically for ints on Python 2).
imid = len(r) // 2
ax.set_xlabel(str(r.date[imid].year))
plt.show()
|
eerohele/sublime-lxml | refs/heads/master | st3_linux_x64/lxml/html/ElementSoup.py | 148 | __doc__ = """Legacy interface to the BeautifulSoup HTML parser.
"""
__all__ = ["parse", "convert_tree"]
from soupparser import convert_tree, parse as _parse
def parse(file, beautifulsoup=None, makeelement=None):
    """Parse *file* with BeautifulSoup and return the root lxml element.

    Thin wrapper over ``soupparser.parse`` that unwraps the ElementTree
    it returns into its root element.
    """
    tree = _parse(file, beautifulsoup=beautifulsoup,
                  makeelement=makeelement)
    return tree.getroot()
|
ArcherSys/ArcherSys | refs/heads/master | Lib/site-packages/pip/_vendor/distlib/database.py | 129 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
    """
    Represents a set of distributions installed on a path (typically sys.path).
    """
    def __init__(self, path=None, include_egg=False):
        """
        Create an instance from a path, optionally including legacy (distutils/
        setuptools/distribute) distributions.
        :param path: The path to use, as a list of directories. If not specified,
                     sys.path is used.
        :param include_egg: If True, this instance will look for and return legacy
                            distributions as well as those based on PEP 376.
        """
        if path is None:
            path = sys.path
        self.path = path
        self._include_dist = True
        self._include_egg = include_egg
        self._cache = _Cache()
        self._cache_egg = _Cache()
        self._cache_enabled = True
        self._scheme = get_scheme('default')

    def _get_cache_enabled(self):
        return self._cache_enabled

    def _set_cache_enabled(self, value):
        self._cache_enabled = value

    # Whether results of path scans are cached in self._cache/_cache_egg.
    cache_enabled = property(_get_cache_enabled, _set_cache_enabled)

    def clear_cache(self):
        """
        Clears the internal cache.
        """
        self._cache.clear()
        self._cache_egg.clear()

    def _yield_distributions(self):
        """
        Yield .dist-info and/or .egg(-info) distributions.
        """
        # We need to check if we've seen some resources already, because on
        # some Linux systems (e.g. some Debian/Ubuntu variants) there are
        # symlinks which alias other files in the environment.
        seen = set()
        for path in self.path:
            finder = resources.finder_for_path(path)
            if finder is None:
                continue
            r = finder.find('')
            if not r or not r.is_container:
                continue
            rset = sorted(r.resources)
            for entry in rset:
                r = finder.find(entry)
                if not r or r.path in seen:
                    continue
                if self._include_dist and entry.endswith(DISTINFO_EXT):
                    # Try the newer metadata filenames first, falling back
                    # to older wheel metadata names.
                    possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME]
                    for metadata_filename in possible_filenames:
                        metadata_path = posixpath.join(entry, metadata_filename)
                        pydist = finder.find(metadata_path)
                        if pydist:
                            break
                    else:
                        continue
                    with contextlib.closing(pydist.as_stream()) as stream:
                        metadata = Metadata(fileobj=stream, scheme='legacy')
                    logger.debug('Found %s', r.path)
                    seen.add(r.path)
                    yield new_dist_class(r.path, metadata=metadata,
                                         env=self)
                elif self._include_egg and entry.endswith(('.egg-info',
                                                           '.egg')):
                    logger.debug('Found %s', r.path)
                    seen.add(r.path)
                    yield old_dist_class(r.path, self)

    def _generate_cache(self):
        """
        Scan the path for distributions and populate the cache with
        those that are found.
        """
        gen_dist = not self._cache.generated
        gen_egg = self._include_egg and not self._cache_egg.generated
        if gen_dist or gen_egg:
            for dist in self._yield_distributions():
                if isinstance(dist, InstalledDistribution):
                    self._cache.add(dist)
                else:
                    self._cache_egg.add(dist)
            if gen_dist:
                self._cache.generated = True
            if gen_egg:
                self._cache_egg.generated = True

    @classmethod
    def distinfo_dirname(cls, name, version):
        """
        The *name* and *version* parameters are converted into their
        filename-escaped form, i.e. any ``'-'`` characters are replaced
        with ``'_'`` other than the one in ``'dist-info'`` and the one
        separating the name from the version number.
        :parameter name: is converted to a standard distribution name by replacing
                         any runs of non- alphanumeric characters with a single
                         ``'-'``.
        :type name: string
        :parameter version: is converted to a standard version string. Spaces
                            become dots, and all other non-alphanumeric characters
                            (except dots) become dashes, with runs of multiple
                            dashes condensed to a single dash.
        :type version: string
        :returns: directory name
        :rtype: string"""
        name = name.replace('-', '_')
        return '-'.join([name, version]) + DISTINFO_EXT

    def get_distributions(self):
        """
        Provides an iterator that looks for distributions and returns
        :class:`InstalledDistribution` or
        :class:`EggInfoDistribution` instances for each one of them.
        :rtype: iterator of :class:`InstalledDistribution` and
                :class:`EggInfoDistribution` instances
        """
        if not self._cache_enabled:
            for dist in self._yield_distributions():
                yield dist
        else:
            self._generate_cache()
            for dist in self._cache.path.values():
                yield dist
            if self._include_egg:
                for dist in self._cache_egg.path.values():
                    yield dist

    def get_distribution(self, name):
        """
        Looks for a named distribution on the path.
        This function only returns the first result found, as no more than one
        value is expected. If nothing is found, ``None`` is returned.
        :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
                or ``None``
        """
        result = None
        name = name.lower()
        if not self._cache_enabled:
            for dist in self._yield_distributions():
                if dist.key == name:
                    result = dist
                    break
        else:
            self._generate_cache()
            if name in self._cache.name:
                result = self._cache.name[name][0]
            elif self._include_egg and name in self._cache_egg.name:
                result = self._cache_egg.name[name][0]
        return result

    def provides_distribution(self, name, version=None):
        """
        Iterates over all distributions to find which distributions provide *name*.
        If a *version* is provided, it will be used to filter the results.

        This is a generator: it yields every distribution that provides
        *name* (and matches *version*, if given), and yields nothing when
        there is no match.
        :parameter version: a version specifier that indicates the version
                            required, conforming to the format in ``PEP-345``
        :type name: string
        :type version: string
        """
        matcher = None
        # PEP 8 idiom (was: ``if not version is None``).
        if version is not None:
            try:
                matcher = self._scheme.matcher('%s (%s)' % (name, version))
            except ValueError:
                raise DistlibException('invalid name or version: %r, %r' %
                                       (name, version))
        for dist in self.get_distributions():
            provided = dist.provides
            for p in provided:
                p_name, p_ver = parse_name_and_version(p)
                if matcher is None:
                    if p_name == name:
                        yield dist
                        break
                else:
                    if p_name == name and matcher.match(p_ver):
                        yield dist
                        break

    def get_file_path(self, name, relative_path):
        """
        Return the path to a resource file.
        """
        dist = self.get_distribution(name)
        if dist is None:
            raise LookupError('no distribution named %r found' % name)
        return dist.get_resource_path(relative_path)

    def get_exported_entries(self, category, name=None):
        """
        Return all of the exported entries in a particular category.
        :param category: The category to search for entries.
        :param name: If specified, only entries with that name are returned.
        """
        for dist in self.get_distributions():
            r = dist.exports
            if category in r:
                d = r[category]
                if name is not None:
                    if name in d:
                        yield d[name]
                else:
                    for v in d.values():
                        yield v
class Distribution(object):
    """
    A base class for distributions, whether installed or from indexes.
    Either way, it must have some metadata, so that's all that's needed
    for construction.
    """
    build_time_dependency = False
    """
    Set to True if it's known to be only a build-time dependency (i.e.
    not needed after installation).
    """
    requested = False
    """A boolean that indicates whether the ``REQUESTED`` metadata file is
    present (in other words, whether the package was installed by user
    request or it was installed as a dependency)."""
    def __init__(self, metadata):
        """
        Initialise an instance.
        :param metadata: The instance of :class:`Metadata` describing this
                         distribution.
        """
        self.metadata = metadata
        self.name = metadata.name
        self.key = self.name.lower()    # for case-insensitive comparisons
        self.version = metadata.version
        # The attributes below are only initialised here; they are populated
        # by code outside this class (presumably locator/download machinery
        # -- not visible in this module).
        self.locator = None
        self.digest = None
        self.extras = None      # additional features requested
        self.context = None     # environment marker overrides
        self.download_urls = set()
        self.digests = {}
    @property
    def source_url(self):
        """
        The source archive download URL for this distribution.
        """
        return self.metadata.source_url
    download_url = source_url   # Backward compatibility
    @property
    def name_and_version(self):
        """
        A utility property which displays the name and version in parentheses.
        """
        return '%s (%s)' % (self.name, self.version)
    @property
    def provides(self):
        """
        A set of distribution names and versions provided by this distribution.
        :return: A set of "name (version)" strings.
        """
        # The distribution always provides itself; append if not yet listed.
        plist = self.metadata.provides
        s = '%s (%s)' % (self.name, self.version)
        if s not in plist:
            plist.append(s)
        return plist
    def _get_requirements(self, req_attr):
        # Resolve the named requirements attribute of the metadata, filtered
        # through the requested extras and environment-marker context.
        md = self.metadata
        logger.debug('Getting requirements from metadata %r', md.todict())
        reqts = getattr(md, req_attr)
        return set(md.get_requirements(reqts, extras=self.extras,
                                       env=self.context))
    @property
    def run_requires(self):
        return self._get_requirements('run_requires')
    @property
    def meta_requires(self):
        return self._get_requirements('meta_requires')
    @property
    def build_requires(self):
        return self._get_requirements('build_requires')
    @property
    def test_requires(self):
        return self._get_requirements('test_requires')
    @property
    def dev_requires(self):
        return self._get_requirements('dev_requires')
    def matches_requirement(self, req):
        """
        Say if this instance matches (fulfills) a requirement.
        :param req: The requirement to match.
        :rtype req: str
        :return: True if it matches, else False.
        """
        # Requirement may contain extras - parse to lose those
        # from what's passed to the matcher
        r = parse_requirement(req)
        scheme = get_scheme(self.metadata.scheme)
        try:
            matcher = scheme.matcher(r.requirement)
        except UnsupportedVersionError:
            # XXX compat-mode if cannot read the version
            logger.warning('could not read version %r - using name only',
                           req)
            name = req.split()[0]
            matcher = scheme.matcher(name)
        name = matcher.key      # case-insensitive
        result = False
        # Match the requirement against everything this distribution provides.
        for p in self.provides:
            p_name, p_ver = parse_name_and_version(p)
            if p_name != name:
                continue
            try:
                result = matcher.match(p_ver)
                break
            except UnsupportedVersionError:
                pass
        return result
    def __repr__(self):
        """
        Return a textual representation of this instance,
        """
        if self.source_url:
            suffix = ' [%s]' % self.source_url
        else:
            suffix = ''
        return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
    def __eq__(self, other):
        """
        See if this distribution is the same as another.
        :param other: The distribution to compare with. To be equal to one
                      another. distributions must have the same type, name,
                      version and source_url.
        :return: True if it is the same, else False.
        """
        if type(other) is not type(self):
            result = False
        else:
            result = (self.name == other.name and
                      self.version == other.version and
                      self.source_url == other.source_url)
        return result
    def __hash__(self):
        """
        Compute hash in a way which matches the equality test.
        """
        # Combines exactly the attributes compared by __eq__, so equal
        # instances hash equally.
        return hash(self.name) + hash(self.version) + hash(self.source_url)
class BaseInstalledDistribution(Distribution):
    """
    This is the base class for installed distributions (whether PEP 376 or
    legacy).
    """
    # Name of the default hash algorithm used by get_hash; subclasses
    # override this (e.g. InstalledDistribution uses 'sha256').
    hasher = None

    def __init__(self, metadata, path, env=None):
        """
        Initialise an instance.
        :param metadata: An instance of :class:`Metadata` which describes the
                         distribution. This will normally have been initialised
                         from a metadata file in the ``path``.
        :param path: The path of the ``.dist-info`` or ``.egg-info``
                     directory for the distribution.
        :param env: This is normally the :class:`DistributionPath`
                    instance where this distribution was found.
        """
        super(BaseInstalledDistribution, self).__init__(metadata)
        self.path = path
        self.dist_path = env

    def get_hash(self, data, hasher=None):
        """
        Get the hash of some data, using a particular hash algorithm, if
        specified.
        :param data: The data to be hashed.
        :type data: bytes
        :param hasher: The name of a hash implementation, supported by hashlib,
                       or ``None``. Examples of valid values are ``'sha1'``,
                       ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
                       ``'sha512'``. If no hasher is specified, the ``hasher``
                       attribute of the :class:`InstalledDistribution` instance
                       is used. If the hasher is determined to be ``None``, MD5
                       is used as the hashing algorithm.
        :returns: The hash of the data. If a hasher was explicitly specified,
                  the returned hash will be prefixed with the specified hasher
                  followed by '='.
        :rtype: str
        """
        if hasher is None:
            hasher = self.hasher
        if hasher is None:
            hasher = hashlib.md5
            prefix = ''
        else:
            # Bug fix: the prefix must name the algorithm actually used.
            # The original used self.hasher here, which mislabelled the
            # digest whenever an explicit *hasher* argument differed from
            # the instance default (e.g. verifying an md5-prefixed RECORD
            # entry on a sha256 distribution).
            prefix = '%s=' % hasher
            hasher = getattr(hashlib, hasher)
        digest = hasher(data).digest()
        # RECORD files use urlsafe base64 with padding stripped (PEP 376/427).
        digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
        return '%s%s' % (prefix, digest)
class InstalledDistribution(BaseInstalledDistribution):
    """
    Created with the *path* of the ``.dist-info`` directory provided to the
    constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed in Metadata instance (useful for when
    dry-run mode is being used).
    """
    hasher = 'sha256'

    def __init__(self, path, metadata=None, env=None):
        self.finder = finder = resources.finder_for_path(path)
        if finder is None:
            # Was a leftover debugging trap (import pdb; pdb.set_trace()).
            # Nothing below can work without a finder, so fail loudly.
            raise ValueError('finder unavailable for %s' % path)
        if env and env._cache_enabled and path in env._cache.path:
            metadata = env._cache.path[path].metadata
        elif metadata is None:
            r = finder.find(METADATA_FILENAME)
            # Temporary - for Wheel 0.23 support
            if r is None:
                r = finder.find(WHEEL_METADATA_FILENAME)
            # Temporary - for legacy support
            if r is None:
                r = finder.find('METADATA')
            if r is None:
                raise ValueError('no %s found in %s' % (METADATA_FILENAME,
                                                        path))
            with contextlib.closing(r.as_stream()) as stream:
                metadata = Metadata(fileobj=stream, scheme='legacy')
        super(InstalledDistribution, self).__init__(metadata, path, env)
        if env and env._cache_enabled:
            env._cache.add(self)
        try:
            r = finder.find('REQUESTED')
        except AttributeError:
            # Was a leftover debugging trap which also left ``r`` unbound.
            # A finder without find() just means we cannot tell whether the
            # install was user-requested; treat the marker as absent.
            r = None
        self.requested = r is not None

    def __repr__(self):
        return '<InstalledDistribution %r %s at %r>' % (
            self.name, self.version, self.path)

    def __str__(self):
        return "%s %s" % (self.name, self.version)

    def _get_records(self):
        """
        Get the list of installed files for the distribution
        :return: A list of tuples of path, hash and size. Note that hash and
                 size might be ``None`` for some entries. The path is exactly
                 as stored in the file (which is as in PEP 376).
        """
        results = []
        r = self.get_distinfo_resource('RECORD')
        with contextlib.closing(r.as_stream()) as stream:
            with CSVReader(stream=stream) as record_reader:
                for row in record_reader:
                    # Pad short rows with None so each result is a 3-tuple.
                    missing = [None for i in range(len(row), 3)]
                    path, checksum, size = row + missing
                    results.append((path, checksum, size))
        return results

    @cached_property
    def exports(self):
        """
        Return the information exported by this distribution.
        :return: A dictionary of exports, mapping an export category to a dict
                 of :class:`ExportEntry` instances describing the individual
                 export entries, and keyed by name.
        """
        result = {}
        r = self.get_distinfo_resource(EXPORTS_FILENAME)
        if r:
            result = self.read_exports()
        return result

    def read_exports(self):
        """
        Read exports data from a file in .ini format.
        :return: A dictionary of exports, mapping an export category to a list
                 of :class:`ExportEntry` instances describing the individual
                 export entries.
        """
        result = {}
        r = self.get_distinfo_resource(EXPORTS_FILENAME)
        if r:
            with contextlib.closing(r.as_stream()) as stream:
                result = read_exports(stream)
        return result

    def write_exports(self, exports):
        """
        Write a dictionary of exports to a file in .ini format.
        :param exports: A dictionary of exports, mapping an export category to
                        a list of :class:`ExportEntry` instances describing the
                        individual export entries.
        """
        rf = self.get_distinfo_file(EXPORTS_FILENAME)
        with open(rf, 'w') as f:
            write_exports(exports, f)

    def get_resource_path(self, relative_path):
        """
        NOTE: This API may change in the future.
        Return the absolute path to a resource file with the given relative
        path.
        :param relative_path: The path, relative to .dist-info, of the resource
                              of interest.
        :return: The absolute path where the resource is to be found.
        """
        r = self.get_distinfo_resource('RESOURCES')
        with contextlib.closing(r.as_stream()) as stream:
            with CSVReader(stream=stream) as resources_reader:
                for relative, destination in resources_reader:
                    if relative == relative_path:
                        return destination
        raise KeyError('no resource file with relative path %r '
                       'is installed' % relative_path)

    def list_installed_files(self):
        """
        Iterates over the ``RECORD`` entries and returns a tuple
        ``(path, hash, size)`` for each line.
        :returns: iterator of (path, hash, size)
        """
        for result in self._get_records():
            yield result

    def write_installed_files(self, paths, prefix, dry_run=False):
        """
        Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
        existing ``RECORD`` file is silently overwritten.
        prefix is used to determine when to write absolute paths.
        """
        prefix = os.path.join(prefix, '')
        base = os.path.dirname(self.path)
        base_under_prefix = base.startswith(prefix)
        base = os.path.join(base, '')
        record_path = self.get_distinfo_file('RECORD')
        logger.info('creating %s', record_path)
        if dry_run:
            return None
        with CSVWriter(record_path) as writer:
            for path in paths:
                if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
                    # do not put size and hash, as in PEP-376
                    hash_value = size = ''
                else:
                    size = '%d' % os.path.getsize(path)
                    with open(path, 'rb') as fp:
                        hash_value = self.get_hash(fp.read())
                if path.startswith(base) or (base_under_prefix and
                                             path.startswith(prefix)):
                    path = os.path.relpath(path, base)
                writer.writerow((path, hash_value, size))
            # add the RECORD file itself
            if record_path.startswith(base):
                record_path = os.path.relpath(record_path, base)
            writer.writerow((record_path, '', ''))
        return record_path

    def check_installed_files(self):
        """
        Checks that the hashes and sizes of the files in ``RECORD`` are
        matched by the files themselves. Returns a (possibly empty) list of
        mismatches. Each entry in the mismatch list will be a tuple consisting
        of the path, 'exists', 'size' or 'hash' according to what didn't match
        (existence is checked first, then size, then hash), the expected
        value and the actual value.
        """
        mismatches = []
        base = os.path.dirname(self.path)
        record_path = self.get_distinfo_file('RECORD')
        for path, hash_value, size in self.list_installed_files():
            if not os.path.isabs(path):
                path = os.path.join(base, path)
            if path == record_path:
                continue
            if not os.path.exists(path):
                mismatches.append((path, 'exists', True, False))
            elif os.path.isfile(path):
                actual_size = str(os.path.getsize(path))
                if size and actual_size != size:
                    mismatches.append((path, 'size', size, actual_size))
                elif hash_value:
                    # RECORD hashes may be prefixed 'algorithm=digest'.
                    if '=' in hash_value:
                        hasher = hash_value.split('=', 1)[0]
                    else:
                        hasher = None
                    with open(path, 'rb') as f:
                        actual_hash = self.get_hash(f.read(), hasher)
                    if actual_hash != hash_value:
                        mismatches.append((path, 'hash', hash_value, actual_hash))
        return mismatches

    @cached_property
    def shared_locations(self):
        """
        A dictionary of shared locations whose keys are in the set 'prefix',
        'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
        The corresponding value is the absolute path of that category for
        this distribution, and takes into account any paths selected by the
        user at installation time (e.g. via command-line arguments). In the
        case of the 'namespace' key, this would be a list of absolute paths
        for the roots of namespace packages in this distribution.
        The first time this property is accessed, the relevant information is
        read from the SHARED file in the .dist-info directory.
        """
        result = {}
        shared_path = os.path.join(self.path, 'SHARED')
        if os.path.isfile(shared_path):
            with codecs.open(shared_path, 'r', encoding='utf-8') as f:
                lines = f.read().splitlines()
            for line in lines:
                key, value = line.split('=', 1)
                if key == 'namespace':
                    result.setdefault(key, []).append(value)
                else:
                    result[key] = value
        return result

    def write_shared_locations(self, paths, dry_run=False):
        """
        Write shared location information to the SHARED file in .dist-info.
        :param paths: A dictionary as described in the documentation for
                      :meth:`shared_locations`.
        :param dry_run: If True, the action is logged but no file is actually
                        written.
        :return: The path of the file written to.
        """
        shared_path = os.path.join(self.path, 'SHARED')
        logger.info('creating %s', shared_path)
        if dry_run:
            return None
        lines = []
        for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
            path = paths[key]
            if os.path.isdir(paths[key]):
                lines.append('%s=%s' % (key, path))
        for ns in paths.get('namespace', ()):
            lines.append('namespace=%s' % ns)
        with codecs.open(shared_path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(lines))
        return shared_path

    def get_distinfo_resource(self, path):
        # Guard against paths outside the PEP 376 dist-info file set.
        if path not in DIST_FILES:
            raise DistlibException('invalid path for a dist-info file: '
                                   '%r at %r' % (path, self.path))
        finder = resources.finder_for_path(self.path)
        if finder is None:
            raise DistlibException('Unable to get a finder for %s' % self.path)
        return finder.find(path)

    def get_distinfo_file(self, path):
        """
        Returns a path located under the ``.dist-info`` directory. Returns a
        string representing the path.
        :parameter path: a ``'/'``-separated path relative to the
                         ``.dist-info`` directory or an absolute path;
                         If *path* is an absolute path and doesn't start
                         with the ``.dist-info`` directory path,
                         a :class:`DistlibException` is raised
        :type path: str
        :rtype: str
        """
        # Check if it is an absolute path  # XXX use relpath, add tests
        if path.find(os.sep) >= 0:
            # it's an absolute path?
            distinfo_dirname, path = path.split(os.sep)[-2:]
            if distinfo_dirname != self.path.split(os.sep)[-1]:
                raise DistlibException(
                    'dist-info file %r does not belong to the %r %s '
                    'distribution' % (path, self.name, self.version))
        # The file must be relative
        if path not in DIST_FILES:
            raise DistlibException('invalid path for a dist-info file: '
                                   '%r at %r' % (path, self.path))
        return os.path.join(self.path, path)

    def list_distinfo_files(self):
        """
        Iterates over the ``RECORD`` entries and returns paths for each line if
        the path is pointing to a file located in the ``.dist-info`` directory
        or one of its subdirectories.
        :returns: iterator of paths
        """
        base = os.path.dirname(self.path)
        for path, checksum, size in self._get_records():
            # XXX add separator or use real relpath algo
            if not os.path.isabs(path):
                path = os.path.join(base, path)
            if path.startswith(self.path):
                yield path

    def __eq__(self, other):
        return (isinstance(other, InstalledDistribution) and
                self.path == other.path)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    __hash__ = object.__hash__
class EggInfoDistribution(BaseInstalledDistribution):
"""Created with the *path* of the ``.egg-info`` directory or file provided
to the constructor. It reads the metadata contained in the file itself, or
if the given path happens to be a directory, the metadata is read from the
file ``PKG-INFO`` under that directory."""
requested = True # as we have no way of knowing, assume it was
shared_locations = {}
def __init__(self, path, env=None):
def set_name_and_version(s, n, v):
s.name = n
s.key = n.lower() # for case-insensitive comparisons
s.version = v
self.path = path
self.dist_path = env
if env and env._cache_enabled and path in env._cache_egg.path:
metadata = env._cache_egg.path[path].metadata
set_name_and_version(self, metadata.name, metadata.version)
else:
metadata = self._get_metadata(path)
# Need to be set before caching
set_name_and_version(self, metadata.name, metadata.version)
if env and env._cache_enabled:
env._cache_egg.add(self)
super(EggInfoDistribution, self).__init__(metadata, path, env)
def _get_metadata(self, path):
requires = None
def parse_requires_data(data):
"""Create a list of dependencies from a requires.txt file.
*data*: the contents of a setuptools-produced requires.txt file.
"""
reqs = []
lines = data.splitlines()
for line in lines:
line = line.strip()
if line.startswith('['):
logger.warning('Unexpected line: quitting requirement scan: %r',
line)
break
r = parse_requirement(line)
if not r:
logger.warning('Not recognised as a requirement: %r', line)
continue
if r.extras:
logger.warning('extra requirements in requires.txt are '
'not supported')
if not r.constraints:
reqs.append(r.name)
else:
cons = ', '.join('%s%s' % c for c in r.constraints)
reqs.append('%s (%s)' % (r.name, cons))
return reqs
def parse_requires_path(req_path):
"""Create a list of dependencies from a requires.txt file.
*req_path*: the path to a setuptools-produced requires.txt file.
"""
reqs = []
try:
with codecs.open(req_path, 'r', 'utf-8') as fp:
reqs = parse_requires_data(fp.read())
except IOError:
pass
return reqs
if path.endswith('.egg'):
if os.path.isdir(path):
meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
metadata = Metadata(path=meta_path, scheme='legacy')
req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
requires = parse_requires_path(req_path)
else:
# FIXME handle the case where zipfile is not available
zipf = zipimport.zipimporter(path)
fileobj = StringIO(
zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
metadata = Metadata(fileobj=fileobj, scheme='legacy')
try:
data = zipf.get_data('EGG-INFO/requires.txt')
requires = parse_requires_data(data.decode('utf-8'))
except IOError:
requires = None
elif path.endswith('.egg-info'):
if os.path.isdir(path):
req_path = os.path.join(path, 'requires.txt')
requires = parse_requires_path(req_path)
path = os.path.join(path, 'PKG-INFO')
metadata = Metadata(path=path, scheme='legacy')
else:
raise DistlibException('path must end with .egg-info or .egg, '
'got %r' % path)
if requires:
metadata.add_requirements(requires)
return metadata
def __repr__(self):
return '<EggInfoDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def check_installed_files(self):
"""
Checks that the hashes and sizes of the files in ``RECORD`` are
matched by the files themselves. Returns a (possibly empty) list of
mismatches. Each entry in the mismatch list will be a tuple consisting
of the path, 'exists', 'size' or 'hash' according to what didn't match
(existence is checked first, then size, then hash), the expected
value and the actual value.
"""
mismatches = []
record_path = os.path.join(self.path, 'installed-files.txt')
if os.path.exists(record_path):
for path, _, _ in self.list_installed_files():
if path == record_path:
continue
if not os.path.exists(path):
mismatches.append((path, 'exists', True, False))
return mismatches
def list_installed_files(self):
"""
Iterates over the ``installed-files.txt`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: a list of (path, hash, size)
"""
def _md5(path):
f = open(path, 'rb')
try:
content = f.read()
finally:
f.close()
return hashlib.md5(content).hexdigest()
def _size(path):
return os.stat(path).st_size
record_path = os.path.join(self.path, 'installed-files.txt')
result = []
if os.path.exists(record_path):
with codecs.open(record_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
p = os.path.normpath(os.path.join(self.path, line))
# "./" is present as a marker between installed files
# and installation metadata files
if not os.path.exists(p):
logger.warning('Non-existent file: %s', p)
if p.endswith(('.pyc', '.pyo')):
continue
#otherwise fall through and fail
if not os.path.isdir(p):
result.append((p, _md5(p), _size(p)))
result.append((record_path, None, None))
return result
    def list_distinfo_files(self, absolute=False):
        """
        Iterates over the ``installed-files.txt`` entries and returns paths for
        each line if the path is pointing to a file located in the
        ``.egg-info`` directory or one of its subdirectories.

        :parameter absolute: If *absolute* is ``True``, each returned path is
                          transformed into a local absolute path. Otherwise the
                          raw value from ``installed-files.txt`` is returned.
        :type absolute: boolean
        :returns: iterator of paths
        """
        record_path = os.path.join(self.path, 'installed-files.txt')
        # Lines before the './' marker are installed files; metadata entries
        # follow the marker, and only those are considered here.
        skip = True
        with codecs.open(record_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if line == './':
                    skip = False
                    continue
                if not skip:
                    p = os.path.normpath(os.path.join(self.path, line))
                    # Only yield entries that resolve inside the .egg-info
                    # directory itself.
                    if p.startswith(self.path):
                        if absolute:
                            yield p
                        else:
                            yield line
def __eq__(self, other):
return (isinstance(other, EggInfoDistribution) and
self.path == other.path)
# See http://docs.python.org/reference/datamodel#object.__hash__
__hash__ = object.__hash__
# Aliases used by callers: PEP 376-style installs are represented by
# InstalledDistribution, legacy .egg installs by EggInfoDistribution.
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
    """
    Represents a dependency graph between distributions.

    The dependency relationships are stored in an ``adjacency_list`` that maps
    distributions to a list of ``(other, label)`` tuples where ``other``
    is a distribution and the edge is labeled with ``label`` (i.e. the version
    specifier, if such was provided). Also, for more efficient traversal, for
    every distribution ``x``, a list of predecessors is kept in
    ``reverse_list[x]``. An edge from distribution ``a`` to
    distribution ``b`` means that ``a`` depends on ``b``. If any missing
    dependencies are found, they are stored in ``missing``, which is a
    dictionary that maps distributions to a list of requirements that were not
    provided by any other distributions.
    """

    def __init__(self):
        self.adjacency_list = {}
        self.reverse_list = {}
        self.missing = {}

    def add_distribution(self, distribution):
        """Add the *distribution* to the graph.

        :type distribution: :class:`distutils2.database.InstalledDistribution`
                            or :class:`distutils2.database.EggInfoDistribution`
        """
        self.adjacency_list[distribution] = []
        self.reverse_list[distribution] = []
        #self.missing[distribution] = []

    def add_edge(self, x, y, label=None):
        """Add an edge from distribution *x* to distribution *y* with the given
        *label*.

        :type x: :class:`distutils2.database.InstalledDistribution` or
                 :class:`distutils2.database.EggInfoDistribution`
        :type y: :class:`distutils2.database.InstalledDistribution` or
                 :class:`distutils2.database.EggInfoDistribution`
        :type label: ``str`` or ``None``
        """
        self.adjacency_list[x].append((y, label))
        # multiple edges are allowed, so be careful
        if x not in self.reverse_list[y]:
            self.reverse_list[y].append(x)

    def add_missing(self, distribution, requirement):
        """
        Add a missing *requirement* for the given *distribution*.

        :type distribution: :class:`distutils2.database.InstalledDistribution`
                            or :class:`distutils2.database.EggInfoDistribution`
        :type requirement: ``str``
        """
        logger.debug('%s missing %r', distribution, requirement)
        self.missing.setdefault(distribution, []).append(requirement)

    def _repr_dist(self, dist):
        # 'name version' -- shared by repr_node() and __repr__().
        return '%s %s' % (dist.name, dist.version)

    def repr_node(self, dist, level=1):
        """Return a string representation of the subgraph rooted at *dist*."""
        output = [self._repr_dist(dist)]
        for other, label in self.adjacency_list[dist]:
            dist = self._repr_dist(other)
            if label is not None:
                dist = '%s [%s]' % (dist, label)
            output.append(' ' * level + str(dist))
            suboutput = self.repr_node(other, level + 1)
            subs = suboutput.split('\n')
            output.extend(subs[1:])
        return '\n'.join(output)

    def to_dot(self, f, skip_disconnected=True):
        """Writes a DOT output for the graph to the provided file *f*.

        If *skip_disconnected* is set to ``True``, then all distributions
        that are not dependent on any other distribution are skipped.

        :type f: has to support ``file``-like operations
        :type skip_disconnected: ``bool``
        """
        disconnected = []
        f.write("digraph dependencies {\n")
        for dist, adjs in self.adjacency_list.items():
            if len(adjs) == 0 and not skip_disconnected:
                disconnected.append(dist)
            for other, label in adjs:
                if label is not None:
                    f.write('"%s" -> "%s" [label="%s"]\n' %
                            (dist.name, other.name, label))
                else:
                    f.write('"%s" -> "%s"\n' % (dist.name, other.name))
        if not skip_disconnected and len(disconnected) > 0:
            f.write('subgraph disconnected {\n')
            f.write('label = "Disconnected"\n')
            f.write('bgcolor = red\n')
            for dist in disconnected:
                f.write('"%s"' % dist.name)
                f.write('\n')
            f.write('}\n')
        f.write('}\n')

    def topological_sort(self):
        """
        Perform a topological sort of the graph.

        :return: A tuple, the first element of which is a topologically sorted
                 list of distributions, and the second element of which is a
                 list of distributions that cannot be sorted because they have
                 circular dependencies and so form a cycle.
        """
        result = []
        # Make a shallow copy of the adjacency list
        alist = {}
        for k, v in self.adjacency_list.items():
            alist[k] = v[:]
        while True:
            # See what we can remove in this run
            to_remove = []
            # list() snapshots the items so we can delete keys while iterating.
            for k, v in list(alist.items()):
                if not v:
                    to_remove.append(k)
                    del alist[k]
            if not to_remove:
                # What's left in alist (if anything) is a cycle.
                break
            # Remove from the adjacency list of others
            for k, v in alist.items():
                alist[k] = [(d, r) for d, r in v if d not in to_remove]
            logger.debug('Moving to result: %s',
                         ['%s (%s)' % (d.name, d.version) for d in to_remove])
            result.extend(to_remove)
        return result, list(alist.keys())

    def __repr__(self):
        """Representation of the graph"""
        output = []
        for dist in self.adjacency_list:
            output.append(self.repr_node(dist))
        return '\n'.join(output)
def make_graph(dists, scheme='default'):
    """Makes a dependency graph from the given distributions.

    :parameter dists: a list of distributions
    :type dists: list of :class:`distutils2.database.InstalledDistribution` and
                 :class:`distutils2.database.EggInfoDistribution` instances
    :rtype: a :class:`DependencyGraph` instance
    """
    scheme = get_scheme(scheme)
    graph = DependencyGraph()
    provided = {}  # maps names to lists of (version, dist) tuples

    # Pass 1: register every distribution and record what each one provides.
    for dist in dists:
        graph.add_distribution(dist)
        for entry in dist.provides:
            name, version = parse_name_and_version(entry)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            provided.setdefault(name, []).append((version, dist))

    # Pass 2: resolve every requirement against the recorded providers.
    for dist in dists:
        requires = (dist.run_requires | dist.meta_requires |
                    dist.build_requires | dist.dev_requires)
        for req in requires:
            try:
                matcher = scheme.matcher(req)
            except UnsupportedVersionError:
                # XXX compat-mode if cannot read the version
                logger.warning('could not read version %r - using name only',
                               req)
                req_name = req.split()[0]
                matcher = scheme.matcher(req_name)
            key = matcher.key  # case-insensitive
            # for-else: the else branch fires only when no provider matched.
            for version, provider in provided.get(key, ()):
                try:
                    found = matcher.match(version)
                except UnsupportedVersionError:
                    found = False
                if found:
                    graph.add_edge(dist, provider, req)
                    break
            else:
                graph.add_missing(dist, req)
    return graph
def get_dependent_dists(dists, dist):
    """Recursively generate a list of distributions from *dists* that are
    dependent on *dist*.

    :param dists: a list of distributions
    :param dist: a distribution, member of *dists* for which we are interested
    """
    if dist not in dists:
        raise DistlibException('given distribution %r is not a member '
                               'of the list' % dist.name)
    graph = make_graph(dists)

    # *dist* itself is seeded into the visited list only to stop cycles from
    # re-adding it; it is stripped from the result before returning.
    visited = [dist]
    pending = graph.reverse_list[dist]  # nodes still to inspect
    while pending:
        current = pending.pop()
        visited.append(current)
        for dependent in graph.reverse_list[current]:
            if dependent not in visited:
                pending.append(dependent)
    return visited[1:]
def get_required_dists(dists, dist):
    """Recursively generate a list of distributions from *dists* that are
    required by *dist*.

    :param dists: a list of distributions
    :param dist: a distribution, member of *dists* for which we are interested
    """
    if dist not in dists:
        raise DistlibException('given distribution %r is not a member '
                               'of the list' % dist.name)
    graph = make_graph(dists)

    req = []  # required distributions
    todo = graph.adjacency_list[dist]  # list of (dist, label) edges to inspect
    while todo:
        d = todo.pop()[0]
        # BUG FIX: the original compared the (dist, label) edge tuple against
        # the dists in req ('if pred not in req'), which is always true; that
        # produced duplicates and an infinite loop on cyclic dependency
        # graphs. Compare distributions, and skip anything already collected
        # (including *dist* itself, reachable via a cycle).
        if d in req or d is dist:
            continue
        req.append(d)
        for pred in graph.adjacency_list[d]:
            if pred[0] not in req:
                todo.append(pred)
    return req
def make_dist(name, version, **kwargs):
    """
    A convenience method for making a dist given just a name and version.

    Any other metadata fields may be passed as keyword arguments; a
    ``summary`` keyword overrides the default placeholder summary.
    """
    summary = kwargs.pop('summary', 'Placeholder for summary')
    md = Metadata(**kwargs)
    md.name = name
    md.version = version
    # BUG FIX: the fallback string was misspelled ('Plaeholder'); it is used
    # when an explicit summary of '' or None is passed in.
    md.summary = summary or 'Placeholder for summary'
    return Distribution(md)
|
1ns/project-r2 | refs/heads/master | web/themes/contrib/bootstrap_sass_starterkit/node_modules/node-gyp/gyp/pylib/gyp/__init__.py | 1524 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP
# Populated by gyp_main() from repeated -d/--debug flags; DebugOutput()
# consults it to decide whether to print.
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
if 'all' in gyp.debug or mode in gyp.debug:
ctx = ('unknown', 0, 'unknown')
try:
f = traceback.extract_stack(limit=2)
if f:
ctx = f[0][:3]
except:
pass
if args:
message %= args
print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
ctx[1], ctx[2], message)
def FindBuildFiles():
  """Return the names of the .gyp files in the current working directory."""
  # A comprehension avoids the manual append loop and the original's
  # shadowing of the (Python 2) builtin 'file'.
  extension = '.gyp'
  return [name for name in os.listdir(os.getcwd())
          if name.endswith(extension)]
def Load(build_files, format, default_variables={},
         includes=[], depth='.', params=None, check=False,
         circular_check=True, duplicate_basename_check=True):
  """
  Loads one or more specified build files.
  default_variables and includes will be copied before use.
  Returns the generator for the specified format and the
  data returned by loading the specified build files.

  NOTE(review): the mutable default arguments are not mutated here --
  default_variables is copied and includes is sliced before use -- so the
  usual mutable-default pitfall does not apply.
  """
  if params is None:
    params = {}
  # A format of the form 'name-flavor' selects generator 'name' with the
  # given flavor (e.g. 'ninja-android').
  if '-' in format:
    format, params['flavor'] = format.split('-', 1)
  default_variables = copy.copy(default_variables)
  # Default variables provided by this program and its modules should be
  # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
  # avoiding collisions with user and automatic variables.
  default_variables['GENERATOR'] = format
  default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '')
  # Format can be a custom python file, or by default the name of a module
  # within gyp.generator.
  if format.endswith('.py'):
    generator_name = os.path.splitext(format)[0]
    path, generator_name = os.path.split(generator_name)
    # Make sure the path to the custom generator is in sys.path
    # Don't worry about removing it once we are done. Keeping the path
    # to each generator that is used in sys.path is likely harmless and
    # arguably a good idea.
    path = os.path.abspath(path)
    if path not in sys.path:
      sys.path.insert(0, path)
  else:
    generator_name = 'gyp.generator.' + format
  # These parameters are passed in order (as opposed to by key)
  # because ActivePython cannot handle key parameters to __import__.
  generator = __import__(generator_name, globals(), locals(), generator_name)
  # Generator-supplied defaults never override caller-supplied ones.
  for (key, val) in generator.generator_default_variables.items():
    default_variables.setdefault(key, val)
  # Give the generator the opportunity to set additional variables based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateVariables', None):
    generator.CalculateVariables(default_variables, params)
  # Give the generator the opportunity to set generator_input_info based on
  # the params it will receive in the output phase.
  if getattr(generator, 'CalculateGeneratorInputInfo', None):
    generator.CalculateGeneratorInputInfo(params)
  # Fetch the generator specific info that gets fed to input, we use getattr
  # so we can default things and the generators only have to provide what
  # they need.
  generator_input_info = {
    'non_configuration_keys':
        getattr(generator, 'generator_additional_non_configuration_keys', []),
    'path_sections':
        getattr(generator, 'generator_additional_path_sections', []),
    'extra_sources_for_rules':
        getattr(generator, 'generator_extra_sources_for_rules', []),
    'generator_supports_multiple_toolsets':
        getattr(generator, 'generator_supports_multiple_toolsets', False),
    'generator_wants_static_library_dependencies_adjusted':
        getattr(generator,
                'generator_wants_static_library_dependencies_adjusted', True),
    'generator_wants_sorted_dependencies':
        getattr(generator, 'generator_wants_sorted_dependencies', False),
    'generator_filelist_paths':
        getattr(generator, 'generator_filelist_paths', None),
  }
  # Process the input specific to this generator.
  result = gyp.input.Load(build_files, default_variables, includes[:],
                          depth, generator_input_info, check, circular_check,
                          duplicate_basename_check,
                          params['parallel'], params['root_targets'])
  return [generator] + result
def NameValueListToDict(name_value_list):
  """
  Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
  of the pairs. If a string is simply NAME, then the value in the dictionary
  is set to True. If VALUE can be converted to an integer, it is.
  """
  result = {}
  for item in name_value_list:
    name, sep, value = item.partition('=')
    if not sep:
      # Bare NAME with no '=': treat it as a boolean flag.
      result[name] = True
      continue
    # Prefer an integer value when the text parses as one.
    try:
      result[name] = int(value)
    except ValueError:
      result[name] = value
  return result
def ShlexEnv(env_name):
  """Split the environment variable |env_name| like a shell command line.

  Returns the unsplit falsy value ([] when unset, '' when empty) if there is
  nothing to split, matching the historical behaviour.
  """
  flags = os.environ.get(env_name, [])
  return shlex.split(flags) if flags else flags
def FormatOpt(opt, value):
  """Join an option and its value: '--opt=value' for long options,
  '-Ovalue' for short ones."""
  joiner = '=' if opt.startswith('--') else ''
  return opt + joiner + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
  """Regenerate a list of command line flags, for an option of action='append'.

  The |env_name|, if given, is checked in the environment and used to generate
  an initial list of options, then the options that were specified on the
  command line (given in |values|) are appended. This matches the handling of
  environment variables and command line flags where command line flags
  override the environment, while not requiring the environment to be set when
  the flags are used again.
  """
  flags = []
  if options.use_environment and env_name:
    for env_value in ShlexEnv(env_name):
      formatted = FormatOpt(flag, predicate(env_value))
      # Keep only the last occurrence so that later values win.
      if formatted in flags:
        flags.remove(formatted)
      flags.append(formatted)
  for cmdline_value in values or []:
    flags.append(FormatOpt(flag, predicate(cmdline_value)))
  return flags
def RegenerateFlags(options):
  """Given a parsed options object, and taking the environment variables into
  account, returns a list of flags that should regenerate an equivalent options
  object (even in the absence of the environment variables.)

  Any path options will be normalized relative to depth.

  The format flag is not included, as it is assumed the calling generator will
  set that as appropriate.
  """
  def FixPath(path):
    # Normalize a path relative to options.depth; '.' when it collapses to
    # the empty string.
    path = gyp.common.FixIfRelativePath(path, options.depth)
    if not path:
      return os.path.curdir
    return path
  def Noop(value):
    # Identity transform for non-path option values.
    return value
  # We always want to ignore the environment when regenerating, to avoid
  # duplicate or changed flags in the environment at the time of regeneration.
  flags = ['--ignore-environment']
  # _regeneration_metadata is attached by RegeneratableOptionParser.parse_args.
  for name, metadata in options._regeneration_metadata.iteritems():
    opt = metadata['opt']
    value = getattr(options, name)
    value_predicate = metadata['type'] == 'path' and FixPath or Noop
    action = metadata['action']
    env_name = metadata['env_name']
    if action == 'append':
      flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
                                        env_name, options))
    elif action in ('store', None):  # None is a synonym for 'store'.
      if value:
        flags.append(FormatOpt(opt, value_predicate(value)))
      elif options.use_environment and env_name and os.environ.get(env_name):
        flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
    elif action in ('store_true', 'store_false'):
      if ((action == 'store_true' and value) or
          (action == 'store_false' and not value)):
        flags.append(opt)
      elif options.use_environment and env_name:
        print >>sys.stderr, ('Warning: environment regeneration unimplemented '
                             'for %s flag %r env_name %r' % (action, opt,
                                                             env_name))
    else:
      print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
                           'flag %r' % (action, opt))
  return flags
class RegeneratableOptionParser(optparse.OptionParser):
  """OptionParser that records enough metadata to regenerate its flags."""

  def __init__(self):
    self.__regeneratable_options = {}
    optparse.OptionParser.__init__(self)

  def add_option(self, *args, **kw):
    """Add an option to the parser.

    This accepts the same arguments as OptionParser.add_option, plus the
    following:
      regenerate: can be set to False to prevent this option from being
        included in regeneration.
      env_name: name of environment variable that additional values for this
        option come from.
      type: adds type='path', to tell the regenerator that the values of
        this option need to be made relative to options.depth
    """
    env_name = kw.pop('env_name', None)
    if 'dest' in kw and kw.pop('regenerate', True):
      dest = kw['dest']
      # 'path' is a regeneration-only type; optparse itself only ever sees a
      # plain string option.
      opt_type = kw.get('type')
      if opt_type == 'path':
        kw['type'] = 'string'
      self.__regeneratable_options[dest] = {
          'action': kw.get('action'),
          'type': opt_type,
          'env_name': env_name,
          'opt': args[0],
      }
    optparse.OptionParser.add_option(self, *args, **kw)

  def parse_args(self, *args):
    opts, leftover_args = optparse.OptionParser.parse_args(self, *args)
    # Stash the metadata on the values object so that RegenerateFlags() can
    # rebuild an equivalent command line later.
    opts._regeneration_metadata = self.__regeneratable_options
    return opts, leftover_args
def gyp_main(args):
  """Parse |args|, load the build files and run every requested generator.

  Returns 0 on success; raises GypError for fatal user-visible problems.
  """
  my_name = os.path.basename(sys.argv[0])
  parser = RegeneratableOptionParser()
  usage = 'usage: %s [options ...] [build_file ...]'
  parser.set_usage(usage.replace('%s', '%prog'))
  parser.add_option('--build', dest='configs', action='append',
                    help='configuration for build after project generation')
  parser.add_option('--check', dest='check', action='store_true',
                    help='check format of gyp files')
  parser.add_option('--config-dir', dest='config_dir', action='store',
                    env_name='GYP_CONFIG_DIR', default=None,
                    help='The location for configuration files like '
                    'include.gypi.')
  parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
                    action='append', default=[], help='turn on a debugging '
                    'mode for debugging GYP. Supported modes are "variables", '
                    '"includes" and "general" or "all" for all of them.')
  parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
                    env_name='GYP_DEFINES',
                    help='sets variable VAR to value VAL')
  parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
                    help='set DEPTH gyp variable to a relative path to PATH')
  parser.add_option('-f', '--format', dest='formats', action='append',
                    env_name='GYP_GENERATORS', regenerate=False,
                    help='output formats to generate')
  parser.add_option('-G', dest='generator_flags', action='append', default=[],
                    metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
                    help='sets generator flag FLAG to VAL')
  parser.add_option('--generator-output', dest='generator_output',
                    action='store', default=None, metavar='DIR', type='path',
                    env_name='GYP_GENERATOR_OUTPUT',
                    help='puts generated build files under DIR')
  parser.add_option('--ignore-environment', dest='use_environment',
                    action='store_false', default=True, regenerate=False,
                    help='do not read options from environment variables')
  parser.add_option('-I', '--include', dest='includes', action='append',
                    metavar='INCLUDE', type='path',
                    help='files to include in all loaded .gyp files')
  # --no-circular-check disables the check for circular relationships between
  # .gyp files. These relationships should not exist, but they've only been
  # observed to be harmful with the Xcode generator. Chromium's .gyp files
  # currently have some circular relationships on non-Mac platforms, so this
  # option allows the strict behavior to be used on Macs and the lenient
  # behavior to be used elsewhere.
  # TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
  parser.add_option('--no-circular-check', dest='circular_check',
                    action='store_false', default=True, regenerate=False,
                    help="don't check for circular relationships between files")
  # --no-duplicate-basename-check disables the check for duplicate basenames
  # in a static_library/shared_library project. Visual C++ 2008 generator
  # doesn't support this configuration. Libtool on Mac also generates warnings
  # when duplicate basenames are passed into Make generator on Mac.
  # TODO(yukawa): Remove this option when these legacy generators are
  # deprecated.
  parser.add_option('--no-duplicate-basename-check',
                    dest='duplicate_basename_check', action='store_false',
                    default=True, regenerate=False,
                    help="don't check for duplicate basenames")
  parser.add_option('--no-parallel', action='store_true', default=False,
                    help='Disable multiprocessing')
  parser.add_option('-S', '--suffix', dest='suffix', default='',
                    help='suffix to add to generated files')
  parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
                    default=None, metavar='DIR', type='path',
                    help='directory to use as the root of the source tree')
  parser.add_option('-R', '--root-target', dest='root_targets',
                    action='append', metavar='TARGET',
                    help='include only TARGET and its deep dependencies')
  options, build_files_arg = parser.parse_args(args)
  build_files = build_files_arg
  # Set up the configuration directory (defaults to ~/.gyp)
  if not options.config_dir:
    home = None
    home_dot_gyp = None
    if options.use_environment:
      home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
      if home_dot_gyp:
        home_dot_gyp = os.path.expanduser(home_dot_gyp)
    if not home_dot_gyp:
      home_vars = ['HOME']
      if sys.platform in ('cygwin', 'win32'):
        home_vars.append('USERPROFILE')
      for home_var in home_vars:
        home = os.getenv(home_var)
        if home != None:
          home_dot_gyp = os.path.join(home, '.gyp')
          if not os.path.exists(home_dot_gyp):
            home_dot_gyp = None
          else:
            break
  else:
    home_dot_gyp = os.path.expanduser(options.config_dir)
  if home_dot_gyp and not os.path.exists(home_dot_gyp):
    home_dot_gyp = None
  if not options.formats:
    # If no format was given on the command line, then check the env variable.
    generate_formats = []
    if options.use_environment:
      generate_formats = os.environ.get('GYP_GENERATORS', [])
    if generate_formats:
      generate_formats = re.split(r'[\s,]', generate_formats)
    if generate_formats:
      options.formats = generate_formats
    else:
      # Nothing in the variable, default based on platform.
      if sys.platform == 'darwin':
        options.formats = ['xcode']
      elif sys.platform in ('win32', 'cygwin'):
        options.formats = ['msvs']
      else:
        options.formats = ['make']
  if not options.generator_output and options.use_environment:
    g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
    if g_o:
      options.generator_output = g_o
  options.parallel = not options.no_parallel
  # Enable the requested debug modes in the module-level 'debug' dict.
  for mode in options.debug:
    gyp.debug[mode] = 1
  # Do an extra check to avoid work when we're not debugging.
  if DEBUG_GENERAL in gyp.debug:
    DebugOutput(DEBUG_GENERAL, 'running with these options:')
    for option, value in sorted(options.__dict__.items()):
      if option[0] == '_':
        continue
      if isinstance(value, basestring):
        DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value)
      else:
        DebugOutput(DEBUG_GENERAL, " %s: %s", option, value)
  if not build_files:
    build_files = FindBuildFiles()
  if not build_files:
    raise GypError((usage + '\n\n%s: error: no build_file') %
                   (my_name, my_name))
  # TODO(mark): Chromium-specific hack!
  # For Chromium, the gyp "depth" variable should always be a relative path
  # to Chromium's top-level "src" directory. If no depth variable was set
  # on the command line, try to find a "src" directory by looking at the
  # absolute path to each build file's directory. The first "src" component
  # found will be treated as though it were the path used for --depth.
  if not options.depth:
    for build_file in build_files:
      build_file_dir = os.path.abspath(os.path.dirname(build_file))
      build_file_dir_components = build_file_dir.split(os.path.sep)
      components_len = len(build_file_dir_components)
      for index in xrange(components_len - 1, -1, -1):
        if build_file_dir_components[index] == 'src':
          options.depth = os.path.sep.join(build_file_dir_components)
          break
        del build_file_dir_components[index]
      # If the inner loop found something, break without advancing to another
      # build file.
      if options.depth:
        break
    if not options.depth:
      raise GypError('Could not automatically locate src directory. This is'
                     'a temporary Chromium feature that will be removed. Use'
                     '--depth as a workaround.')
  # If toplevel-dir is not set, we assume that depth is the root of our source
  # tree.
  if not options.toplevel_dir:
    options.toplevel_dir = options.depth
  # -D on the command line sets variable defaults - D isn't just for define,
  # it's for default. Perhaps there should be a way to force (-F?) a
  # variable's value so that it can't be overridden by anything else.
  cmdline_default_variables = {}
  defines = []
  if options.use_environment:
    defines += ShlexEnv('GYP_DEFINES')
  if options.defines:
    defines += options.defines
  cmdline_default_variables = NameValueListToDict(defines)
  if DEBUG_GENERAL in gyp.debug:
    DebugOutput(DEBUG_GENERAL,
                "cmdline_default_variables: %s", cmdline_default_variables)
  # Set up includes.
  includes = []
  # If ~/.gyp/include.gypi exists, it'll be forcibly included into every
  # .gyp file that's loaded, before anything else is included.
  if home_dot_gyp != None:
    default_include = os.path.join(home_dot_gyp, 'include.gypi')
    if os.path.exists(default_include):
      print 'Using overrides found in ' + default_include
      includes.append(default_include)
  # Command-line --include files come after the default include.
  if options.includes:
    includes.extend(options.includes)
  # Generator flags should be prefixed with the target generator since they
  # are global across all generator runs.
  gen_flags = []
  if options.use_environment:
    gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
  if options.generator_flags:
    gen_flags += options.generator_flags
  generator_flags = NameValueListToDict(gen_flags)
  if DEBUG_GENERAL in gyp.debug.keys():
    DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)
  # Generate all requested formats (use a set in case we got one format request
  # twice)
  for format in set(options.formats):
    params = {'options': options,
              'build_files': build_files,
              'generator_flags': generator_flags,
              'cwd': os.getcwd(),
              'build_files_arg': build_files_arg,
              'gyp_binary': sys.argv[0],
              'home_dot_gyp': home_dot_gyp,
              'parallel': options.parallel,
              'root_targets': options.root_targets,
              'target_arch': cmdline_default_variables.get('target_arch', '')}
    # Start with the default variables from the command line.
    [generator, flat_list, targets, data] = Load(
        build_files, format, cmdline_default_variables, includes, options.depth,
        params, options.check, options.circular_check,
        options.duplicate_basename_check)
    # TODO(mark): Pass |data| for now because the generator needs a list of
    # build files that came in. In the future, maybe it should just accept
    # a list, and not the whole data dict.
    # NOTE: flat_list is the flattened dependency graph specifying the order
    # that targets may be built. Build systems that operate serially or that
    # need to have dependencies defined before dependents reference them should
    # generate targets in the order specified in flat_list.
    generator.GenerateOutput(flat_list, targets, data, params)
    if options.configs:
      valid_configs = targets[flat_list[0]]['configurations'].keys()
      for conf in options.configs:
        if conf not in valid_configs:
          raise GypError('Invalid config specified via --build: %s' % conf)
      generator.PerformBuild(data, options.configs, params)
  # Done
  return 0
def main(args):
  """Command-line entry point: run gyp_main, mapping GypError to exit code 1."""
  try:
    return gyp_main(args)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return 1
# NOTE: setuptools generated console_scripts calls function with no arguments
def script_main():
  """Entry point for the setuptools console_script (takes no arguments)."""
  return main(sys.argv[1:])
if __name__ == '__main__':
  sys.exit(script_main())
|
Open-Party/python-beaver | refs/heads/master | docs/conf.py | 4 | # -*- coding: utf-8 -*-
#
# beaver documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 21 11:21:22 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# sys/os are imported for the (commented-out) sys.path manipulation below.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'beaver'
copyright = u'2013, Jose Diaz-Gonzalez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE: keep these in sync with the package's released version.
# The short X.Y version.
version = '33.3.0'
# The full version, including alpha/beta/rc tags.
release = '33.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'beaverdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'beaver.tex', u'beaver Documentation',
u'Jose Diaz-Gonzalez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'beaver', u'beaver Documentation',
[u'Jose Diaz-Gonzalez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'beaver', u'beaver Documentation',
u'Jose Diaz-Gonzalez', 'beaver', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
bjolivot/ansible | refs/heads/devel | lib/ansible/modules/network/nxos/nxos_vxlan_vtep_vni.py | 21 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Release-status metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Module documentation rendered by ansible-doc.
# BUGFIX: include_defaults previously listed choices ['true','true'];
# ingress_replication now documents the 'default' choice that the module's
# argument_spec actually accepts.
DOCUMENTATION = '''
---
module: nxos_vxlan_vtep_vni
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Creates a Virtual Network Identifier member (VNI)
description:
    - Creates a Virtual Network Identifier member (VNI) for an NVE
      overlay interface.
author: Gabriele Gerbino (@GGabriele)
notes:
    - default, where supported, restores params default value.
options:
    interface:
        description:
            - Interface name for the VXLAN Network Virtualization Endpoint.
        required: true
    vni:
        description:
            - ID of the Virtual Network Identifier.
        required: true
    assoc_vrf:
        description:
            - This attribute is used to identify and separate processing VNIs
              that are associated with a VRF and used for routing. The VRF
              and VNI specified with this command must match the configuration
              of the VNI under the VRF.
        required: false
        choices: ['true','false']
        default: null
    ingress_replication:
        description:
            - Specifies mechanism for host reachability advertisement.
        required: false
        choices: ['bgp','static','default']
        default: null
    multicast_group:
        description:
            - The multicast group (range) of the VNI. Valid values are
              string and keyword 'default'.
        required: false
        default: null
    peer_list:
        description:
            - Set the ingress-replication static peer list. Valid values
              are an array, a space-separated string of ip addresses,
              or the keyword 'default'.
        required: false
        default: null
    suppress_arp:
        description:
            - Suppress arp under layer 2 VNI.
        required: false
        choices: ['true','false']
        default: null
    state:
        description:
            - Determines whether the config should be present or not
              on the device.
        required: false
        default: present
        choices: ['present','absent']
    include_defaults:
        description:
            - Specify to use or not the complete running configuration
              for module operations.
        required: false
        default: true
        choices: ['true','false']
    config:
        description:
            - Configuration string to be used for module operations. If not
              specified, the module will use the current running configuration.
        required: false
        default: null
    save:
        description:
            - Specify to save the running configuration after
              module operations.
        required: false
        default: false
        choices: ['true','false']
'''
EXAMPLES = '''
- nxos_vxlan_vtep_vni:
interface: nve1
vni: 6000
ingress_replication: default
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"ingress_replication": "default", "interface": "nve1", "vni": "6000"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"assoc_vrf": false, "ingress_replication": "", "interface": "nve1",
"multicast_group": "", "peer_list": [],
"suppress_arp": false, "vni": "6000"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface nve1", "member vni 6000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
# Params whose presence in the config is a bare flag (True/False) rather than
# a "<keyword> <value>" pair.
BOOL_PARAMS = ['suppress_arp']
# Module parameter name -> NX-OS CLI keyword used in the running config.
PARAM_TO_COMMAND_KEYMAP = {
    'assoc_vrf': 'associate-vrf',
    'interface': 'interface',
    'vni': 'member vni',
    'ingress_replication': 'ingress-replication protocol',
    'multicast_group': 'mcast-group',
    'peer_list': 'peer-ip',
    'suppress_arp': 'suppress-arp'
}
# Per-param values substituted when the user passes the keyword 'default'
# (empty: no param has a device default registered here).
PARAM_TO_DEFAULT_KEYMAP = {}
# Warning strings accumulated during the run and returned to the user.
WARNINGS = []
def invoke(name, *args, **kwargs):
    """Call the module-level function *name* with the given arguments.

    Returns the function's result, or None when no such (truthy) global
    exists.
    """
    target = globals().get(name)
    if not target:
        return None
    return target(*args, **kwargs)
def get_value(arg, config, module):
    """Extract the value of *arg* from a section of device config text.

    Boolean params return True/False based on keyword presence; all other
    params return the text after the CLI keyword, or '' when absent.
    """
    command = PARAM_TO_COMMAND_KEYMAP[arg]
    if arg in BOOL_PARAMS:
        # Flag-style params: present-or-absent in the config.
        flag_re = re.compile(r'\s+{0}\s*$'.format(command), re.M)
        try:
            return flag_re.search(config) is not None
        except TypeError:
            # config was not a string (e.g. empty dict section).
            return False
    # Value-style params: "<keyword> <value>" on its own line.
    if command not in config:
        return ''
    value_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
    return value_re.search(config).group('value')
def check_interface(module, netcfg):
    """Return the name of the NVE interface found in the running config.

    Returns '' when no "interface nve" line is present.
    """
    running = str(netcfg)
    if 'interface nve' not in running:
        return ''
    match = re.search(r'(?:interface nve)(?P<value>.*)$', running, re.M)
    return 'nve{0}'.format(match.group('value'))
def get_custom_value(arg, config, module):
    """Parse fields that need custom handling: 'assoc_vrf' and 'peer_list'.

    'assoc_vrf' -> True when the 'associate-vrf' keyword appears in the
    section; 'peer_list' -> list of peer IPs from 'peer-ip <addr>' lines.

    Raises:
        ValueError: for any other *arg*. (BUGFIX: previously an unknown arg
        fell through both branches and raised UnboundLocalError on the
        undefined local 'value'.)
    """
    if arg == 'assoc_vrf':
        # Presence of the keyword anywhere in the section means enabled.
        return 'associate-vrf' in config
    if arg == 'peer_list':
        peers = []
        peer_re = re.compile(r'(?:peer-ip\s)(?P<peer_value>.*)$', re.M)
        for line in config.splitlines():
            if PARAM_TO_COMMAND_KEYMAP[arg] in line:
                peer_value = peer_re.search(line).group('peer_value')
                if peer_value:
                    peers.append(peer_value)
        return peers
    raise ValueError('unsupported custom arg: {0}'.format(arg))
def get_existing(module, args):
    """Collect the device's current VNI configuration.

    Returns a tuple of (existing-params dict, NVE interface name or '').
    The dict is empty when no NVE interface or matching VNI member exists.
    """
    existing = {}
    netcfg = get_config(module)
    custom_parsed = ('assoc_vrf', 'peer_list')

    interface_exist = check_interface(module, netcfg)
    if not interface_exist:
        return existing, interface_exist

    vni = module.params['vni']
    parents = ['interface {0}'.format(interface_exist)]
    temp_config = netcfg.get_section(parents)

    # Prefer the associate-vrf flavor of the member line when present.
    assoc_line = 'member vni {0} associate-vrf'.format(vni)
    member_line = 'member vni {0}'.format(vni)
    if assoc_line in temp_config:
        parents.append(assoc_line)
        config = netcfg.get_section(parents)
    elif member_line in temp_config:
        parents.append(member_line)
        config = netcfg.get_section(parents)
    else:
        config = {}

    if config:
        for arg in args:
            if arg in ('interface', 'vni'):
                continue
            parser = get_custom_value if arg in custom_parsed else get_value
            existing[arg] = parser(arg, config, module)
        existing['interface'] = interface_exist
        existing['vni'] = vni

    return existing, interface_exist
def apply_key_map(key_map, table):
    """Return *table* re-keyed through *key_map*.

    Entries whose key has no (truthy) mapping are dropped; values are kept
    as-is, falsy or not. (The original's if/else wrote the same value in
    both branches, so this is a pure simplification.)
    """
    return {key_map[key]: value
            for key, value in table.items()
            if key_map.get(key)}
def state_present(module, existing, proposed, candidate):
    """Queue the CLI commands that bring the device to the proposed state.

    Builds command strings from the proposed/existing diff, then attaches
    them to *candidate* under the right parent hierarchy (interface ->
    member vni -> ingress-replication).
    """
    commands = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
    for key, value in proposed_commands.items():
        if key == 'associate-vrf':
            # associate-vrf is toggled on the member vni line itself.
            command = 'member vni {0} {1}'.format(module.params['vni'], key)
            if value:
                commands.append(command)
            else:
                commands.append('no {0}'.format(command))
        elif key == 'peer-ip' and value != 'default':
            # One 'peer-ip <addr>' command per requested peer.
            for peer in value:
                commands.append('{0} {1}'.format(key, peer))
        elif value is True:
            # Boolean flag on: bare keyword.
            commands.append(key)
        elif value is False:
            commands.append('no {0}'.format(key))
        elif value == 'default':
            # 'default' means: remove whatever is currently configured.
            if existing_commands.get(key):
                existing_value = existing_commands.get(key)
                if key == 'peer-ip':
                    for peer in existing_value:
                        commands.append('no {0} {1}'.format(key, peer))
                else:
                    commands.append('no {0} {1}'.format(key, existing_value))
            else:
                # No existing value: only boolean flags get an explicit 'no'.
                if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
                    commands.append('no {0}'.format(key.lower()))
        else:
            command = '{0} {1}'.format(key, value.lower())
            commands.append(command)

    if commands:
        vni_command = 'member vni {0}'.format(module.params['vni'])
        ingress_replication_command = 'ingress-replication protocol static'
        interface_command = 'interface {0}'.format(module.params['interface'])
        if ingress_replication_command in commands:
            # peer-ip commands live one level deeper, under the static
            # ingress-replication stanza.
            static_level_cmds = [cmd for cmd in commands if 'peer' in cmd]
            parents = [interface_command, vni_command, ingress_replication_command]
            candidate.add(static_level_cmds, parents=parents)
            commands = [cmd for cmd in commands if 'peer' not in cmd]

        if vni_command in commands:
            parents = [interface_command]
            commands.remove(vni_command)
            # With assoc_vrf set, the remaining commands sit directly under
            # the interface; otherwise they nest under the member vni line.
            if module.params['assoc_vrf'] is None:
                parents.append(vni_command)
            candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
    """Queue the command that removes the VNI member from the NVE interface.

    Uses the associate-vrf form of the removal when the existing config
    shows the VNI is VRF-associated.
    """
    vni = module.params['vni']
    if existing['assoc_vrf']:
        removal = 'no member vni {0} associate-vrf'.format(vni)
    else:
        removal = 'no member vni {0}'.format(vni)
    candidate.add([removal],
                  parents=['interface {0}'.format(module.params['interface'])])
def main():
    """Module entry point: validate params, diff proposed vs existing VNI
    configuration, and push the resulting commands to the device."""
    argument_spec = dict(
        interface=dict(required=True, type='str'),
        vni=dict(required=True, type='str'),
        assoc_vrf=dict(required=False, type='bool'),
        multicast_group=dict(required=False, type='str'),
        peer_list=dict(required=False, type='list'),
        suppress_arp=dict(required=False, type='bool'),
        ingress_replication=dict(required=False, type='str',
                                 choices=['bgp', 'static', 'default']),
        state=dict(choices=['present', 'absent'], default='present',
                   required=False),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    # assoc_vrf (L3 VNI) is mutually exclusive with the L2-only options.
    if module.params['assoc_vrf']:
        mutually_exclusive_params = ['multicast_group',
                                     'suppress_arp',
                                     'ingress_replication']
        for param in mutually_exclusive_params:
            if module.params[param]:
                module.fail_json(msg='assoc_vrf cannot be used with '
                                     '{0} param'.format(param))
    if module.params['peer_list']:
        if module.params['ingress_replication'] != 'static':
            module.fail_json(msg='ingress_replication=static is required '
                                 'when using peer_list param')
        else:
            peer_list = module.params['peer_list']
            if peer_list[0] == 'default':
                module.params['peer_list'] = 'default'
            else:
                # BUGFIX: materialize the map() so the value survives
                # repeated iteration on Python 3 (a bare map object is
                # exhausted after one pass).
                module.params['peer_list'] = list(map(str.strip, peer_list))

    state = module.params['state']
    args = [
        'assoc_vrf',
        'interface',
        'vni',
        'ingress_replication',
        'multicast_group',
        'peer_list',
        'suppress_arp'
    ]

    existing, interface_exist = invoke('get_existing', module, args)
    end_state = existing
    proposed_args = dict((k, v) for k, v in module.params.items()
                         if v is not None and k in args)

    proposed = {}
    for key, value in proposed_args.items():
        if key != 'interface':
            # The keyword 'default' maps to the registered default value,
            # or stays the literal string 'default' when none is registered.
            if str(value).lower() == 'default':
                value = PARAM_TO_DEFAULT_KEYMAP.get(key)
                if value is None:
                    value = 'default'
            if existing.get(key) or (not existing.get(key) and value):
                proposed[key] = value

    result = {}
    if state == 'present' or (state == 'absent' and existing):
        if not interface_exist:
            WARNINGS.append("The proposed NVE interface does not exist. "
                            "Use nxos_interface to create it first.")
        elif interface_exist != module.params['interface']:
            module.fail_json(msg='Only 1 NVE interface is allowed on '
                                 'the switch.')
        elif (existing and state == 'absent' and
                existing['vni'] != module.params['vni']):
            module.fail_json(msg="ERROR: VNI delete failed: Could not find"
                                 " vni node for {0}".format(
                                     module.params['vni']),
                             existing_vni=existing['vni'])
        else:
            candidate = CustomNetworkConfig(indent=3)
            invoke('state_%s' % state, module, existing, proposed, candidate)

            try:
                response = load_config(module, candidate)
                result.update(response)
            except Exception as exc:
                # BUGFIX: previously caught the undefined name ShellError and
                # called the undefined get_exception(), so any load failure
                # surfaced as a NameError instead of a clean module failure.
                module.fail_json(msg=str(exc))
    else:
        result['updates'] = []

    if module._verbosity > 0:
        end_state, interface_exist = invoke('get_existing', module, args)
        result['end_state'] = end_state
        result['existing'] = existing
        result['proposed'] = proposed_args

    if WARNINGS:
        result['warnings'] = WARNINGS

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
sarakha63/persomov | refs/heads/master | libs/pyasn1/type/namedval.py | 172 | # ASN.1 named integers
from pyasn1 import error
__all__ = [ 'NamedValues' ]
class NamedValues:
    """Bidirectional mapping between symbolic names and ASN.1 integer values.

    Accepts (name, value) tuples and/or bare names; bare names are assigned
    sequential automatic values starting at 1.
    """

    def __init__(self, *namedValues):
        self.nameToValIdx = {}
        self.valToNameIdx = {}
        self.namedValues = ()
        nextAutoVal = 1
        for item in namedValues:
            if isinstance(item, tuple):
                name, val = item
            else:
                name, val = item, nextAutoVal
            if name in self.nameToValIdx:
                raise error.PyAsn1Error('Duplicate name %s' % (name,))
            self.nameToValIdx[name] = val
            if val in self.valToNameIdx:
                raise error.PyAsn1Error('Duplicate value %s=%s' % (name, val))
            self.valToNameIdx[val] = name
            self.namedValues += ((name, val),)
            nextAutoVal += 1

    def __repr__(self):
        inner = ', '.join(repr(pair) for pair in self.namedValues)
        return '%s(%s)' % (self.__class__.__name__, inner)

    def __str__(self):
        return str(self.namedValues)

    # Ordering, equality and hashing all delegate to the tuple of
    # (name, value) pairs.
    def __eq__(self, other):
        return tuple(self) == tuple(other)

    def __ne__(self, other):
        return tuple(self) != tuple(other)

    def __lt__(self, other):
        return tuple(self) < tuple(other)

    def __le__(self, other):
        return tuple(self) <= tuple(other)

    def __gt__(self, other):
        return tuple(self) > tuple(other)

    def __ge__(self, other):
        return tuple(self) >= tuple(other)

    def __hash__(self):
        return hash(tuple(self))

    def getName(self, value):
        """Return the name bound to *value*, or None when unknown."""
        if value in self.valToNameIdx:
            return self.valToNameIdx[value]

    def getValue(self, name):
        """Return the value bound to *name*, or None when unknown."""
        if name in self.nameToValIdx:
            return self.nameToValIdx[name]

    def __getitem__(self, i):
        return self.namedValues[i]

    def __len__(self):
        return len(self.namedValues)

    def __add__(self, namedValues):
        return self.__class__(*self.namedValues + namedValues)

    def __radd__(self, namedValues):
        return self.__class__(*namedValues + tuple(self))

    def clone(self, *namedValues):
        return self.__class__(*tuple(self) + namedValues)

    # XXX clone/subtype?
|
mikestebbins/Openaps | refs/heads/master | !ARCHIVE/bg_data_plotting_01.py | 2 |
# coding: utf-8
# Notebook-exported script: fetches CGM entries from a Nightscout API and
# draws per-day box plots of the blood-glucose ('sgv') values.

# In[1]:
#get_ipython().magic(u'matplotlib inline')
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import requests
import json
import pprint
import datetime
from datetime import date

# Nightscout entries endpoint; the query caps results at entries with an
# epoch-ms 'date' <= the hard-coded timestamp, max 1000 records.
base_url = 'https://mikestebbinscgmtest.azurewebsites.net/api/v1/entries.json?'
extra_url='find[date][$lte]=1462905809000&count=1000'
url = base_url + extra_url
print (url)

# In[2]:
r = requests.get(url)
#print(r.status_code)
#print()
#print(r.headers['content-type'])
#print(r.encoding)
#print(r.text)
#print(pprint.pprint(r.json()))
#print()
print('-----------------------------------------------------------------------------------------------------------')

# In[3]:
data=json.loads(r.text)

# In[4]:
time_and_data=[]

# In[5]:
data[1]  # notebook cell: displayed one raw entry; no effect as a script

# In[6]:
# Pair each entry's timestamp with its sgv reading.
for item in data:
    try:
        # NOTE(review): the format string hard-codes the 'PDT' timezone
        # label -- entries recorded with any other label will raise and be
        # skipped below; confirm against the data source.
        properdatetime = datetime.datetime.strptime(item['dateString'], "%a %b %d %H:%M:%S PDT %Y")
        sgv = item['sgv']
        time_and_data.append((properdatetime,sgv))
    except:
        print('missed one')

# In[7]:
print(time_and_data[0:5])

# In[8]:
time_and_data[0][0]

# In[9]:
datetime.datetime.date(time_and_data[0][0])

# In[10]:
# Reduce each timestamp to its calendar date.
dates_and_data=[]
for item in time_and_data:
    properdate = datetime.datetime.date(item[0])
    sgv = item[1]
    dates_and_data.append((properdate,sgv))

# In[11]:
print(dates_and_data[0:5])

# In[12]:
one_date_all_bgs={}
#lists of lists, each list is a unique date and a list of BG's
for item in dates_and_data:
    # NOTE(review): this local 'date' shadows the 'from datetime import
    # date' name imported above (the import is otherwise unused).
    date = item[0]
    one_date_all_bgs.setdefault(date,[])
    one_date_all_bgs[date].append(item[1])

# In[ ]:
one_date_all_bgs

# In[ ]:
totaldata = []
# One box plot per day of readings.
for key in one_date_all_bgs:
    print (one_date_all_bgs[key])
    plt.figure()
    plt.boxplot(np.asarray(one_date_all_bgs[key]))
# NOTE(review): totaldata collects the date KEYS, not the BG values, so this
# final boxplot plots dates rather than readings -- looks unintended; verify.
for key in one_date_all_bgs:
    totaldata.append(key)
plt.figure()
plt.boxplot(totaldata)

# In[ ]:
## fake up some data
#spread = np.random.rand(50) * 100
#center = np.ones(25) * 50
#flier_high = np.random.rand(10) * 100 + 100
#flier_low = np.random.rand(10) * -100
#data = np.concatenate((spread, center, flier_high, flier_low), 0)
#
## basic plot
#plt.boxplot(data)

# In[ ]:
|
overtherain/scriptfile | refs/heads/master | software/googleAppEngine/lib/django_1_4/tests/regressiontests/views/tests/shortcuts.py | 30 | import warnings
from django.conf import settings
from django.test import TestCase
class ShortcutTests(TestCase):
    """Tests for the render_to_response()/render() shortcut views."""

    urls = 'regressiontests.views.generic_urls'

    def setUp(self):
        self.save_warnings_state()
        warnings.filterwarnings('ignore', category=DeprecationWarning,
                                module='django.views.generic.simple')
        self.old_STATIC_URL = settings.STATIC_URL
        self.old_TEMPLATE_CONTEXT_PROCESSORS = settings.TEMPLATE_CONTEXT_PROCESSORS
        settings.STATIC_URL = '/path/to/static/media/'
        # BUGFIX: trailing comma added -- without it the parentheses are mere
        # grouping and the setting becomes a plain string, which Django would
        # then iterate character-by-character when loading context processors.
        settings.TEMPLATE_CONTEXT_PROCESSORS = (
            'django.core.context_processors.static',
        )

    def tearDown(self):
        # BUGFIX: the class previously defined tearDown() twice; the second
        # definition shadowed the first, so restore_warnings_state() was
        # never called. Both cleanups are merged here.
        self.restore_warnings_state()
        settings.STATIC_URL = self.old_STATIC_URL
        settings.TEMPLATE_CONTEXT_PROCESSORS = self.old_TEMPLATE_CONTEXT_PROCESSORS

    def test_render_to_response(self):
        response = self.client.get('/shortcuts/render_to_response/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'FOO.BAR..\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')

    def test_render_to_response_with_request_context(self):
        response = self.client.get('/shortcuts/render_to_response/request_context/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'FOO.BAR../path/to/static/media/\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')

    def test_render_to_response_with_mimetype(self):
        response = self.client.get('/shortcuts/render_to_response/mimetype/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'FOO.BAR..\n')
        self.assertEqual(response['Content-Type'], 'application/x-rendertest')

    def test_render(self):
        response = self.client.get('/shortcuts/render/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'FOO.BAR../path/to/static/media/\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        self.assertEqual(response.context.current_app, None)

    def test_render_with_base_context(self):
        response = self.client.get('/shortcuts/render/base_context/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'FOO.BAR..\n')
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')

    def test_render_with_content_type(self):
        response = self.client.get('/shortcuts/render/content_type/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'FOO.BAR../path/to/static/media/\n')
        self.assertEqual(response['Content-Type'], 'application/x-rendertest')

    def test_render_with_status(self):
        response = self.client.get('/shortcuts/render/status/')
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response.content, 'FOO.BAR../path/to/static/media/\n')

    def test_render_with_current_app(self):
        response = self.client.get('/shortcuts/render/current_app/')
        self.assertEqual(response.context.current_app, "foobar_app")

    def test_render_with_current_app_conflict(self):
        self.assertRaises(ValueError, self.client.get, '/shortcuts/render/current_app_conflict/')
|
Arcanemagus/plexpy | refs/heads/master | lib/ipwhois/whois.py | 3 | # Copyright (c) 2013, 2014, 2015, 2016 Philip Hane
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import re
import copy
from datetime import datetime
import logging
from .utils import unique_everseen
from . import (BlacklistError, WhoisLookupError, NetError)
if sys.version_info >= (3, 3): # pragma: no cover
from ipaddress import (ip_address,
ip_network,
summarize_address_range,
collapse_addresses)
else: # pragma: no cover
from ipaddr import (IPAddress as ip_address,
IPNetwork as ip_network,
summarize_address_range,
collapse_address_list as collapse_addresses)
# Module-level logger for parse diagnostics.
log = logging.getLogger(__name__)

# Legacy base whois output dictionary.
# Every parsed network starts as a copy of this; the parsers fill in
# whatever fields they find.
BASE_NET = {
    'cidr': None,
    'name': None,
    'handle': None,
    'range': None,
    'description': None,
    'country': None,
    'state': None,
    'city': None,
    'address': None,
    'postal_code': None,
    'emails': None,
    'created': None,
    'updated': None
}
# Per-RIR whois catalog: the whois server to query, the regexes that extract
# each output field, and (where known) the strptime format of date fields.
RIR_WHOIS = {
    'arin': {
        'server': 'whois.arin.net',
        'fields': {
            'name': r'(NetName):[^\S\n]+(?P<val>.+?)\n',
            'handle': r'(NetHandle):[^\S\n]+(?P<val>.+?)\n',
            'description': r'(OrgName|CustName):[^\S\n]+(?P<val>.+?)'
                           '(?=(\n\S):?)',
            'country': r'(Country):[^\S\n]+(?P<val>.+?)\n',
            'state': r'(StateProv):[^\S\n]+(?P<val>.+?)\n',
            'city': r'(City):[^\S\n]+(?P<val>.+?)\n',
            'address': r'(Address):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
            'postal_code': r'(PostalCode):[^\S\n]+(?P<val>.+?)\n',
            'emails': (
                r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
                '[^\S\n]+.*?)*?\n'
            ),
            'created': r'(RegDate):[^\S\n]+(?P<val>.+?)\n',
            'updated': r'(Updated):[^\S\n]+(?P<val>.+?)\n',
        },
        'dt_format': '%Y-%m-%d'
    },
    'ripencc': {
        'server': 'whois.ripe.net',
        'fields': {
            'name': r'(netname):[^\S\n]+(?P<val>.+?)\n',
            'handle': r'(nic-hdl):[^\S\n]+(?P<val>.+?)\n',
            'description': r'(descr):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
            'country': r'(country):[^\S\n]+(?P<val>.+?)\n',
            'address': r'(address):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
            'emails': (
                r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
                '[^\S\n]+.*?)*?\n'
            ),
            'created': (
                r'(created):[^\S\n]+(?P<val>[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]'
                '{2}:[0-9]{2}:[0-9]{2}Z).*?\n'
            ),
            'updated': (
                r'(last-modified):[^\S\n]+(?P<val>[0-9]{4}-[0-9]{2}-[0-9]{2}T'
                '[0-9]{2}:[0-9]{2}:[0-9]{2}Z).*?\n'
            )
        },
        'dt_format': '%Y-%m-%dT%H:%M:%SZ'
    },
    'apnic': {
        'server': 'whois.apnic.net',
        'fields': {
            'name': r'(netname):[^\S\n]+(?P<val>.+?)\n',
            'handle': r'(nic-hdl):[^\S\n]+(?P<val>.+?)\n',
            'description': r'(descr):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
            'country': r'(country):[^\S\n]+(?P<val>.+?)\n',
            'address': r'(address):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
            'emails': (
                r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
                '[^\S\n]+.*?)*?\n'
            ),
            'updated': r'(changed):[^\S\n]+.*(?P<val>[0-9]{8}).*?\n'
        },
        'dt_format': '%Y%m%d'
    },
    'lacnic': {
        'server': 'whois.lacnic.net',
        'fields': {
            'handle': r'(nic-hdl):[^\S\n]+(?P<val>.+?)\n',
            'description': r'(owner):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
            'country': r'(country):[^\S\n]+(?P<val>.+?)\n',
            'emails': (
                r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
                '[^\S\n]+.*?)*?\n'
            ),
            'created': r'(created):[^\S\n]+(?P<val>[0-9]{8}).*?\n',
            'updated': r'(changed):[^\S\n]+(?P<val>[0-9]{8}).*?\n'
        },
        'dt_format': '%Y%m%d'
    },
    'afrinic': {
        'server': 'whois.afrinic.net',
        'fields': {
            'name': r'(netname):[^\S\n]+(?P<val>.+?)\n',
            'handle': r'(nic-hdl):[^\S\n]+(?P<val>.+?)\n',
            'description': r'(descr):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
            'country': r'(country):[^\S\n]+(?P<val>.+?)\n',
            'address': r'(address):[^\S\n]+(?P<val>.+?)(?=(\n\S):?)',
            'emails': (
                r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
                '[^\S\n]+.*?)*?\n'
            ),
        }
    }
}
# Field-extraction regexes for rwhois (referral whois) server output, which
# uses 'network:Key:Value' style lines instead of RIR formatting.
RWHOIS = {
    'fields': {
        'cidr': r'(network:IP-Network):(?P<val>.+?)\n',
        'name': r'(network:ID):(?P<val>.+?)\n',
        'description': (
            r'(network:(Org-Name|Organization(;I)?)):(?P<val>.+?)\n'
        ),
        'country': r'(network:(Country|Country-Code)):(?P<val>.+?)\n',
        'state': r'(network:State):(?P<val>.+?)\n',
        'city': r'(network:City):(?P<val>.+?)\n',
        'address': r'(network:Street-Address):(?P<val>.+?)\n',
        'postal_code': r'(network:Postal-Code):(?P<val>.+?)\n',
        'emails': (
            r'.+?:.*?[^\S\n]+(?P<val>[\w\-\.]+?@[\w\-\.]+\.[\w\-]+)('
            '[^\S\n]+.*?)*?\n'
        ),
        'created': r'(network:Created):(?P<val>.+?)\n',
        'updated': r'(network:Updated):(?P<val>.+?)\n'
    }
}
# Map ASN whois referral URLs to the RIR id whose catalog should parse them.
ASN_REFERRALS = {
    'whois://whois.ripe.net': 'ripencc',
    'whois://whois.apnic.net': 'apnic',
    'whois://whois.lacnic.net': 'lacnic',
    'whois://whois.afrinic.net': 'afrinic',
}
class Whois:
"""
The class for parsing via whois
Args:
net: A ipwhois.net.Net object.
Raises:
NetError: The parameter provided is not an instance of
ipwhois.net.Net
IPDefinedError: The address provided is defined (does not need to be
resolved).
"""
def __init__(self, net):
from .net import Net
# ipwhois.net.Net validation
if isinstance(net, Net):
self._net = net
else:
raise NetError('The provided net parameter is not an instance of '
'ipwhois.net.Net')
    def _parse_fields(self, response, fields_dict, net_start=None,
                      net_end=None, dt_format=None, field_list=None):
        """
        The function for parsing whois fields from a data input.

        Args:
            response: The response from the whois/rwhois server.
            fields_dict: The dictionary of fields -> regex search values.
            net_start: The starting point of the network (if parsing multiple
                networks).
            net_end: The ending point of the network (if parsing multiple
                networks).
            dt_format: The format of datetime fields if known.
            field_list: If provided, a list of fields to parse:
                ['name', 'handle', 'description', 'country', 'state', 'city',
                'address', 'postal_code', 'emails', 'created', 'updated']

        Returns:
            Dictionary: A dictionary of fields provided in fields_dict.
        """

        ret = {}

        if not field_list:

            field_list = ['name', 'handle', 'description', 'country', 'state',
                          'city', 'address', 'postal_code', 'emails',
                          'created', 'updated']

        # Only iterate the regexes for fields the caller asked for.
        generate = ((field, pattern) for (field, pattern) in
                    fields_dict.items() if field in field_list)

        for field, pattern in generate:

            pattern = re.compile(
                str(pattern),
                re.DOTALL
            )

            # Restrict the search window when parsing one network out of a
            # multi-network response (net_end..net_start bound the slice).
            if net_start is not None:

                match = pattern.finditer(response, net_end, net_start)

            elif net_end is not None:

                match = pattern.finditer(response, net_end)

            else:

                match = pattern.finditer(response)

            values = []
            sub_section_end = None
            for m in match:

                if sub_section_end:

                    # Stop collecting once matches are no longer contiguous
                    # lines (emails may appear anywhere, so they are exempt).
                    # NOTE(review): ('emails') is a plain string, not a
                    # 1-tuple, so this is a substring test; it behaves like
                    # equality here only because no other field name happens
                    # to be a substring of 'emails'.
                    if field not in (
                        'emails'
                    ) and (sub_section_end != (m.start() - 1)):

                        break

                try:

                    values.append(m.group('val').strip())

                except IndexError:

                    pass

                sub_section_end = m.end()

            if len(values) > 0:

                value = None
                try:

                    if field == 'country':

                        value = values[0].upper()

                    elif field in ['created', 'updated'] and dt_format:

                        # Normalize dates to ISO 8601 'T' format.
                        value = datetime.strptime(
                            values[0],
                            str(dt_format)).isoformat('T')

                    else:

                        # De-duplicate repeated matches, preserving order.
                        values = unique_everseen(values)
                        value = '\n'.join(values)

                except ValueError as e:

                    log.debug('Whois field parsing failed for {0}: {1}'.format(
                        field, e))
                    pass

                ret[field] = value

        return ret
    def _get_nets_arin(self, response):
        """
        The function for parsing network blocks from ARIN whois data.

        Args:
            response: The response from the ARIN whois server.

        Returns:
            List: A list of dictionaries containing keys: cidr, start, end
            (and range when a NetRange line was paired with the block).
        """

        nets = []

        # Find the first NetRange value.
        pattern = re.compile(
            r'^NetRange:[^\S\n]+(.+)$',
            re.MULTILINE
        )
        temp = pattern.search(response)
        net_range = None
        net_range_start = None
        if temp is not None:
            net_range = temp.group(1).strip()
            net_range_start = temp.start()

        # Iterate through all of the networks found, storing the CIDR value
        # and the start and end positions.
        for match in re.finditer(
            r'^CIDR:[^\S\n]+(.+?,[^\S\n].+|.+)$',
            response,
            re.MULTILINE
        ):
            try:
                net = copy.deepcopy(BASE_NET)

                if len(nets) > 0:
                    # For blocks after the first, re-search for the NetRange
                    # that follows this CIDR line so each section keeps its
                    # own range value.
                    temp = pattern.search(response, match.start())
                    net_range = None
                    net_range_start = None
                    if temp is not None:
                        net_range = temp.group(1).strip()
                        net_range_start = temp.start()

                if net_range is not None:
                    # Attach the range only if it precedes this CIDR line
                    # (first block) or we are past the first block.
                    if net_range_start < match.start() or len(nets) > 0:
                        net['range'] = net_range

                # Normalize each comma-separated CIDR into canonical form;
                # a ValueError from ip_network() skips the whole block.
                net['cidr'] = ', '.join(
                    [ip_network(c.strip()).__str__()
                     for c in match.group(1).split(', ')]
                )
                net['start'] = match.start()
                net['end'] = match.end()
                nets.append(net)
            except ValueError:
                pass

        return nets
def _get_nets_lacnic(self, response):
"""
The function for parsing network blocks from LACNIC whois data.
Args:
response: The response from the LACNIC whois server.
Returns:
List: A of dictionaries containing keys: cidr, start, end.
"""
nets = []
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
r'^(inetnum|inet6num|route):[^\S\n]+(.+?,[^\S\n].+|.+)$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
net['range'] = match.group(2).strip()
temp = []
for addr in match.group(2).strip().split(', '):
count = addr.count('.')
if count is not 0 and count < 4:
addr_split = addr.strip().split('/')
for i in range(count + 1, 4):
addr_split[0] += '.0'
addr = '/'.join(addr_split)
temp.append(ip_network(addr.strip()).__str__())
net['cidr'] = ', '.join(temp)
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except ValueError:
pass
return nets
def _get_nets_other(self, response):
"""
The function for parsing network blocks from generic whois data.
Args:
response: The response from the whois/rwhois server.
Returns:
List: A of dictionaries containing keys: cidr, start, end.
"""
nets = []
# Iterate through all of the networks found, storing the CIDR value
# and the start and end positions.
for match in re.finditer(
r'^(inetnum|inet6num|route):[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+)|'
'.+)$',
response,
re.MULTILINE
):
try:
net = copy.deepcopy(BASE_NET)
net['range'] = match.group(2)
if match.group(3) and match.group(4):
addrs = []
addrs.extend(summarize_address_range(
ip_address(match.group(3).strip()),
ip_address(match.group(4).strip())))
cidr = ', '.join(
[i.__str__() for i in collapse_addresses(addrs)]
)
else:
cidr = ip_network(match.group(2).strip()).__str__()
net['cidr'] = cidr
net['start'] = match.start()
net['end'] = match.end()
nets.append(net)
except (ValueError, TypeError):
pass
return nets
def lookup(self, inc_raw=False, retry_count=3, response=None,
get_referral=False, extra_blacklist=None,
ignore_referral_errors=False, asn_data=None,
field_list=None, is_offline=False):
"""
The function for retrieving and parsing whois information for an IP
address via port 43/tcp (WHOIS).
Args:
inc_raw: Boolean for whether to include the raw results in the
returned dictionary.
retry_count: The number of times to retry in case socket errors,
timeouts, connection resets, etc. are encountered.
response: Optional response object, this bypasses the Whois lookup.
get_referral: Boolean for whether to retrieve referral whois
information, if available.
extra_blacklist: A list of blacklisted whois servers in addition to
the global BLACKLIST.
ignore_referral_errors: Boolean for whether to ignore and continue
when an exception is encountered on referral whois lookups.
asn_data: Optional ASN result object, this bypasses the ASN lookup.
field_list: If provided, a list of fields to parse:
['name', 'handle', 'description', 'country', 'state', 'city',
'address', 'postal_code', 'emails', 'created', 'updated']
is_offline: Boolean for whether to perform lookups offline. If
True, response and asn_data must be provided. Primarily used
for testing.
Returns:
Dictionary:
:query: The IP address (String)
:asn: The Autonomous System Number (String)
:asn_date: The ASN Allocation date (String)
:asn_registry: The assigned ASN registry (String)
:asn_cidr: The assigned ASN CIDR (String)
:asn_country_code: The assigned ASN country code (String)
:nets: Dictionaries containing network information which consists
of the fields listed in the NIC_WHOIS dictionary. (List)
:raw: Raw whois results if the inc_raw parameter is True. (String)
:referral: Dictionary of referral whois information if get_referral
is True and the server isn't blacklisted. Consists of fields
listed in the RWHOIS dictionary.
:raw_referral: Raw referral whois results if the inc_raw parameter
is True. (String)
"""
# Create the return dictionary.
results = {
'query': self._net.address_str,
'nets': [],
'raw': None,
'referral': None,
'raw_referral': None
}
# The referral server and port. Only used if get_referral is True.
referral_server = None
referral_port = 0
# Only fetch the response if we haven't already.
if response is None or (not is_offline and
asn_data['asn_registry'] is not 'arin'):
log.debug('Response not given, perform WHOIS lookup for {0}'
.format(self._net.address_str))
# Retrieve the whois data.
response = self._net.get_whois(
asn_registry=asn_data['asn_registry'], retry_count=retry_count,
extra_blacklist=extra_blacklist
)
if get_referral:
# Search for a referral server.
for match in re.finditer(
r'^ReferralServer:[^\S\n]+(.+:[0-9]+)$',
response,
re.MULTILINE
):
try:
temp = match.group(1)
if 'rwhois://' not in temp: # pragma: no cover
raise ValueError
temp = temp.replace('rwhois://', '').split(':')
if int(temp[1]) > 65535: # pragma: no cover
raise ValueError
referral_server = temp[0]
referral_port = int(temp[1])
except (ValueError, KeyError): # pragma: no cover
continue
break
# Retrieve the referral whois data.
if get_referral and referral_server:
log.debug('Perform referral WHOIS lookup')
response_ref = None
try:
response_ref = self._net.get_whois(
asn_registry='', retry_count=retry_count,
server=referral_server, port=referral_port,
extra_blacklist=extra_blacklist
)
except (BlacklistError, WhoisLookupError):
if ignore_referral_errors:
pass
else:
raise
if response_ref:
log.debug('Parsing referral WHOIS data')
if inc_raw:
results['raw_referral'] = response_ref
temp_rnet = self._parse_fields(
response_ref,
RWHOIS['fields'],
field_list=field_list
)
# Add the networks to the return dictionary.
results['referral'] = temp_rnet
# If inc_raw parameter is True, add the response to return dictionary.
if inc_raw:
results['raw'] = response
nets = []
if asn_data['asn_registry'] == 'arin':
nets_response = self._get_nets_arin(response)
elif asn_data['asn_registry'] == 'lacnic':
nets_response = self._get_nets_lacnic(response)
else:
nets_response = self._get_nets_other(response)
nets.extend(nets_response)
# Iterate through all of the network sections and parse out the
# appropriate fields for each.
log.debug('Parsing WHOIS data')
for index, net in enumerate(nets):
section_end = None
if index + 1 < len(nets):
section_end = nets[index + 1]['start']
try:
dt_format = RIR_WHOIS[results['asn_registry']]['dt_format']
except KeyError:
dt_format = None
temp_net = self._parse_fields(
response,
RIR_WHOIS[asn_data['asn_registry']]['fields'],
section_end,
net['end'],
dt_format,
field_list
)
# Merge the net dictionaries.
net.update(temp_net)
# The start and end values are no longer needed.
del net['start'], net['end']
# Add the networks to the return dictionary.
results['nets'] = nets
return results
|
fake-name/ReadableWebProxy | refs/heads/master | WebMirror/processor/XmlProcessor.py | 1 |
from . import ProcessorBase
import bs4
class XmlProcessor(ProcessorBase.PageProcessor):
    """Page processor that renders a raw XML document as prettified text
    wrapped in a <pre> element. XML blobs are treated as containing no
    followable links, so both link lists are always empty."""

    wanted_mimetypes = ['text/xml', 'application/xml']
    want_priority = 40

    loggerPath = "Main.Text.XmlProcessor"

    def __init__(self, baseUrls, pageUrl, pgContent, loggerPath, relinkable, **kwargs):
        """Store the page URL and raw content. baseUrls/relinkable are
        accepted for interface compatibility with other processors but are
        not used here, since no link extraction is performed."""
        self.loggerPath = self.loggerPath if self.loggerPath.endswith(".XmlProcessor") \
            else (loggerPath + ".XmlProcessor")

        self.pageUrl = pageUrl
        self.content = pgContent

        # No link remapping for XML content.
        self.urlLut = {}

    def extractTitle(self, content, url):
        """Return the fixed placeholder title used for every XML page."""
        return "XML Blob"

    def extractContent(self):
        """Pretty-print the XML and package it for the caller.

        Returns a dict with empty link lists, the placeholder title, and the
        prettified XML wrapped in a <pre> element.
        """
        title = self.extractTitle(self.content, self.pageUrl)

        soup = bs4.BeautifulSoup(self.content, "xml")
        rendered = "<pre>" + soup.prettify() + "</pre>"

        self.log.info("Processed title: '%s'", title)

        return {
            'plainLinks': [],
            'rsrcLinks': [],
            'title': title,
            'contents': rendered,
        }
def test():
    """Ad-hoc manual test entry point.

    Imports are deferred into the function body so the module can be
    imported without pulling in WebRequest/logSetup.
    """
    print("Test mode!")
    import WebRequest
    import logSetup
    logSetup.initLogging()
# Run the manual test harness only when executed directly.
if __name__ == "__main__":
    test()
|
lz1988/company-site | refs/heads/master | tests/regressiontests/forms/tests/models.py | 51 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.forms import Form, ModelForm, FileField, ModelChoiceField
from django.forms.models import ModelFormMetaclass
from django.test import TestCase
from django.utils import six
from ..models import (ChoiceOptionModel, ChoiceFieldModel, FileModel, Group,
BoundaryModel, Defaults, OptionalMultiChoiceModel)
class ChoiceFieldForm(ModelForm):
    # ModelForm over ChoiceFieldModel; all form fields are auto-generated
    # from the model.
    class Meta:
        model = ChoiceFieldModel
class OptionalMultiChoiceModelForm(ModelForm):
    # ModelForm over OptionalMultiChoiceModel; all form fields are
    # auto-generated from the model.
    class Meta:
        model = OptionalMultiChoiceModel
class FileForm(Form):
    # Minimal form with a single required file-upload field, used by the
    # upload tests below.
    file1 = FileField()
class TestTicket12510(TestCase):
    ''' It is not necessary to generate choices for ModelChoiceField (regression test for #12510). '''
    def setUp(self):
        # Three groups named 'a', 'b' and 'c'.
        self.groups = [Group.objects.create(name=name) for name in 'abc']

    def test_choices_not_fetched_when_not_rendering(self):
        # only one query is required to pull the model from DB
        with self.assertNumQueries(1):
            field = ModelChoiceField(Group.objects.order_by('-name'))
            self.assertEqual('a', field.clean(self.groups[0].pk).name)
class TestTicket14567(TestCase):
    """
    Check that the return values of ModelMultipleChoiceFields are QuerySets
    """
    def test_empty_queryset_return(self):
        "If a model's ManyToManyField has blank=True and is saved with no data, a queryset is returned."
        # multi_choice_optional is submitted empty; multi_choice has one pk.
        form = OptionalMultiChoiceModelForm({'multi_choice_optional': '', 'multi_choice': ['1']})
        self.assertTrue(form.is_valid())
        # Check that the empty value is a QuerySet
        self.assertTrue(isinstance(form.cleaned_data['multi_choice_optional'], models.query.QuerySet))
        # While we're at it, test whether a QuerySet is returned if there *is* a value.
        self.assertTrue(isinstance(form.cleaned_data['multi_choice'], models.query.QuerySet))
class ModelFormCallableModelDefault(TestCase):
    # Tests around ForeignKey/M2M defaults supplied as callables
    # (refs #10792, #13769, #7287). The expected HTML below is compared
    # with assertHTMLEqual and must stay exactly as rendered by Django.
    def test_no_empty_option(self):
        "If a model's ForeignKey has blank=False and a default, no empty option is created (Refs #10792)."
        option = ChoiceOptionModel.objects.create(name='default')

        choices = list(ChoiceFieldForm().fields['choice'].choices)
        self.assertEqual(len(choices), 1)
        self.assertEqual(choices[0], (option.pk, six.text_type(option)))

    def test_callable_initial_value(self):
        "The initial value for a callable default returning a queryset is the pk (refs #13769)"
        obj1 = ChoiceOptionModel.objects.create(id=1, name='default')
        obj2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
        obj3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
        # Option 1 must come out pre-selected in every field.
        self.assertHTMLEqual(ChoiceFieldForm().as_p(), """<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="1" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="1" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label> <select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="1" id="initial-id_multi_choice_0" /> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></p>
<p><label for="id_multi_choice_int">Multi choice int:</label> <select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="1" id="initial-id_multi_choice_int_0" /> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></p>""")

    def test_initial_instance_value(self):
        "Initial instances for model fields may also be instances (refs #7287)"
        obj1 = ChoiceOptionModel.objects.create(id=1, name='default')
        obj2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
        obj3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
        # Initial data given as model instances / a queryset must render as
        # the matching selected options and hidden initial inputs.
        self.assertHTMLEqual(ChoiceFieldForm(initial={
                'choice': obj2,
                'choice_int': obj2,
                'multi_choice': [obj2,obj3],
                'multi_choice_int': ChoiceOptionModel.objects.exclude(name="default"),
            }).as_p(), """<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="2" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="2" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label> <select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="2" id="initial-id_multi_choice_0" />
<input type="hidden" name="initial-multi_choice" value="3" id="initial-id_multi_choice_1" /> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></p>
<p><label for="id_multi_choice_int">Multi choice int:</label> <select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="2" id="initial-id_multi_choice_int_0" />
<input type="hidden" name="initial-multi_choice_int" value="3" id="initial-id_multi_choice_int_1" /> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></p>""")
class FormsModelTestCase(TestCase):
    # Assorted model-form regressions: unicode uploads, positive-integer
    # boundaries, and model-default-driven initial values.
    def test_unicode_filename(self):
        # FileModel with unicode filename and data #########################
        f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))}, auto_id=False)
        self.assertTrue(f.is_valid())
        self.assertTrue('file1' in f.cleaned_data)
        m = FileModel.objects.create(file=f.cleaned_data['file1'])
        # The stored name keeps the unicode characters intact.
        self.assertEqual(m.file.name, 'tests/\u6211\u96bb\u6c23\u588a\u8239\u88dd\u6eff\u6652\u9c54.txt')
        m.delete()

    def test_boundary_conditions(self):
        # Boundary conditions on a PostitiveIntegerField #########################
        class BoundaryForm(ModelForm):
            class Meta:
                model = BoundaryModel
        f = BoundaryForm({'positive_integer': 100})
        self.assertTrue(f.is_valid())
        # Zero is still a valid positive integer field value.
        f = BoundaryForm({'positive_integer': 0})
        self.assertTrue(f.is_valid())
        f = BoundaryForm({'positive_integer': -100})
        self.assertFalse(f.is_valid())

    def test_formfield_initial(self):
        # Formfield initial values ########
        # If the model has default values for some fields, they are used as the formfield
        # initial values.
        class DefaultsForm(ModelForm):
            class Meta:
                model = Defaults
        self.assertEqual(DefaultsForm().fields['name'].initial, 'class default value')
        self.assertEqual(DefaultsForm().fields['def_date'].initial, datetime.date(1980, 1, 1))
        self.assertEqual(DefaultsForm().fields['value'].initial, 42)
        # A callable default is re-evaluated per form instance, so two
        # renderings differ.
        r1 = DefaultsForm()['callable_default'].as_widget()
        r2 = DefaultsForm()['callable_default'].as_widget()
        self.assertNotEqual(r1, r2)

        # In a ModelForm that is passed an instance, the initial values come from the
        # instance's values, not the model's defaults.
        foo_instance = Defaults(name='instance value', def_date=datetime.date(1969, 4, 4), value=12)
        instance_form = DefaultsForm(instance=foo_instance)
        self.assertEqual(instance_form.initial['name'], 'instance value')
        self.assertEqual(instance_form.initial['def_date'], datetime.date(1969, 4, 4))
        self.assertEqual(instance_form.initial['value'], 12)

        from django.forms import CharField

        class ExcludingForm(ModelForm):
            name = CharField(max_length=255)
            class Meta:
                model = Defaults
                exclude = ['name', 'callable_default']
        f = ExcludingForm({'name': 'Hello', 'value': 99, 'def_date': datetime.date(1999, 3, 2)})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['name'], 'Hello')
        obj = f.save()
        # Excluded fields fall back to the model defaults on save, even if
        # present in the submitted data.
        self.assertEqual(obj.name, 'class default value')
        self.assertEqual(obj.value, 99)
        self.assertEqual(obj.def_date, datetime.date(1999, 3, 2))
class RelatedModelFormTests(TestCase):
    # Creating a ModelForm whose model has a string FK reference must fail
    # while the referenced model does not exist yet, and succeed once it
    # does (refs #10405).
    def test_invalid_loading_order(self):
        """
        Test for issue 10405
        """
        class A(models.Model):
            ref = models.ForeignKey("B")

        class Meta:
            model=A

        # B is not defined yet, so building the form class must raise.
        self.assertRaises(ValueError, ModelFormMetaclass, str('Form'), (ModelForm,), {'Meta': Meta})

        class B(models.Model):
            pass

    def test_valid_loading_order(self):
        """
        Test for issue 10405
        """
        class A(models.Model):
            ref = models.ForeignKey("B")

        class B(models.Model):
            pass

        class Meta:
            model=A

        # With B defined first, the form class builds successfully.
        self.assertTrue(issubclass(ModelFormMetaclass(str('Form'), (ModelForm,), {'Meta': Meta}), ModelForm))
|
adusca/treeherder | refs/heads/master | treeherder/etl/__init__.py | 12133432 | |
retomerz/intellij-community | refs/heads/master | python/testData/refactoring/move/packageImport/before/src/b.py | 12133432 | |
imtapps/django-imt-fork | refs/heads/IMT | tests/modeltests/one_to_one/__init__.py | 12133432 | |
ymcagodme/Norwalk-Judo | refs/heads/master | django/conf/locale/pt/__init__.py | 12133432 | |
manpages/rum | refs/heads/master | prototype.py | 1 | import sys
sys.path.append('.')
import json
import tornado
import tornado.ioloop
import tornado.web
import sys
import rumcfg
import storage
from requesthandler import handle
class Main(tornado.web.RequestHandler):
    """Root endpoint: returns a fixed HTML greeting (liveness check)."""
    def get(self):
        self.set_header("Content-Type", "text/html")
        self.write("Meow?")
class Dumb(tornado.web.RequestHandler):
    """JSON endpoint: parses the request body as JSON and dispatches the
    decoded object to requesthandler.handle(), writing its JSON-encoded
    result back."""

    def post(self):
        self.set_header("Content-Type", "application/json")
        try:
            x = self.request.body
            # The body may carry trailing padding after the JSON object, so
            # only parse up to (and including) the last closing brace.
            k = x.rfind('}')
            c = lambda x: "Got request ``" + x + "''"
            d = json.loads(x[:(k+1)])
            print(c(x))
        # BUG FIX: this was a bare `except:`, which also swallows
        # SystemExit/KeyboardInterrupt. Catch Exception instead; the error
        # payload (with its deliberate trailing padding) is unchanged.
        except Exception:
            self.write('{"error": "Malformed request"}such padded much random')
            return
        y = json.dumps(handle(d))
        print("Woohoo ``" + y + "''")
        self.write(y)
        return
# Route table: / is the landing page, /dumb is the JSON request endpoint.
application = tornado.web.Application([ (r"/", Main)
                                      , (r"/dumb", Dumb) ])

if __name__ == "__main__":
    # storage.version() is printed at startup; it also exercises the sqlite
    # layer before the server begins accepting requests.
    print("Listening to 10081 (sqlite: %s)" % storage.version())
    application.listen(10081)
    tornado.ioloop.IOLoop.instance().start()
|
dr0pz0ne/sibble | refs/heads/add_block_rescue_task_variables | test/units/parsing/vault/test_vault.py | 60 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import os
import shutil
import time
import tempfile
import six
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible.compat.tests import unittest
from ansible.utils.unicode import to_bytes, to_unicode
from ansible import errors
from ansible.parsing.vault import VaultLib
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
class TestVaultLib(unittest.TestCase):
    """Unit tests for ansible.parsing.vault.VaultLib: API surface, header
    formatting/splitting, AES/AES256 round trips, and error paths. The
    crypto-dependent tests are skipped when pycrypto extras are missing."""

    def test_methods_exist(self):
        # The public/private API surface expected by callers.
        v = VaultLib('ansible')
        slots = ['is_encrypted',
                 'encrypt',
                 'decrypt',
                 '_format_output',
                 '_split_header',]
        for slot in slots:
            assert hasattr(v, slot), "VaultLib is missing the %s method" % slot

    def test_is_encrypted(self):
        v = VaultLib(None)
        assert not v.is_encrypted(u"foobar"), "encryption check on plaintext failed"
        data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
        assert v.is_encrypted(data), "encryption check on headered text failed"

    def test_format_output(self):
        # Output shape: $ANSIBLE_VAULT;<version>;<cipher>\n<payload>
        v = VaultLib('ansible')
        v.cipher_name = "TEST"
        sensitive_data = b"ansible"
        data = v._format_output(sensitive_data)

        lines = data.split(b'\n')
        assert len(lines) > 1, "failed to properly add header"

        header = to_bytes(lines[0])
        assert header.endswith(b';TEST'), "header does end with cipher name"

        header_parts = header.split(b';')
        assert len(header_parts) == 3, "header has the wrong number of parts"
        assert header_parts[0] == b'$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT"
        assert header_parts[1] == v.b_version, "header version is incorrect"
        assert header_parts[2] == b'TEST', "header does end with cipher name"

    def test_split_header(self):
        # _split_header strips the header line and records cipher/version.
        v = VaultLib('ansible')
        data = b"$ANSIBLE_VAULT;9.9;TEST\nansible"
        rdata = v._split_header(data)
        lines = rdata.split(b'\n')
        assert lines[0] == b"ansible"
        assert v.cipher_name == 'TEST', "cipher name was not set"
        assert v.b_version == b"9.9"

    def test_encrypt_decrypt_aes(self):
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        v.cipher_name = u'AES'
        # AES encryption code has been removed, so this is old output for
        # AES-encrypted 'foobar' with password 'ansible'.
        enc_data = b'$ANSIBLE_VAULT;1.1;AES\n53616c7465645f5fc107ce1ef4d7b455e038a13b053225776458052f8f8f332d554809d3f150bfa3\nfe3db930508b65e0ff5947e4386b79af8ab094017629590ef6ba486814cf70f8e4ab0ed0c7d2587e\n786a5a15efeb787e1958cbdd480d076c\n'
        dec_data = v.decrypt(enc_data)
        assert dec_data == b"foobar", "decryption failed"

    def test_encrypt_decrypt_aes256(self):
        # Full round trip with the current AES256 cipher.
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        v.cipher_name = 'AES256'
        enc_data = v.encrypt(b"foobar")
        dec_data = v.decrypt(enc_data)
        assert enc_data != b"foobar", "encryption failed"
        assert dec_data == b"foobar", "decryption failed"

    def test_encrypt_encrypted(self):
        # Encrypting data that already has a vault header must fail.
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        v.cipher_name = 'AES'
        data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(six.b("ansible"))
        error_hit = False
        try:
            enc_data = v.encrypt(data)
        except errors.AnsibleError as e:
            error_hit = True
        assert error_hit, "No error was thrown when trying to encrypt data with a header"

    def test_decrypt_decrypted(self):
        # Decrypting plaintext (no vault header) must fail.
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        data = "ansible"
        error_hit = False
        try:
            dec_data = v.decrypt(data)
        except errors.AnsibleError as e:
            error_hit = True
        assert error_hit, "No error was thrown when trying to decrypt data without a header"

    def test_cipher_not_set(self):
        # not setting the cipher should default to AES256
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        data = "ansible"
        error_hit = False
        try:
            enc_data = v.encrypt(data)
        except errors.AnsibleError as e:
            error_hit = True
        assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set"
        assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name
|
OpenTrons/opentrons_sdk | refs/heads/master | labware-library/cypress/fixtures/TestLabwareProtocol.py | 1 | import json
from opentrons import protocol_api, types
# Deck coordinates (mm) of the calibration crosses on slots 1, 3 and 7.
CALIBRATION_CROSS_COORDS = {
    '1': {
        'x': 12.13,
        'y': 9.0,
        'z': 0.0
    },
    '3': {
        'x': 380.87,
        'y': 9.0,
        'z': 0.0
    },
    '7': {
        'x': 12.13,
        'y': 258.0,
        'z': 0.0
    }
}
CALIBRATION_CROSS_SLOTS = ['1', '3', '7']
# Deck slot where the user places the labware under test.
TEST_LABWARE_SLOT = '2'
# Gantry speed scaling factors (fraction of default speeds).
RATE = 0.25  # % of default speeds
SLOWER_RATE = 0.1
# Pipette and tip rack configuration.
PIPETTE_MOUNT = 'right'
PIPETTE_NAME = 'p10_single'
TIPRACK_SLOT = '5'
TIPRACK_LOADNAME = 'opentrons_96_tiprack_20ul'
LABWARE_DEF_JSON = """{"ordering":[["A1","B1","C1"],["A2","B2","C2"],["A3","B3","C3"],["A4","B4","C4"],["A5","B5","C5"]],"brand":{"brand":"TestPro","brandId":["001"]},"metadata":{"displayName":"TestPro 15 Well Plate 5 µL","displayCategory":"wellPlate","displayVolumeUnits":"µL","tags":[]},"dimensions":{"xDimension":127,"yDimension":85,"zDimension":5},"wells":{"A1":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":10,"y":75,"z":0},"B1":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":10,"y":50,"z":0},"C1":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":10,"y":25,"z":0},"A2":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":35,"y":75,"z":0},"B2":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":35,"y":50,"z":0},"C2":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":35,"y":25,"z":0},"A3":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":60,"y":75,"z":0},"B3":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":60,"y":50,"z":0},"C3":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":60,"y":25,"z":0},"A4":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":85,"y":75,"z":0},"B4":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":85,"y":50,"z":0},"C4":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":85,"y":25,"z":0},"A5":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":110,"y":75,"z":0},"B5":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":110,"y":50,"z":0},"C5":{"depth":5,"totalLiquidVolume":5,"shape":"circular","diameter":5,"x":110,"y":25,"z":0}},"groups":[{"metadata":{"displayName":"TestPro 15 Well Plate 5 
µL","displayCategory":"wellPlate","wellBottomShape":"flat"},"brand":{"brand":"TestPro","brandId":["001"]},"wells":["A1","B1","C1","A2","B2","C2","A3","B3","C3","A4","B4","C4","A5","B5","C5"]}],"parameters":{"format":"irregular","quirks":[],"isTiprack":false,"isMagneticModuleCompatible":false,"loadName":"testpro_15_wellplate_5ul"},"namespace":"custom_beta","version":1,"schemaVersion":2,"cornerOffsetFromSlot":{"x":0,"y":0,"z":0}}"""
# Decode the embedded labware definition once at import time.
LABWARE_DEF = json.loads(LABWARE_DEF_JSON)
# Fall back to a generic label when the definition lacks a display name.
LABWARE_LABEL = LABWARE_DEF.get('metadata', {}).get(
    'displayName', 'test labware')

metadata = {'apiLevel': '2.0'}
def uniq(l):
    """Return the items of *l* in first-seen order with duplicates removed.

    Membership is tested with equality against the output list (not
    hashing), so items only need to support ==.
    """
    seen = []
    for item in l:
        if item not in seen:
            seen.append(item)
    return seen
def run(protocol: protocol_api.ProtocolContext):
    """Labware-definition check protocol.

    Moves the pipette to each deck calibration cross, then probes the top,
    the four top-rim edges, and the bottom of the first and last wells of
    the custom labware, pausing at each position so the user can visually
    confirm the definition's geometry.
    """
    tiprack = protocol.load_labware(TIPRACK_LOADNAME, TIPRACK_SLOT)
    pipette = protocol.load_instrument(
        PIPETTE_NAME, PIPETTE_MOUNT, tip_racks=[tiprack])
    test_labware = protocol.load_labware_from_definition(
        LABWARE_DEF,
        TEST_LABWARE_SLOT,
        LABWARE_LABEL,
    )

    num_cols = len(LABWARE_DEF.get('ordering', [[]]))
    num_rows = len(LABWARE_DEF.get('ordering', [[]])[0])
    # Probe only well A1 and the last well; uniq() collapses the two when
    # the labware has a single well.
    well_locs = uniq([
        'A1',
        '{}{}'.format(chr(ord('A') + num_rows - 1), str(num_cols))])

    pipette.pick_up_tip()

    def set_speeds(rate):
        # Scale all gantry axis speed caps by `rate` (fraction of default),
        # then cap each loaded instrument's default speed to the fastest
        # remaining axis.
        protocol.max_speeds.update({
            'X': (600 * rate),
            'Y': (400 * rate),
            'Z': (125 * rate),
            'A': (125 * rate),
        })

        speed_max = max(protocol.max_speeds.values())

        for instr in protocol.loaded_instruments.values():
            instr.default_speed = speed_max

    set_speeds(RATE)
    # Visit each calibration cross and wait for user confirmation.
    for slot in CALIBRATION_CROSS_SLOTS:
        coordinate = CALIBRATION_CROSS_COORDS[slot]
        location = types.Location(point=types.Point(**coordinate),
                                  labware=None)
        pipette.move_to(location)
        protocol.pause(
            f"Confirm {PIPETTE_MOUNT} pipette is at slot {slot} calibration cross")

    pipette.home()
    protocol.pause(f"Place your labware in Slot {TEST_LABWARE_SLOT}")

    for well_loc in well_locs:
        well = test_labware.well(well_loc)
        # Midpoints of the four top-rim edges, in well-relative coordinates.
        all_4_edges = [
            [well._from_center_cartesian(x=-1, y=0, z=1), 'left'],
            [well._from_center_cartesian(x=1, y=0, z=1), 'right'],
            [well._from_center_cartesian(x=0, y=-1, z=1), 'front'],
            [well._from_center_cartesian(x=0, y=1, z=1), 'back']
        ]

        set_speeds(RATE)
        pipette.move_to(well.top())
        protocol.pause("Moved to the top of the well")

        # Trace the rim slowly so small geometry errors are visible.
        for edge_pos, edge_name in all_4_edges:
            set_speeds(SLOWER_RATE)
            edge_location = types.Location(point=edge_pos, labware=None)
            pipette.move_to(edge_location)
            protocol.pause(f'Moved to {edge_name} edge')

        set_speeds(RATE)
        pipette.move_to(well.bottom())
        protocol.pause("Moved to the bottom of the well")
        pipette.blow_out(well)

    # Restore full speed before returning the tip.
    set_speeds(1.0)
    pipette.return_tip()
|
razvanphp/arangodb | refs/heads/devel | 3rdParty/V8-3.31.74.1/third_party/python_26/Lib/site-packages/win32/lib/netbios.py | 21 | import win32wnet
import struct
import string
# Constants generated by h2py from nb30.h
NCBNAMSZ = 16
MAX_LANA = 254
NAME_FLAGS_MASK = 0x87
GROUP_NAME = 0x80
UNIQUE_NAME = 0x00
REGISTERING = 0x00
REGISTERED = 0x04
DEREGISTERED = 0x05
DUPLICATE = 0x06
DUPLICATE_DEREG = 0x07
LISTEN_OUTSTANDING = 0x01
CALL_PENDING = 0x02
SESSION_ESTABLISHED = 0x03
HANGUP_PENDING = 0x04
HANGUP_COMPLETE = 0x05
SESSION_ABORTED = 0x06
ALL_TRANSPORTS = "M\0\0\0"
MS_NBF = "MNBF"
NCBCALL = 0x10
NCBLISTEN = 0x11
NCBHANGUP = 0x12
NCBSEND = 0x14
NCBRECV = 0x15
NCBRECVANY = 0x16
NCBCHAINSEND = 0x17
NCBDGSEND = 0x20
NCBDGRECV = 0x21
NCBDGSENDBC = 0x22
NCBDGRECVBC = 0x23
NCBADDNAME = 0x30
NCBDELNAME = 0x31
NCBRESET = 0x32
NCBASTAT = 0x33
NCBSSTAT = 0x34
NCBCANCEL = 0x35
NCBADDGRNAME = 0x36
NCBENUM = 0x37
NCBUNLINK = 0x70
NCBSENDNA = 0x71
NCBCHAINSENDNA = 0x72
NCBLANSTALERT = 0x73
NCBACTION = 0x77
NCBFINDNAME = 0x78
NCBTRACE = 0x79
ASYNCH = 0x80
NRC_GOODRET = 0x00
NRC_BUFLEN = 0x01
NRC_ILLCMD = 0x03
NRC_CMDTMO = 0x05
NRC_INCOMP = 0x06
NRC_BADDR = 0x07
NRC_SNUMOUT = 0x08
NRC_NORES = 0x09
NRC_SCLOSED = 0x0a
NRC_CMDCAN = 0x0b
NRC_DUPNAME = 0x0d
NRC_NAMTFUL = 0x0e
NRC_ACTSES = 0x0f
NRC_LOCTFUL = 0x11
NRC_REMTFUL = 0x12
NRC_ILLNN = 0x13
NRC_NOCALL = 0x14
NRC_NOWILD = 0x15
NRC_INUSE = 0x16
NRC_NAMERR = 0x17
NRC_SABORT = 0x18
NRC_NAMCONF = 0x19
NRC_IFBUSY = 0x21
NRC_TOOMANY = 0x22
NRC_BRIDGE = 0x23
NRC_CANOCCR = 0x24
NRC_CANCEL = 0x26
NRC_DUPENV = 0x30
NRC_ENVNOTDEF = 0x34
NRC_OSRESNOTAV = 0x35
NRC_MAXAPPS = 0x36
NRC_NOSAPS = 0x37
NRC_NORESOURCES = 0x38
NRC_INVADDRESS = 0x39
NRC_INVDDID = 0x3B
NRC_LOCKFAIL = 0x3C
NRC_OPENERR = 0x3f
NRC_SYSTEM = 0x40
NRC_PENDING = 0xff
UCHAR = "B"
WORD = "H"
DWORD = "I"
USHORT = "H"
ULONG = "I"
ADAPTER_STATUS_ITEMS = [
("6s", "adapter_address"),
(UCHAR, "rev_major"),
(UCHAR, "reserved0"),
(UCHAR, "adapter_type"),
(UCHAR, "rev_minor"),
(WORD, "duration"),
(WORD, "frmr_recv"),
(WORD, "frmr_xmit"),
(WORD, "iframe_recv_err"),
(WORD, "xmit_aborts"),
(DWORD, "xmit_success"),
(DWORD, "recv_success"),
(WORD, "iframe_xmit_err"),
(WORD, "recv_buff_unavail"),
(WORD, "t1_timeouts"),
(WORD, "ti_timeouts"),
(DWORD, "reserved1"),
(WORD, "free_ncbs"),
(WORD, "max_cfg_ncbs"),
(WORD, "max_ncbs"),
(WORD, "xmit_buf_unavail"),
(WORD, "max_dgram_size"),
(WORD, "pending_sess"),
(WORD, "max_cfg_sess"),
(WORD, "max_sess"),
(WORD, "max_sess_pkt_size"),
(WORD, "name_count"),
]
NAME_BUFFER_ITEMS = [
(str(NCBNAMSZ) + "s", "name"),
(UCHAR, "name_num"),
(UCHAR, "name_flags"),
]
SESSION_HEADER_ITEMS = [
(UCHAR, "sess_name"),
(UCHAR, "num_sess"),
(UCHAR, "rcv_dg_outstanding"),
(UCHAR, "rcv_any_outstanding"),
]
SESSION_BUFFER_ITEMS = [
(UCHAR, "lsn"),
(UCHAR, "state"),
(str(NCBNAMSZ)+"s", "local_name"),
(str(NCBNAMSZ)+"s", "remote_name"),
(UCHAR, "rcvs_outstanding"),
(UCHAR, "sends_outstanding"),
]
LANA_ENUM_ITEMS = [
("B", "length"), # Number of valid entries in lana[]
(str(MAX_LANA+1) + "s", "lana"),
]
FIND_NAME_HEADER_ITEMS = [
(WORD, "node_count"),
(UCHAR, "reserved"),
(UCHAR, "unique_group"),
]
FIND_NAME_BUFFER_ITEMS = [
(UCHAR, "length"),
(UCHAR, "access_control"),
(UCHAR, "frame_control"),
("6s", "destination_addr"),
("6s", "source_addr"),
("18s", "routing_info"),
]
ACTION_HEADER_ITEMS = [
(ULONG, "transport_id"),
(USHORT, "action_code"),
(USHORT, "reserved"),
]
del UCHAR, WORD, DWORD, USHORT, ULONG
NCB = win32wnet.NCB
def Netbios(ncb):
    """Execute the NetBIOS command described by *ncb* via win32wnet.

    If ncb.Buffer is one of this module's NCBStruct wrappers (detected by
    the presence of a _pack method), its attributes are marshalled into the
    raw buffer before the call and unmarshalled back afterwards — the
    unpack happens even if the underlying call raises.
    """
    buf = ncb.Buffer
    pack = getattr(buf, "_pack", None)
    if pack is not None:
        pack()
    try:
        return win32wnet.Netbios(ncb)
    finally:
        if pack is not None:
            buf._unpack()
class NCBStruct:
    """Attribute-style wrapper over a packed NetBIOS structure.

    Built from a list of (struct-format, field-name) pairs; exposes each
    field as an instance attribute and marshals to/from a raw NCB buffer
    via _pack()/_unpack().  (Python 2 code: string.join, apply, has_key.)
    """
    def __init__(self, items):
        # Concatenate the per-field formats into one struct format string.
        self._format = string.join(map(lambda item: item[0], items), "")
        self._items = items
        self._buffer_ = win32wnet.NCBBuffer(struct.calcsize(self._format))
        # Give every field a zero-valued default of the right shape.
        for format, name in self._items:
            if len(format)==1:
                if format == 'c':
                    val = '\0'
                else:
                    val = 0
            else:
                # Multi-byte field such as "6s": default to NUL padding.
                l = int(format[:-1])
                val = '\0' * l
            self.__dict__[name] = val
    def _pack(self):
        # Marshal current attribute values into the raw buffer, in the
        # declaration order of self._items.
        vals = []
        for format, name in self._items:
            try:
                vals.append(self.__dict__[name])
            except KeyError:
                vals.append(None)
        self._buffer_[:] = apply( struct.pack, (self._format,) + tuple(vals) )
    def _unpack(self):
        # Unmarshal the raw buffer back into instance attributes.
        items = struct.unpack(self._format, self._buffer_)
        assert len(items)==len(self._items), "unexpected number of items to unpack!"
        # Py2 map(None, a, b) pairs the two sequences like zip().
        for (format, name), val in map(None, self._items, items):
            self.__dict__[name] = val
    def __setattr__(self, attr, val):
        # Reject unknown public attribute names so typos fail loudly;
        # private (_-prefixed) attributes are always allowed.
        if not self.__dict__.has_key(attr) and attr[0]!='_':
            for format, attr_name in self._items:
                if attr==attr_name:
                    break
            else:
                raise AttributeError, attr
        self.__dict__[attr] = val
def ADAPTER_STATUS():
    # Factory: fresh NCBStruct laid out as a NetBIOS ADAPTER_STATUS block.
    return NCBStruct(ADAPTER_STATUS_ITEMS)
def NAME_BUFFER():
    # Factory: fresh NCBStruct laid out as a NetBIOS NAME_BUFFER entry.
    return NCBStruct(NAME_BUFFER_ITEMS)
def SESSION_HEADER():
    # Factory: fresh NCBStruct laid out as a NetBIOS SESSION_HEADER block.
    return NCBStruct(SESSION_HEADER_ITEMS)
def SESSION_BUFFER():
    # Factory: fresh NCBStruct laid out as a NetBIOS SESSION_BUFFER entry.
    return NCBStruct(SESSION_BUFFER_ITEMS)
def LANA_ENUM():
    # Factory: fresh NCBStruct laid out as a NetBIOS LANA_ENUM block.
    return NCBStruct(LANA_ENUM_ITEMS)
def FIND_NAME_HEADER():
    # Factory: fresh NCBStruct laid out as a NetBIOS FIND_NAME_HEADER block.
    return NCBStruct(FIND_NAME_HEADER_ITEMS)
def FIND_NAME_BUFFER():
    # Factory: fresh NCBStruct laid out as a NetBIOS FIND_NAME_BUFFER entry.
    return NCBStruct(FIND_NAME_BUFFER_ITEMS)
def ACTION_HEADER():
    # Factory: fresh NCBStruct laid out as a NetBIOS ACTION_HEADER block.
    return NCBStruct(ACTION_HEADER_ITEMS)
if __name__=='__main__':
    # Demo: enumerate LANA numbers, then query each adapter's status and
    # print its MAC address.
    # code ported from "HOWTO: Get the MAC Address for an Ethernet Adapter"
    # MS KB ID: Q118623
    ncb = NCB()
    ncb.Command = NCBENUM
    la_enum = LANA_ENUM()
    ncb.Buffer = la_enum
    rc = Netbios(ncb)
    if rc != 0: raise RuntimeError, "Unexpected result %d" % (rc,)
    for i in range(la_enum.length):
        # Each adapter must be reset before it can be queried.
        ncb.Reset()
        ncb.Command = NCBRESET
        ncb.Lana_num = ord(la_enum.lana[i])
        rc = Netbios(ncb)
        if rc != 0: raise RuntimeError, "Unexpected result %d" % (rc,)
        ncb.Reset()
        ncb.Command = NCBASTAT
        ncb.Lana_num = ord(la_enum.lana[i])
        # "*" queries the local adapter rather than a remote name.
        ncb.Callname = "* "
        adapter = ADAPTER_STATUS()
        ncb.Buffer = adapter
        Netbios(ncb)
        print "Adapter address:",
        for ch in adapter.adapter_address:
            print "%02x" % (ord(ch),) ,
        print
|
therandomcode/Fanalytics | refs/heads/master | lib/flask/ext/__init__.py | 853 | # -*- coding: utf-8 -*-
"""
flask.ext
~~~~~~~~~
Redirect imports for extensions. This module basically makes it possible
for us to transition from flaskext.foo to flask_foo without having to
force all extensions to upgrade at the same time.
When a user does ``from flask.ext.foo import bar`` it will attempt to
import ``from flask_foo import bar`` first and when that fails it will
try to import ``from flaskext.foo import bar``.
We're switching from namespace packages because it was just too painful for
everybody involved.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
def setup():
    # Install the import hook that redirects "flask.ext.foo" imports to
    # "flask_foo" (falling back to "flaskext.foo").  Wrapped in a function
    # so the helper names can be removed from the module namespace below.
    from ..exthook import ExtensionImporter
    importer = ExtensionImporter(['flask_%s', 'flaskext.%s'], __name__)
    importer.install()
# Run the hook installation at import time, then drop the helper so the
# module namespace stays clean for attribute-style extension imports.
setup()
del setup
|
Pajinek/spacewalk | refs/heads/master | backend/server/action_extra_data/__init__.py | 14 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
|
itai12312/workspaces | refs/heads/master | hellodjango/venv/lib/python2.7/site-packages/django/contrib/gis/db/backends/spatialite/introspection.py | 221 | from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import DatabaseIntrospection, FlexibleFieldLookupDict
from django.utils import six
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
    """
    Subclass that updates the `base_data_types_reverse` dict with
    geometry field types, so SpatiaLite geometry columns introspect
    as GeometryField.
    """
    base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
    base_data_types_reverse.update(
        {'point' : 'GeometryField',
         'linestring' : 'GeometryField',
         'polygon' : 'GeometryField',
         'multipoint' : 'GeometryField',
         'multilinestring' : 'GeometryField',
         'multipolygon' : 'GeometryField',
         'geometrycollection' : 'GeometryField',
         })
class SpatiaLiteIntrospection(DatabaseIntrospection):
    """SQLite introspection that maps SpatiaLite geometry columns to Django fields."""
    data_types_reverse = GeoFlexibleFieldLookupDict()

    def get_geometry_type(self, table_name, geo_col):
        """Return (field_type, field_params) for the geometry column *geo_col*
        of *table_name*, based on SpatiaLite's `geometry_columns` metadata.

        Raises Exception if the column has no `geometry_columns` row.
        """
        cursor = self.connection.cursor()
        try:
            # Querying the `geometry_columns` table to get additional metadata.
            cursor.execute('SELECT "coord_dimension", "srid", "type" '
                           'FROM "geometry_columns" '
                           'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
                           (table_name, geo_col))
            row = cursor.fetchone()
            if not row:
                raise Exception('Could not find a geometry column for "%s"."%s"' %
                                (table_name, geo_col))

            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            field_type = OGRGeomType(row[2]).django

            # Getting any GeometryField keyword arguments that are not the default.
            dim = row[0]
            srid = row[1]
            field_params = {}
            if srid != 4326:
                field_params['srid'] = srid
            # A dimension string containing 'Z' indicates 3D geometry.
            if isinstance(dim, six.string_types) and 'Z' in dim:
                field_params['dim'] = 3
        finally:
            # Close the cursor even if the lookup raised.
            cursor.close()
        return field_type, field_params
|
NeuralEnsemble/neuroConstruct | refs/heads/master | lib/jython/Lib/test/test_locale.py | 72 | from test.test_support import run_unittest, verbose
import unittest
import locale
import sys
import codecs
enUS_locale = None
def get_enUS_locale():
    # Probe the platform for a usable US-English numeric locale and store
    # its name in the module-global `enUS_locale`; raises unittest.SkipTest
    # when no candidate is supported.  Restores the previous LC_NUMERIC
    # setting before returning.
    global enUS_locale
    if sys.platform == 'darwin':
        import os
        tlocs = ("en_US.UTF-8", "en_US.ISO8859-1", "en_US")
        if int(os.uname()[2].split('.')[0]) < 10:
            # The locale test work fine on OSX 10.6, I (ronaldoussoren)
            # haven't had time yet to verify if tests work on OSX 10.5
            # (10.4 is known to be bad)
            raise unittest.SkipTest("Locale support on MacOSX is minimal")
    if sys.platform.startswith("win"):
        tlocs = ("En", "English")
    else:
        tlocs = ("en_US.UTF-8", "en_US.US-ASCII", "en_US")
    oldlocale = locale.setlocale(locale.LC_NUMERIC)
    for tloc in tlocs:
        try:
            locale.setlocale(locale.LC_NUMERIC, tloc)
        except locale.Error:
            continue
        break
    else:
        raise unittest.SkipTest(
            "Test locale not supported (tried %s)" % (', '.join(tlocs)))
    enUS_locale = tloc
    locale.setlocale(locale.LC_NUMERIC, oldlocale)
class BaseLocalizedTest(unittest.TestCase):
    #
    # Base class for tests using a real locale
    # Subclasses must define `locale_type` (e.g. locale.LC_NUMERIC).
    #
    def setUp(self):
        # Remember the current setting so tearDown can restore it.
        self.oldlocale = locale.setlocale(self.locale_type)
        locale.setlocale(self.locale_type, enUS_locale)
        if verbose:
            print "testing with \"%s\"..." % enUS_locale,

    def tearDown(self):
        locale.setlocale(self.locale_type, self.oldlocale)
class BaseCookedTest(unittest.TestCase):
    #
    # Base class for tests using cooked localeconv() values
    # Subclasses provide `cooked_values`, a dict overriding localeconv().
    #
    def setUp(self):
        locale._override_localeconv = self.cooked_values

    def tearDown(self):
        # Clear the override so other tests see the real localeconv().
        locale._override_localeconv = {}
class CCookedTest(BaseCookedTest):
    # A cooked "C" locale
    # 127 (CHAR_MAX) marks the values the C locale leaves unspecified.
    cooked_values = {
        'currency_symbol': '',
        'decimal_point': '.',
        'frac_digits': 127,
        'grouping': [],
        'int_curr_symbol': '',
        'int_frac_digits': 127,
        'mon_decimal_point': '',
        'mon_grouping': [],
        'mon_thousands_sep': '',
        'n_cs_precedes': 127,
        'n_sep_by_space': 127,
        'n_sign_posn': 127,
        'negative_sign': '',
        'p_cs_precedes': 127,
        'p_sep_by_space': 127,
        'p_sign_posn': 127,
        'positive_sign': '',
        'thousands_sep': ''
    }
class EnUSCookedTest(BaseCookedTest):
    # A cooked "en_US" locale
    # Dollar currency, '.' decimal point, ',' thousands separator,
    # grouped in threes.
    cooked_values = {
        'currency_symbol': '$',
        'decimal_point': '.',
        'frac_digits': 2,
        'grouping': [3, 3, 0],
        'int_curr_symbol': 'USD ',
        'int_frac_digits': 2,
        'mon_decimal_point': '.',
        'mon_grouping': [3, 3, 0],
        'mon_thousands_sep': ',',
        'n_cs_precedes': 1,
        'n_sep_by_space': 0,
        'n_sign_posn': 1,
        'negative_sign': '-',
        'p_cs_precedes': 1,
        'p_sep_by_space': 0,
        'p_sign_posn': 1,
        'positive_sign': '',
        'thousands_sep': ','
    }
class FrFRCookedTest(BaseCookedTest):
    # A cooked "fr_FR" locale with a space character as decimal separator
    # and a non-ASCII currency symbol.
    # currency_symbol is the euro sign as UTF-8 bytes (this is Python 2).
    cooked_values = {
        'currency_symbol': '\xe2\x82\xac',
        'decimal_point': ',',
        'frac_digits': 2,
        'grouping': [3, 3, 0],
        'int_curr_symbol': 'EUR ',
        'int_frac_digits': 2,
        'mon_decimal_point': ',',
        'mon_grouping': [3, 3, 0],
        'mon_thousands_sep': ' ',
        'n_cs_precedes': 0,
        'n_sep_by_space': 1,
        'n_sign_posn': 1,
        'negative_sign': '-',
        'p_cs_precedes': 0,
        'p_sep_by_space': 1,
        'p_sign_posn': 1,
        'positive_sign': '',
        'thousands_sep': ' '
    }
class BaseFormattingTest(object):
    #
    # Utility functions for formatting tests
    # (mixin: expects unittest.TestCase assertion methods on self)
    #
    def _test_formatfunc(self, format, value, out, func, **format_opts):
        # Assert that func(format, value, **opts) produces exactly `out`.
        self.assertEqual(
            func(format, value, **format_opts), out)

    def _test_format(self, format, value, out, **format_opts):
        self._test_formatfunc(format, value, out,
            func=locale.format, **format_opts)

    def _test_format_string(self, format, value, out, **format_opts):
        self._test_formatfunc(format, value, out,
            func=locale.format_string, **format_opts)

    def _test_currency(self, value, out, **format_opts):
        self.assertEqual(locale.currency(value, **format_opts), out)
class EnUSNumberFormatting(BaseFormattingTest):
    # Shared en_US formatting checks; mixed into both the real-locale and
    # cooked-locale test cases below.
    # XXX there is a grouping + padding bug when the thousands separator
    # is empty but the grouping array contains values (e.g. Solaris 10)

    def setUp(self):
        # Separator from the active locale; may be empty on some platforms,
        # in which case the separator-dependent checks are skipped.
        self.sep = locale.localeconv()['thousands_sep']

    def test_grouping(self):
        self._test_format("%f", 1024, grouping=1, out='1%s024.000000' % self.sep)
        self._test_format("%f", 102, grouping=1, out='102.000000')
        self._test_format("%f", -42, grouping=1, out='-42.000000')
        self._test_format("%+f", -42, grouping=1, out='-42.000000')

    def test_grouping_and_padding(self):
        self._test_format("%20.f", -42, grouping=1, out='-42'.rjust(20))
        if self.sep:
            self._test_format("%+10.f", -4200, grouping=1,
                out=('-4%s200' % self.sep).rjust(10))
            self._test_format("%-10.f", -4200, grouping=1,
                out=('-4%s200' % self.sep).ljust(10))

    def test_integer_grouping(self):
        self._test_format("%d", 4200, grouping=True, out='4%s200' % self.sep)
        self._test_format("%+d", 4200, grouping=True, out='+4%s200' % self.sep)
        self._test_format("%+d", -4200, grouping=True, out='-4%s200' % self.sep)

    def test_integer_grouping_and_padding(self):
        self._test_format("%10d", 4200, grouping=True,
            out=('4%s200' % self.sep).rjust(10))
        self._test_format("%-10d", -4200, grouping=True,
            out=('-4%s200' % self.sep).ljust(10))

    def test_simple(self):
        self._test_format("%f", 1024, grouping=0, out='1024.000000')
        self._test_format("%f", 102, grouping=0, out='102.000000')
        self._test_format("%f", -42, grouping=0, out='-42.000000')
        self._test_format("%+f", -42, grouping=0, out='-42.000000')

    def test_padding(self):
        self._test_format("%20.f", -42, grouping=0, out='-42'.rjust(20))
        self._test_format("%+10.f", -4200, grouping=0, out='-4200'.rjust(10))
        self._test_format("%-10.f", 4200, grouping=0, out='4200'.ljust(10))

    def test_complex_formatting(self):
        # Spaces in formatting string
        self._test_format_string("One million is %i", 1000000, grouping=1,
            out='One million is 1%s000%s000' % (self.sep, self.sep))
        self._test_format_string("One  million is %i", 1000000, grouping=1,
            out='One  million is 1%s000%s000' % (self.sep, self.sep))
        # Dots in formatting string
        self._test_format_string(".%f.", 1000.0, out='.1000.000000.')
        # Padding
        if self.sep:
            self._test_format_string("-->  %10.2f", 4200, grouping=1,
                out='-->  ' + ('4%s200.00' % self.sep).rjust(10))
        # Asterisk formats
        self._test_format_string("%10.*f", (2, 1000), grouping=0,
            out='1000.00'.rjust(10))
        if self.sep:
            self._test_format_string("%*.*f", (10, 2, 1000), grouping=1,
                out=('1%s000.00' % self.sep).rjust(10))
        # Test more-in-one
        if self.sep:
            self._test_format_string("int %i float %.2f str %s",
                (1000, 1000.0, 'str'), grouping=1,
                out='int 1%s000 float 1%s000.00 str str' %
                (self.sep, self.sep))
class TestFormatPatternArg(unittest.TestCase):
    # Test handling of pattern argument of format
    # locale.format() must reject anything other than a single bare
    # %-directive (no surrounding text or extra directives).

    def test_onlyOnePattern(self):
        # Issue 2522: accept exactly one % pattern, and no extra chars.
        self.assertRaises(ValueError, locale.format, "%f\n", 'foo')
        self.assertRaises(ValueError, locale.format, "%f\r", 'foo')
        self.assertRaises(ValueError, locale.format, "%f\r\n", 'foo')
        self.assertRaises(ValueError, locale.format, " %f", 'foo')
        self.assertRaises(ValueError, locale.format, "%fg", 'foo')
        self.assertRaises(ValueError, locale.format, "%^g", 'foo')
        self.assertRaises(ValueError, locale.format, "%f%%", 'foo')
class TestLocaleFormatString(unittest.TestCase):
    """General tests on locale.format_string"""

    def test_percent_escape(self):
        # Literal %% must survive formatting just like the % operator.
        self.assertEqual(locale.format_string('%f%%', 1.0), '%f%%' % 1.0)
        self.assertEqual(locale.format_string('%d %f%%d', (1, 1.0)),
            '%d %f%%d' % (1, 1.0))
        self.assertEqual(locale.format_string('%(foo)s %%d', {'foo': 'bar'}),
            ('%(foo)s %%d' % {'foo': 'bar'}))

    def test_mapping(self):
        # Named %(key)s directives with a dict argument.
        self.assertEqual(locale.format_string('%(foo)s bing.', {'foo': 'bar'}),
            ('%(foo)s bing.' % {'foo': 'bar'}))
        self.assertEqual(locale.format_string('%(foo)s', {'foo': 'bar'}),
            ('%(foo)s' % {'foo': 'bar'}))
class TestNumberFormatting(BaseLocalizedTest, EnUSNumberFormatting):
    # Test number formatting with a real English locale.

    locale_type = locale.LC_NUMERIC

    def setUp(self):
        # Both bases have setUp side effects; run them in order.
        BaseLocalizedTest.setUp(self)
        EnUSNumberFormatting.setUp(self)
class TestEnUSNumberFormatting(EnUSCookedTest, EnUSNumberFormatting):
    # Test number formatting with a cooked "en_US" locale.

    def setUp(self):
        # Install the cooked localeconv values, then read the separator.
        EnUSCookedTest.setUp(self)
        EnUSNumberFormatting.setUp(self)

    def test_currency(self):
        self._test_currency(50000, "$50000.00")
        self._test_currency(50000, "$50,000.00", grouping=True)
        self._test_currency(50000, "USD 50,000.00",
            grouping=True, international=True)
class TestCNumberFormatting(CCookedTest, BaseFormattingTest):
    # Test number formatting with a cooked "C" locale.
    # The C locale has no thousands separator, so grouping is a no-op.

    def test_grouping(self):
        self._test_format("%.2f", 12345.67, grouping=True, out='12345.67')

    def test_grouping_and_padding(self):
        self._test_format("%9.2f", 12345.67, grouping=True, out=' 12345.67')
class TestFrFRNumberFormatting(FrFRCookedTest, BaseFormattingTest):
    # Test number formatting with a cooked "fr_FR" locale.
    # fr_FR uses ',' as decimal point and ' ' as thousands separator.

    def test_decimal_point(self):
        self._test_format("%.2f", 12345.67, out='12345,67')

    def test_grouping(self):
        self._test_format("%.2f", 345.67, grouping=True, out='345,67')
        self._test_format("%.2f", 12345.67, grouping=True, out='12 345,67')

    def test_grouping_and_padding(self):
        self._test_format("%6.2f", 345.67, grouping=True, out='345,67')
        self._test_format("%7.2f", 345.67, grouping=True, out=' 345,67')
        self._test_format("%8.2f", 12345.67, grouping=True, out='12 345,67')
        self._test_format("%9.2f", 12345.67, grouping=True, out='12 345,67')
        self._test_format("%10.2f", 12345.67, grouping=True, out=' 12 345,67')
        self._test_format("%-6.2f", 345.67, grouping=True, out='345,67')
        self._test_format("%-7.2f", 345.67, grouping=True, out='345,67 ')
        self._test_format("%-8.2f", 12345.67, grouping=True, out='12 345,67')
        self._test_format("%-9.2f", 12345.67, grouping=True, out='12 345,67')
        self._test_format("%-10.2f", 12345.67, grouping=True, out='12 345,67 ')

    def test_integer_grouping(self):
        self._test_format("%d", 200, grouping=True, out='200')
        self._test_format("%d", 4200, grouping=True, out='4 200')

    def test_integer_grouping_and_padding(self):
        self._test_format("%4d", 4200, grouping=True, out='4 200')
        self._test_format("%5d", 4200, grouping=True, out='4 200')
        self._test_format("%10d", 4200, grouping=True, out='4 200'.rjust(10))
        self._test_format("%-4d", 4200, grouping=True, out='4 200')
        self._test_format("%-5d", 4200, grouping=True, out='4 200')
        self._test_format("%-10d", 4200, grouping=True, out='4 200'.ljust(10))

    def test_currency(self):
        # Currency symbol is appended, with the euro sign UTF-8 encoded.
        euro = u'\u20ac'.encode('utf-8')
        self._test_currency(50000, "50000,00 " + euro)
        self._test_currency(50000, "50 000,00 " + euro, grouping=True)
        # XXX is the trailing space a bug?
        self._test_currency(50000, "50 000,00 EUR ",
            grouping=True, international=True)
class TestStringMethods(BaseLocalizedTest):
    # Byte-string ctype methods under a real en_US locale; the escaped
    # literals below are non-ASCII / UTF-8 bytes (this is Python 2).
    locale_type = locale.LC_CTYPE

    if sys.platform != 'sunos5' and not sys.platform.startswith("win"):
        # Test BSD Rune locale's bug for isctype functions.
        def test_isspace(self):
            self.assertEqual('\x20'.isspace(), True)
            self.assertEqual('\xa0'.isspace(), False)
            self.assertEqual('\xa1'.isspace(), False)

        def test_isalpha(self):
            self.assertEqual('\xc0'.isalpha(), False)

        def test_isalnum(self):
            self.assertEqual('\xc0'.isalnum(), False)

        def test_isupper(self):
            self.assertEqual('\xc0'.isupper(), False)

        def test_islower(self):
            self.assertEqual('\xc0'.islower(), False)

        def test_lower(self):
            self.assertEqual('\xcc\x85'.lower(), '\xcc\x85')

        def test_upper(self):
            self.assertEqual('\xed\x95\xa0'.upper(), '\xed\x95\xa0')

        def test_strip(self):
            self.assertEqual('\xed\x95\xa0'.strip(), '\xed\x95\xa0')

        def test_split(self):
            self.assertEqual('\xec\xa0\xbc'.split(), ['\xec\xa0\xbc'])
class TestMiscellaneous(unittest.TestCase):
    # Assorted locale-module regression tests that need no specific locale.

    def test_getpreferredencoding(self):
        # Invoke getpreferredencoding to make sure it does not cause exceptions.
        enc = locale.getpreferredencoding()
        if enc:
            # If encoding non-empty, make sure it is valid
            codecs.lookup(enc)

    if hasattr(locale, "strcoll"):
        def test_strcoll_3303(self):
            # test crasher from bug #3303
            self.assertRaises(TypeError, locale.strcoll, u"a", None)

    def test_setlocale_category(self):
        # Each category must be queryable without raising.
        locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_TIME)
        locale.setlocale(locale.LC_CTYPE)
        locale.setlocale(locale.LC_COLLATE)
        locale.setlocale(locale.LC_MONETARY)
        locale.setlocale(locale.LC_NUMERIC)
        # crasher from bug #7419
        self.assertRaises(locale.Error, locale.setlocale, 12345)

    def test_getsetlocale_issue1813(self):
        # Issue #1813: setting and getting the locale under a Turkish locale
        oldlocale = locale.getlocale()
        self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
        try:
            locale.setlocale(locale.LC_CTYPE, 'tr_TR')
        except locale.Error:
            # Unsupported locale on this system
            self.skipTest('test needs Turkish locale')
        loc = locale.getlocale()
        # Round-trip: setting the value getlocale() returned must not change it.
        locale.setlocale(locale.LC_CTYPE, loc)
        self.assertEqual(loc, locale.getlocale())

    def test_normalize_issue12752(self):
        # Issue #1813 caused a regression where locale.normalize() would no
        # longer accept unicode strings.
        self.assertEqual(locale.normalize(u'en_US'), 'en_US.ISO8859-1')
def test_main():
    # Entry point for the regression-test runner: cooked-locale tests always
    # run; real-locale tests are added only when a usable en_US locale exists.
    tests = [
        TestMiscellaneous,
        TestFormatPatternArg,
        TestLocaleFormatString,
        TestEnUSNumberFormatting,
        TestCNumberFormatting,
        TestFrFRNumberFormatting,
    ]
    # SkipTest can't be raised inside unittests, handle it manually instead
    try:
        get_enUS_locale()
    except unittest.SkipTest as e:
        if verbose:
            print "Some tests will be disabled: %s" % e
    else:
        tests += [TestNumberFormatting, TestStringMethods]
    run_unittest(*tests)

if __name__ == '__main__':
    test_main()
|
a-parhom/edx-platform | refs/heads/master | common/djangoapps/edxmako/template.py | 18 | # Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from django.template import Context, engines
from mako.template import Template as MakoTemplate
from six import text_type
from . import Engines, LOOKUP
from .request_context import get_template_request_context
from .shortcuts import is_any_marketing_link_set, is_marketing_link_set, marketing_link
KEY_CSRF_TOKENS = ('csrf_token', 'csrf')
class Template(object):
    """
    This bridges the gap between a Mako template and a Django template. It can
    be rendered like it is a Django template because the arguments are transformed
    in a way that MakoTemplate can understand.
    """

    def __init__(self, *args, **kwargs):
        """Overrides base __init__ to provide django variable overrides"""
        # Engine defaults to the project's configured Mako engine.
        self.engine = kwargs.pop('engine', engines[Engines.MAKO])
        if len(args) and isinstance(args[0], MakoTemplate):
            # Already-compiled Mako template: wrap it directly.
            self.mako_template = args[0]
        else:
            kwargs['lookup'] = LOOKUP['main']
            self.mako_template = MakoTemplate(*args, **kwargs)

    def render(self, context=None, request=None):
        """
        This takes a render call with a context (from Django) and translates
        it to a render call on the mako template.
        """
        # Start from context-processor output, then layer the caller's
        # context on top so explicit values win.
        context_object = self._get_context_object(request)
        context_dictionary = self._get_context_processors_output_dict(context_object)
        if isinstance(context, Context):
            context_dictionary.update(context.flatten())
        elif context is not None:
            context_dictionary.update(context)

        self._add_core_context(context_dictionary)
        self._evaluate_lazy_csrf_tokens(context_dictionary)

        return self.mako_template.render_unicode(**context_dictionary)

    @staticmethod
    def _get_context_object(request):
        """
        Get a Django RequestContext or Context, as appropriate for the situation.
        In some tests, there might not be a current request.
        """
        request_context = get_template_request_context(request)
        if request_context is not None:
            return request_context
        else:
            return Context({})

    def _get_context_processors_output_dict(self, context_object):
        """
        Run the context processors for the given context and get the output as a new dictionary.
        """
        with context_object.bind_template(self):
            return context_object.flatten()

    @staticmethod
    def _add_core_context(context_dictionary):
        """
        Add to the given dictionary context variables which should always be
        present, even when context processors aren't run during tests. Using
        a context processor should almost always be preferred to adding more
        variables here.
        """
        context_dictionary['settings'] = settings
        context_dictionary['EDX_ROOT_URL'] = settings.EDX_ROOT_URL
        context_dictionary['marketing_link'] = marketing_link
        context_dictionary['is_any_marketing_link_set'] = is_any_marketing_link_set
        context_dictionary['is_marketing_link_set'] = is_marketing_link_set

    @staticmethod
    def _evaluate_lazy_csrf_tokens(context_dictionary):
        """
        Evaluate any lazily-evaluated CSRF tokens in the given context.
        """
        # Mako renders values eagerly, so force lazy CSRF proxies to text now.
        for key in KEY_CSRF_TOKENS:
            if key in context_dictionary:
                context_dictionary[key] = text_type(context_dictionary[key])
|
krsjoseph/youtube-dl | refs/heads/master | youtube_dl/extractor/vimeo.py | 64 | # encoding: utf-8
from __future__ import unicode_literals
import json
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
ExtractorError,
InAdvancePagedList,
int_or_none,
RegexNotFoundError,
smuggle_url,
std_headers,
unified_strdate,
unsmuggle_url,
urlencode_postdata,
unescapeHTML,
)
class VimeoBaseInfoExtractor(InfoExtractor):
    """Shared login/XSRF-token logic for the Vimeo extractors."""
    _NETRC_MACHINE = 'vimeo'
    # Subclasses set this True when the site requires authentication.
    _LOGIN_REQUIRED = False
    _LOGIN_URL = 'https://vimeo.com/log_in'

    def _login(self):
        # Log in with credentials from the user's config/netrc; a missing
        # username is only an error when login is required.
        (username, password) = self._get_login_info()
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return
        self.report_login()
        # The login form embeds an XSRF token that must be posted back.
        webpage = self._download_webpage(self._LOGIN_URL, None, False)
        token = self._extract_xsrft(webpage)
        data = urlencode_postdata({
            'action': 'login',
            'email': username,
            'password': password,
            'service': 'vimeo',
            'token': token,
        })
        login_request = compat_urllib_request.Request(self._LOGIN_URL, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_request.add_header('Referer', self._LOGIN_URL)
        self._download_webpage(login_request, None, False, 'Wrong login info')

    def _extract_xsrft(self, webpage):
        # Pull the xsrft value out of inline JS, matching either quote style.
        return self._search_regex(
            r'xsrft\s*[=:]\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
            webpage, 'login token', group='xsrft')
class VimeoIE(VimeoBaseInfoExtractor):
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
_VALID_URL = r'''(?x)
https?://
(?:(?:www|(?P<player>player))\.)?
vimeo(?P<pro>pro)?\.com/
(?!channels/[^/?#]+/?(?:$|[?#])|album/)
(?:.*?/)?
(?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
(?:videos?/)?
(?P<id>[0-9]+)
/?(?:[?&].*)?(?:[#].*)?$'''
IE_NAME = 'vimeo'
_TESTS = [
{
'url': 'http://vimeo.com/56015672#at=0',
'md5': '8879b6cc097e987f02484baf890129e5',
'info_dict': {
'id': '56015672',
'ext': 'mp4',
"upload_date": "20121220",
"description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
"uploader_id": "user7108434",
"uploader": "Filippo Valsorda",
"title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
"duration": 10,
},
},
{
'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
'note': 'Vimeo Pro video (#1197)',
'info_dict': {
'id': '68093876',
'ext': 'mp4',
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
'description': 'md5:380943ec71b89736ff4bf27183233d09',
'duration': 1595,
},
},
{
'url': 'http://player.vimeo.com/video/54469442',
'md5': '619b811a4417aa4abe78dc653becf511',
'note': 'Videos that embed the url in the player page',
'info_dict': {
'id': '54469442',
'ext': 'mp4',
'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
'uploader': 'The BLN & Business of Software',
'uploader_id': 'theblnbusinessofsoftware',
'duration': 3610,
'description': None,
},
},
{
'url': 'http://vimeo.com/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'note': 'Video protected with password',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'upload_date': '20130614',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people who love them.',
},
'params': {
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/channels/keypeele/75629013',
'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
'note': 'Video is freely available via original URL '
'and protected with password when accessed via http://vimeo.com/75629013',
'info_dict': {
'id': '75629013',
'ext': 'mp4',
'title': 'Key & Peele: Terrorist Interrogation',
'description': 'md5:8678b246399b070816b12313e8b4eb5c',
'uploader_id': 'atencio',
'uploader': 'Peter Atencio',
'upload_date': '20130927',
'duration': 187,
},
},
{
'url': 'http://vimeo.com/76979871',
'md5': '3363dd6ffebe3784d56f4132317fd446',
'note': 'Video with subtitles',
'info_dict': {
'id': '76979871',
'ext': 'mp4',
'title': 'The New Vimeo Player (You Know, For Videos)',
'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
'upload_date': '20131015',
'uploader_id': 'staff',
'uploader': 'Vimeo Staff',
'duration': 62,
}
},
{
# from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
'url': 'https://player.vimeo.com/video/98044508',
'note': 'The js code contains assignments to the same variable as the config',
'info_dict': {
'id': '98044508',
'ext': 'mp4',
'title': 'Pier Solar OUYA Official Trailer',
'uploader': 'Tulio Gonçalves',
'uploader_id': 'user28849593',
},
},
]
    @staticmethod
    def _extract_vimeo_url(url, webpage):
        """Return the URL of a Vimeo player embedded in *webpage*, or None.

        For iframe embeds the page URL is smuggled along as the Referer so
        the later request can pass Vimeo's embed check.
        """
        # Look for embedded (iframe) Vimeo player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/.+?)\1', webpage)
        if mobj:
            player_url = unescapeHTML(mobj.group('url'))
            surl = smuggle_url(player_url, {'Referer': url})
            return surl
        # Look for embedded (swf embed) Vimeo player
        mobj = re.search(
            r'<embed[^>]+?src="((?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
        if mobj:
            return mobj.group(1)
    def _verify_video_password(self, url, video_id, webpage):
        """POST the user-supplied video password to the vimeo.com page.

        Returns the resulting webpage; raises ExtractorError when no
        password was provided or the password is wrong.
        """
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
        token = self._extract_xsrft(webpage)
        data = urlencode_postdata({
            'password': password,
            'token': token,
        })
        if url.startswith('http://'):
            # vimeo only supports https now, but the user can give an http url
            url = url.replace('http://', 'https://')
        password_request = compat_urllib_request.Request(url + '/password', data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        password_request.add_header('Referer', url)
        return self._download_webpage(
            password_request, video_id,
            'Verifying the password', 'Wrong password')
def _verify_player_video_password(self, url, video_id):
    """Submit the video password for a player.vimeo.com URL.

    Returns the parsed JSON response of the /check-password endpoint.
    Raises ExtractorError (expected) when --video-password was not given.
    """
    password = self._downloader.params.get('videopassword', None)
    if password is None:
        # expected=True for consistency with _verify_video_password: a
        # missing password is a user error, not an extractor bug.
        raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
    # urlencode_postdata returns bytes, which is what a POST body must be
    # on Python 3 (compat_urllib_parse.urlencode would yield str).
    data = urlencode_postdata({'password': password})
    pass_url = url + '/check-password'
    password_request = compat_urllib_request.Request(pass_url, data)
    password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
    return self._download_json(
        password_request, video_id,
        'Verifying the password',
        'Wrong password')
def _real_initialize(self):
    # Authenticate with Vimeo (see _login on the base class) before any
    # extraction work runs.
    self._login()
def _real_extract(self, url):
    """Extract metadata and downloadable formats for a single Vimeo video.

    Handles smuggled Referer headers (for embed-only videos),
    password-protected videos — both vimeo.com and player.vimeo.com
    variants — and the several places the config JSON can live.
    """
    url, data = unsmuggle_url(url)
    headers = std_headers
    if data is not None:
        # Copy before mutating: std_headers is a shared module-level dict.
        headers = headers.copy()
        headers.update(data)
    if 'Referer' not in headers:
        headers['Referer'] = url
    # Extract ID from URL
    mobj = re.match(self._VALID_URL, url)
    video_id = mobj.group('id')
    orig_url = url
    if mobj.group('pro') or mobj.group('player'):
        url = 'https://player.vimeo.com/video/' + video_id
    else:
        url = 'https://vimeo.com/' + video_id
    # Retrieve video webpage to extract further information
    request = compat_urllib_request.Request(url, None, headers)
    try:
        webpage = self._download_webpage(request, video_id)
    except ExtractorError as ee:
        # A 403 carrying this marker means the video may only be played
        # from the page that embeds it; re-raise with a helpful hint.
        if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
            errmsg = ee.cause.read()
            if b'Because of its privacy settings, this video cannot be played here' in errmsg:
                raise ExtractorError(
                    'Cannot download embed-only video without embedding '
                    'URL. Please call youtube-dl with the URL of the page '
                    'that embeds this video.',
                    expected=True)
        raise
    # Now we begin extracting as much information as we can from what we
    # retrieved. First we extract the information common to all extractors,
    # and latter we extract those that are Vimeo specific.
    self.report_extraction(video_id)
    vimeo_config = self._search_regex(
        r'vimeo\.config\s*=\s*({.+?});', webpage,
        'vimeo config', default=None)
    if vimeo_config:
        # A failed seed_status indicates a Vimeo-side error page.
        seed_status = self._parse_json(vimeo_config, video_id).get('seed_status', {})
        if seed_status.get('state') == 'failed':
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, seed_status['title']),
                expected=True)
    # Extract the config JSON
    try:
        try:
            config_url = self._html_search_regex(
                r' data-config-url="(.+?)"', webpage, 'config URL')
            config_json = self._download_webpage(config_url, video_id)
            config = json.loads(config_json)
        except RegexNotFoundError:
            # For pro videos or player.vimeo.com urls
            # We try to find out to which variable is assigned the config dic
            m_variable_name = re.search('(\w)\.video\.id', webpage)
            if m_variable_name is not None:
                config_re = r'%s=({[^}].+?});' % re.escape(m_variable_name.group(1))
            else:
                config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
            config = self._search_regex(config_re, webpage, 'info section',
                                        flags=re.DOTALL)
            config = json.loads(config)
    except Exception as e:
        if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
            raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')
        if re.search(r'<form[^>]+?id="pw_form"', webpage) is not None:
            # Password-protected video: verify the password once, then
            # retry the whole extraction; the smuggled marker prevents an
            # infinite verification loop.
            if data and '_video_password_verified' in data:
                raise ExtractorError('video password verification failed!')
            self._verify_video_password(url, video_id, webpage)
            return self._real_extract(
                smuggle_url(url, {'_video_password_verified': 'verified'}))
        else:
            raise ExtractorError('Unable to extract info section',
                                 cause=e)
    else:
        # view == 4 appears to mark player pages that demand a password
        # check before handing out the real config — TODO confirm.
        if config.get('view') == 4:
            config = self._verify_player_video_password(url, video_id)
    # Extract title
    video_title = config["video"]["title"]
    # Extract uploader and uploader_id
    video_uploader = config["video"]["owner"]["name"]
    video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None
    # Extract video thumbnail
    video_thumbnail = config["video"].get("thumbnail")
    if video_thumbnail is None:
        video_thumbs = config["video"].get("thumbs")
        if video_thumbs and isinstance(video_thumbs, dict):
            # Pick the thumbnail with the largest numeric width key.
            _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
    # Extract video description
    video_description = self._html_search_regex(
        r'(?s)<div\s+class="[^"]*description[^"]*"[^>]*>(.*?)</div>',
        webpage, 'description', default=None)
    if not video_description:
        video_description = self._html_search_meta(
            'description', webpage, default=None)
    if not video_description and mobj.group('pro'):
        # Pro pages keep the description on the original (non-player) URL.
        orig_webpage = self._download_webpage(
            orig_url, video_id,
            note='Downloading webpage for description',
            fatal=False)
        if orig_webpage:
            video_description = self._html_search_meta(
                'description', orig_webpage, default=None)
    if not video_description and not mobj.group('player'):
        self._downloader.report_warning('Cannot find video description')
    # Extract video duration
    video_duration = int_or_none(config["video"].get("duration"))
    # Extract upload date
    video_upload_date = None
    mobj = re.search(r'<time[^>]+datetime="([^"]+)"', webpage)
    if mobj is not None:
        video_upload_date = unified_strdate(mobj.group(1))
    try:
        view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
        like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
        comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
    except RegexNotFoundError:
        # This info is only available in vimeo.com/{id} urls
        view_count = None
        like_count = None
        comment_count = None
    # Vimeo specific: extract request signature and timestamp
    sig = config['request']['signature']
    timestamp = config['request']['timestamp']
    # Vimeo specific: extract video codec and quality information
    # First consider quality, then codecs, then take everything
    codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
    files = {'hd': [], 'sd': [], 'other': []}
    config_files = config["video"].get("files") or config["request"].get("files")
    for codec_name, codec_extension in codecs:
        for quality in config_files.get(codec_name, []):
            format_id = '-'.join((codec_name, quality)).lower()
            key = quality if quality in files else 'other'
            video_url = None
            if isinstance(config_files[codec_name], dict):
                file_info = config_files[codec_name][quality]
                video_url = file_info.get('url')
            else:
                file_info = {}
            if video_url is None:
                # Fall back to the legacy play_redirect endpoint when the
                # config carries no direct file URL.
                video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
                    % (video_id, sig, timestamp, quality, codec_name.upper())
            files[key].append({
                'ext': codec_extension,
                'url': video_url,
                'format_id': format_id,
                'width': file_info.get('width'),
                'height': file_info.get('height'),
            })
    # Order formats worst-to-best: 'other' first, then sd, then hd.
    formats = []
    for key in ('other', 'sd', 'hd'):
        formats += files[key]
    if len(formats) == 0:
        raise ExtractorError('No known codec found')
    subtitles = {}
    text_tracks = config['request'].get('text_tracks')
    if text_tracks:
        for tt in text_tracks:
            subtitles[tt['lang']] = [{
                'ext': 'vtt',
                'url': 'https://vimeo.com' + tt['url'],
            }]
    return {
        'id': video_id,
        'uploader': video_uploader,
        'uploader_id': video_uploader_id,
        'upload_date': video_upload_date,
        'title': video_title,
        'thumbnail': video_thumbnail,
        'description': video_description,
        'duration': video_duration,
        'formats': formats,
        'webpage_url': url,
        'view_count': view_count,
        'like_count': like_count,
        'comment_count': comment_count,
        'subtitles': subtitles,
    }
class VimeoChannelIE(VimeoBaseInfoExtractor):
    """Playlist extractor for Vimeo channels.

    Also the base class for the user/album/group/watch-later extractors
    below, which override the pagination and title hooks.
    """
    IE_NAME = 'vimeo:channel'
    _VALID_URL = r'https://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
    # Presence of a rel="next" link means there are more result pages.
    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
    _TITLE = None
    _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
    _TESTS = [{
        'url': 'https://vimeo.com/channels/tributes',
        'info_dict': {
            'id': 'tributes',
            'title': 'Vimeo Tributes',
        },
        'playlist_mincount': 25,
    }]

    def _page_url(self, base_url, pagenum):
        # Hook: subclasses override this to build their own page URLs.
        return '%s/videos/page:%d/' % (base_url, pagenum)

    def _extract_list_title(self, webpage):
        # A static _TITLE wins over scraping the page with _TITLE_RE.
        return self._TITLE or self._html_search_regex(self._TITLE_RE, webpage, 'list title')

    def _login_list_password(self, page_url, list_id, webpage):
        """Submit the list password form, if any; return the list webpage."""
        login_form = self._search_regex(
            r'(?s)<form[^>]+?id="pw_form"(.*?)</form>',
            webpage, 'login form', default=None)
        if not login_form:
            # No password form -> public list, nothing to do.
            return webpage
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This album is protected by a password, use the --video-password option', expected=True)
        fields = self._hidden_inputs(login_form)
        token = self._extract_xsrft(webpage)
        fields['token'] = token
        fields['password'] = password
        post = urlencode_postdata(fields)
        password_path = self._search_regex(
            r'action="([^"]+)"', login_form, 'password URL')
        password_url = compat_urlparse.urljoin(page_url, password_path)
        password_request = compat_urllib_request.Request(password_url, post)
        password_request.add_header('Content-type', 'application/x-www-form-urlencoded')
        self._set_cookie('vimeo.com', 'xsrft', token)
        return self._download_webpage(
            password_request, list_id,
            'Verifying the password', 'Wrong password')

    def _extract_videos(self, list_id, base_url):
        """Walk every result page and collect clip ids into a playlist dict."""
        video_ids = []
        for pagenum in itertools.count(1):
            page_url = self._page_url(base_url, pagenum)
            webpage = self._download_webpage(
                page_url, list_id,
                'Downloading page %s' % pagenum)
            if pagenum == 1:
                # The password challenge only appears on the first page.
                webpage = self._login_list_password(page_url, list_id, webpage)
            video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break
        entries = [self.url_result('https://vimeo.com/%s' % video_id, 'Vimeo')
                   for video_id in video_ids]
        # The title is scraped from the last fetched page.
        return {'_type': 'playlist',
                'id': list_id,
                'title': self._extract_list_title(webpage),
                'entries': entries,
                }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        return self._extract_videos(channel_id, 'https://vimeo.com/channels/%s' % channel_id)
class VimeoUserIE(VimeoChannelIE):
    """Playlist extractor for all videos uploaded by a Vimeo user."""
    IE_NAME = 'vimeo:user'
    _VALID_URL = r'https://vimeo\.com/(?!(?:[0-9]+|watchlater)(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
    _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
    _TESTS = [{
        'url': 'https://vimeo.com/nkistudio/videos',
        'info_dict': {
            'title': 'Nki',
            'id': 'nkistudio',
        },
        'playlist_mincount': 66,
    }]

    def _real_extract(self, url):
        # The user name doubles as both the playlist id and the URL path.
        user_name = re.match(self._VALID_URL, url).group('name')
        return self._extract_videos(user_name, 'https://vimeo.com/%s' % user_name)
class VimeoAlbumIE(VimeoChannelIE):
    """Playlist extractor for Vimeo albums, including password-protected ones."""
    IE_NAME = 'vimeo:album'
    _VALID_URL = r'https://vimeo\.com/album/(?P<id>\d+)'
    _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
    _TESTS = [{
        'url': 'https://vimeo.com/album/2632481',
        'info_dict': {
            'id': '2632481',
            'title': 'Staff Favorites: November 2013',
        },
        'playlist_mincount': 13,
    }, {
        'note': 'Password-protected album',
        'url': 'https://vimeo.com/album/3253534',
        'info_dict': {
            'title': 'test',
            'id': '3253534',
        },
        'playlist_count': 1,
        'params': {
            'videopassword': 'youtube-dl',
        }
    }]

    def _page_url(self, base_url, pagenum):
        # Albums paginate directly below the album URL (no /videos segment).
        return '{0}/page:{1}/'.format(base_url, pagenum)

    def _real_extract(self, url):
        album_id = self._match_id(url)
        return self._extract_videos(album_id, 'https://vimeo.com/album/' + album_id)
class VimeoGroupsIE(VimeoAlbumIE):
    """Playlist extractor for Vimeo groups."""
    IE_NAME = 'vimeo:group'
    _VALID_URL = r'https://vimeo\.com/groups/(?P<name>[^/]+)'
    _TESTS = [{
        'url': 'https://vimeo.com/groups/rolexawards',
        'info_dict': {
            'id': 'rolexawards',
            'title': 'Rolex Awards for Enterprise',
        },
        'playlist_mincount': 73,
    }]

    def _extract_list_title(self, webpage):
        # Group pages expose their title via Open Graph metadata.
        return self._og_search_title(webpage)

    def _real_extract(self, url):
        group_name = re.match(self._VALID_URL, url).group('name')
        return self._extract_videos(group_name, 'https://vimeo.com/groups/%s' % group_name)
class VimeoReviewIE(InfoExtractor):
    """Extractor for Vimeo review pages; hands the clip off to the Vimeo IE."""
    IE_NAME = 'vimeo:review'
    IE_DESC = 'Review pages on vimeo'
    _VALID_URL = r'https://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
        'md5': 'c507a72f780cacc12b2248bb4006d253',
        'info_dict': {
            'id': '75524534',
            'ext': 'mp4',
            'title': "DICK HARDWICK 'Comedian'",
            'uploader': 'Richard Hardwick',
        }
    }, {
        'note': 'video player needs Referer',
        'url': 'https://vimeo.com/user22258446/review/91613211/13f927e053',
        'md5': '6295fdab8f4bf6a002d058b2c6dce276',
        'info_dict': {
            'id': '91613211',
            'ext': 'mp4',
            'title': 're:(?i)^Death by dogma versus assembling agile . Sander Hoogendoorn',
            'uploader': 'DevWeek Events',
            'duration': 2773,
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        return self.url_result(
            'https://player.vimeo.com/player/' + video_id, 'Vimeo', video_id)
class VimeoWatchLaterIE(VimeoChannelIE):
    """Extractor for the authenticated user's "Watch Later" queue."""
    IE_NAME = 'vimeo:watchlater'
    IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
    _VALID_URL = r'https://vimeo\.com/(?:home/)?watchlater|:vimeowatchlater'
    _TITLE = 'Watch Later'
    _LOGIN_REQUIRED = True
    _TESTS = [{
        'url': 'https://vimeo.com/watchlater',
        'only_matching': True,
    }]

    def _real_initialize(self):
        self._login()

    def _page_url(self, base_url, pagenum):
        page_request = compat_urllib_request.Request('%s/page:%d/' % (base_url, pagenum))
        # Ask for the AJAX fragment: the normal HTML page does not contain
        # the clip ids that _extract_videos() scrapes.
        page_request.add_header('X-Requested-With', 'XMLHttpRequest')
        return page_request

    def _real_extract(self, url):
        return self._extract_videos('watchlater', 'https://vimeo.com/watchlater')
class VimeoLikesIE(InfoExtractor):
    """Playlist extractor for the videos a Vimeo user has liked."""
    _VALID_URL = r'https://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
    IE_NAME = 'vimeo:likes'
    IE_DESC = 'Vimeo user likes'
    _TEST = {
        'url': 'https://vimeo.com/user755559/likes/',
        'playlist_mincount': 293,
        "info_dict": {
            'id': 'user755559_likes',
            "description": "See all the videos urza likes",
            "title": 'Videos urza likes',
        },
    }

    def _real_extract(self, url):
        user_id = self._match_id(url)
        webpage = self._download_webpage(url, user_id)
        # Total number of result pages, read from the pagination widget.
        page_count = self._int(
            self._search_regex(
                r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
                .*?</a></li>\s*<li\s+class="pagination_next">
                ''', webpage, 'page count'),
            'page count', fatal=True)
        # Likes pages appear to hold 12 entries each — TODO confirm.
        PAGE_SIZE = 12
        title = self._html_search_regex(
            r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
        description = self._html_search_meta('description', webpage)

        def _get_page(idx):
            # Page generator handed to InAdvancePagedList; idx is 0-based.
            page_url = 'https://vimeo.com/user%s/likes/page:%d/sort:date' % (
                user_id, idx + 1)
            webpage = self._download_webpage(
                page_url, user_id,
                note='Downloading page %d/%d' % (idx + 1, page_count))
            video_list = self._search_regex(
                r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
                webpage, 'video content')
            paths = re.findall(
                r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
            for path in paths:
                yield {
                    '_type': 'url',
                    'url': compat_urlparse.urljoin(page_url, path),
                }

        pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)
        return {
            '_type': 'playlist',
            'id': 'user%s_likes' % user_id,
            'title': title,
            'description': description,
            'entries': pl,
        }
|
dav1x/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py | 5 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module maturity/support metadata consumed by the Ansible tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
# ansible-doc YAML: module options, notes and documentation fragments.
DOCUMENTATION = '''
---
module: ec2_snapshot_facts
short_description: Gather facts about ec2 volume snapshots in AWS
description:
- Gather facts about ec2 volume snapshots in AWS
version_added: "2.1"
author: "Rob White (@wimnat)"
options:
snapshot_ids:
description:
- If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.
required: false
default: []
owner_ids:
description:
- If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have \
access are returned.
required: false
default: []
restorable_by_user_ids:
description:
- If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are \
returned.
required: false
default: []
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter \
names and values are case sensitive.
required: false
default: {}
notes:
- By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by \
the account use the filter 'owner-id'.
extends_documentation_fragment:
- aws
- ec2
'''
# ansible-doc YAML: usage examples for the module.
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all snapshots, including public ones
- ec2_snapshot_facts:
# Gather facts about all snapshots owned by the account 0123456789
- ec2_snapshot_facts:
filters:
owner-id: 0123456789
# Or alternatively...
- ec2_snapshot_facts:
owner_ids:
- 0123456789
# Gather facts about a particular snapshot using ID
- ec2_snapshot_facts:
filters:
snapshot-id: snap-00112233
# Or alternatively...
- ec2_snapshot_facts:
snapshot_ids:
- snap-00112233
# Gather facts about any snapshot with a tag key Name and value Example
- ec2_snapshot_facts:
filters:
"tag:Name": Example
# Gather facts about any snapshot with an error status
- ec2_snapshot_facts:
filters:
status: error
'''
# ansible-doc YAML: documentation of the returned snapshot fields.
RETURN = '''
snapshot_id:
description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
type: string
returned: always
sample: snap-01234567
volume_id:
description: The ID of the volume that was used to create the snapshot.
type: string
returned: always
sample: vol-01234567
state:
description: The snapshot state (completed, pending or error).
type: string
returned: always
sample: completed
state_message:
description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper
AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the
error occurred.
type: string
returned: always
sample:
start_time:
description: The time stamp when the snapshot was initiated.
type: string
returned: always
sample: "2015-02-12T02:14:02+00:00"
progress:
description: The progress of the snapshot, as a percentage.
type: string
returned: always
sample: "100%"
owner_id:
description: The AWS account ID of the EBS snapshot owner.
type: string
returned: always
sample: "099720109477"
description:
description: The description for the snapshot.
type: string
returned: always
sample: "My important backup"
volume_size:
description: The size of the volume, in GiB.
type: int
returned: always
sample: 8
owner_alias:
description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.
type: string
returned: always
sample: "033440102211"
tags:
description: Any tags assigned to the snapshot.
type: dict
returned: always
sample: "{ 'my_tag_key': 'my_tag_value' }"
encrypted:
description: Indicates whether the snapshot is encrypted.
type: boolean
returned: always
sample: "True"
kms_key_id:
description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \
protect the volume encryption key for the parent volume.
type: string
returned: always
sample: "74c9742a-a1b2-45cb-b3fe-abcdef123456"
data_encryption_key_id:
description: The data encryption key identifier for the snapshot. This value is a unique identifier that \
corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.
type: string
returned: always
sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456"
'''
# boto3/botocore are an optional dependency: record availability here and
# let main() fail with a clean error message instead of an ImportError.
try:
    import boto3
    from botocore.exceptions import ClientError, NoCredentialsError
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def list_ec2_snapshots(connection, module):
    """Describe EBS snapshots matching the module parameters and exit.

    Calls DescribeSnapshots with the user-supplied snapshot ids, owner ids,
    restorable-by user ids and filters, converts the result to snake_case
    with ansible-friendly tag dicts, then exits the module with the list.
    """
    snapshot_ids = module.params.get("snapshot_ids")
    # Coerce owner ids to strings AND materialize the list: on Python 3
    # map() returns a lazy iterator, which botocore cannot serialize as
    # the OwnerIds request parameter.
    owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")]
    restorable_by_user_ids = module.params.get("restorable_by_user_ids")
    filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))

    try:
        snapshots = connection.describe_snapshots(
            SnapshotIds=snapshot_ids, OwnerIds=owner_ids,
            RestorableByUserIds=restorable_by_user_ids, Filters=filters)
    except ClientError as e:
        # ClientError has no .message attribute on Python 3; str(e) works
        # on both Python 2 and 3.
        module.fail_json(msg=str(e))

    # Turn the boto3 result in to ansible_friendly_snaked_names
    snaked_snapshots = [camel_dict_to_snake_dict(snapshot)
                        for snapshot in snapshots['Snapshots']]

    # Turn the boto3 result in to ansible friendly tag dictionary
    for snapshot in snaked_snapshots:
        if 'tags' in snapshot:
            snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'])

    module.exit_json(snapshots=snaked_snapshots)
def main():
    """Module entry point: build the argument spec, connect to EC2, run."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        snapshot_ids=dict(default=[], type='list'),
        owner_ids=dict(default=[], type='list'),
        restorable_by_user_ids=dict(default=[], type='list'),
        filters=dict(default={}, type='dict'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        # The four snapshot-selection mechanisms may not be combined.
        mutually_exclusive=[
            ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
        ],
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(
            module, conn_type='client', resource='ec2',
            region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")

    list_ec2_snapshots(connection, module)
# Standard Ansible module entry point: only run when executed as a script.
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.