| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
| pbrady/sympy | refs/heads/master | sympy/functions/combinatorial/tests/__init__.py | 12133432 | |
| pshen/ansible | refs/heads/devel | lib/ansible/modules/remote_management/foreman/__init__.py | 12133432 | |
| etzhou/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/tests/test_videoannotation.py | 92 |
# -*- coding: utf-8 -*-
"Test for Annotation Xmodule functional logic."
import unittest
from mock import Mock
from lxml import etree
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.videoannotation_module import VideoAnnotationModule
from . import get_test_system
class VideoAnnotationModuleTestCase(unittest.TestCase):
''' Video Annotation Module Test Case '''
sample_xml = '''
<annotatable>
<instructions><p>Video Test Instructions.</p></instructions>
</annotatable>
'''
sample_sourceurl = "http://video-js.zencoder.com/oceans-clip.mp4"
sample_youtubeurl = "http://www.youtube.com/watch?v=yxLIu-scR9Y"
def setUp(self):
"""
Makes sure that the Video Annotation Module is created.
"""
super(VideoAnnotationModuleTestCase, self).setUp()
# return anything except None to test LMS
def test_real_user(useless):
useless_user = Mock(email='fake@fake.com', id=useless)
return useless_user
# test to make sure that role is checked in LMS
def test_user_role():
return 'staff'
self.system = get_test_system()
self.system.get_real_user = test_real_user
self.system.get_user_role = test_user_role
self.system.anonymous_student_id = None
self.mod = VideoAnnotationModule(
Mock(),
self.system,
DictFieldData({'data': self.sample_xml, 'sourceUrl': self.sample_sourceurl}),
ScopeIds(None, None, None, None)
)
def test_extract_instructions(self):
"""
        This test ensures that, if instructions exist, they are extracted and
        formatted from the <instructions> tag. Otherwise, it should return nothing.
"""
xmltree = etree.fromstring(self.sample_xml)
expected_xml = u"<div><p>Video Test Instructions.</p></div>"
actual_xml = self.mod._extract_instructions(xmltree) # pylint: disable=protected-access
self.assertIsNotNone(actual_xml)
self.assertEqual(expected_xml.strip(), actual_xml.strip())
xmltree = etree.fromstring('<annotatable>foo</annotatable>')
actual = self.mod._extract_instructions(xmltree) # pylint: disable=protected-access
self.assertIsNone(actual)
def test_get_extension(self):
"""
Tests the function that returns the appropriate extension depending on whether it is
        a video from YouTube or one uploaded to the edX server.
"""
expectedyoutube = 'video/youtube'
expectednotyoutube = 'video/mp4'
result1 = self.mod._get_extension(self.sample_sourceurl) # pylint: disable=protected-access
result2 = self.mod._get_extension(self.sample_youtubeurl) # pylint: disable=protected-access
self.assertEqual(expectedyoutube, result2)
self.assertEqual(expectednotyoutube, result1)
def test_student_view(self):
"""
Tests to make sure variables passed in truly exist within the html once it is all rendered.
"""
context = self.mod.student_view({}).content
        for key in ['display_name', 'instructions_html', 'sourceUrl',
                    'typeSource', 'poster', 'annotation_storage', 'default_tab',
                    'instructor_email', 'annotation_mode', 'is_course_staff']:
self.assertIn(key, context)
|
| alistairlow/tensorflow | refs/heads/master | tensorflow/python/ops/distributions/bijector.py | 53 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bijector base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.ops.distributions.bijector_impl import Bijector
# pylint: enable=wildcard-import,unused-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["Bijector"]
remove_undocumented(__name__, _allowed_symbols)
|
| hosseinmh/jango_learning | refs/heads/master | .venv/lib/python2.7/site-packages/django/shortcuts.py | 117 |
"""
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import loader
from django.urls import NoReverseMatch, reverse
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import Promise
def render_to_response(template_name, context=None, content_type=None, status=None, using=None):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
content = loader.render_to_string(template_name, context, using=using)
return HttpResponse(content, content_type, status)
def render(request, template_name, context=None, content_type=None, status=None, using=None):
"""
    Returns an HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
content = loader.render_to_string(template_name, context, request, using=using)
return HttpResponse(content, content_type, status)
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urls.reverse()` will be used
to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
By default issues a temporary redirect; pass permanent=True to issue a
permanent redirect
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
def _get_queryset(klass):
"""
Return a QuerySet or a Manager.
Duck typing in action: any class with a `get()` method (for
get_object_or_404) or a `filter()` method (for get_list_or_404) might do
the job.
"""
# If it is a model class or anything else with ._default_manager
if hasattr(klass, '_default_manager'):
return klass._default_manager.all()
return klass
def get_object_or_404(klass, *args, **kwargs):
"""
    Uses get() to return an object, or raises an Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
    Note: As with get(), a MultipleObjectsReturned will be raised if more than
    one object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except AttributeError:
klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
raise ValueError(
"First argument to get_object_or_404() must be a Model, Manager, "
"or QuerySet, not '%s'." % klass__name
)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
"""
    Uses filter() to return a list of objects, or raises an Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
try:
obj_list = list(queryset.filter(*args, **kwargs))
except AttributeError:
klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
raise ValueError(
"First argument to get_list_or_404() must be a Model, Manager, or "
"QuerySet, not '%s'." % klass__name
)
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urls.reverse()` will be used
to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
if isinstance(to, Promise):
# Expand the lazy instance, as it can cause issues when it is passed
# further to some Python functions like urlparse.
to = force_text(to)
if isinstance(to, six.string_types):
# Handle relative URLs
if to.startswith(('./', '../')):
return to
# Next try a reverse URL resolution.
try:
return reverse(to, args=args, kwargs=kwargs)
except NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
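
# Editor's usage sketch (hypothetical app code, not part of this module);
# ``Poll`` and the 'poll-archive' URL name are assumed for illustration:
#
#     from django.shortcuts import get_object_or_404, redirect, render
#
#     def poll_results(request, poll_id):
#         poll = get_object_or_404(Poll, pk=poll_id)  # raises Http404 if absent
#         if poll.closed:
#             return redirect('poll-archive', permanent=True)
#         return render(request, 'polls/results.html', {'poll': poll})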
|
| ulope/django | refs/heads/master | django/core/serializers/__init__.py | 121 |
"""
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv": "path.to.csv.serializer",
"txt": "path.to.txt.serializer",
}
"""
import importlib
from django.apps import apps
from django.conf import settings
from django.utils import six
from django.core.serializers.base import SerializerDoesNotExist
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml": "django.core.serializers.xml_serializer",
"python": "django.core.serializers.python",
"json": "django.core.serializers.json",
"yaml": "django.core.serializers.pyyaml",
}
_serializers = {}
class BadSerializer(object):
"""
Stub serializer to hold exception raised during registration
    This allows serializer registration to cache serializers: if an error is
    raised while creating a serializer, it is stored and re-raised when the
    serializer is actually used.
"""
internal_use_only = False
def __init__(self, exception):
self.exception = exception
def __call__(self, *args, **kwargs):
raise self.exception
def register_serializer(format, serializer_module, serializers=None):
"""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
if serializers is None and not _serializers:
_load_serializers()
try:
module = importlib.import_module(serializer_module)
except ImportError as exc:
bad_serializer = BadSerializer(exc)
module = type('BadSerializerModule', (object,), {
'Deserializer': bad_serializer,
'Serializer': bad_serializer,
})
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return list(_serializers)
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in six.iteritems(_serializers) if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
def sort_dependencies(app_list):
"""Sort a list of (app_config, models) pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
    dependency has its dependencies serialized first.
"""
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app_config, model_list in app_list:
if model_list is None:
model_list = app_config.get_models()
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [apps.get_model(dep) for dep in deps]
else:
deps = []
# Now add a dependency for any FK relation with a model that
# defines a natural key
for field in model._meta.fields:
if hasattr(field.rel, 'to'):
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
# Also add a dependency for any simple M2M relation with a model
# that defines a natural key. M2M relations with explicit through
# models don't count as dependencies.
for field in model._meta.many_to_many:
if field.rel.through._meta.auto_created:
rel_model = field.rel.to
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
            # then we've found another model with all of its dependencies satisfied.
found = True
for candidate in ((d not in models or d in model_list) for d in deps):
if not candidate:
found = False
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise RuntimeError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
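
# Editor's usage sketch (hypothetical app code, not part of this module):
# round-trip a queryset through the JSON serializer; ``Author`` is an assumed
# model. Each deserialized wrapper holds an unsaved instance plus its m2m
# data, and wrapper.save() persists both:
#
#     from django.core import serializers
#
#     payload = serializers.serialize("json", Author.objects.all())
#     for wrapper in serializers.deserialize("json", payload):
#         wrapper.save()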
|
| lakshayg/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/division_future_test.py | 79 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for division with division imported from __future__.
This file should be exactly the same as division_past_test.py except
for the __future__ division line.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class DivisionTestCase(test.TestCase):
def testDivision(self):
"""Test all the different ways to divide."""
values = [1, 2, 7, 11]
functions = (lambda x: x), constant_op.constant
# TODO(irving): Test int8, int16 once we support casts for those.
dtypes = np.int32, np.int64, np.float32, np.float64
tensors = []
checks = []
def check(x, y):
x = ops.convert_to_tensor(x)
y = ops.convert_to_tensor(y)
tensors.append((x, y))
def f(x, y):
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x, y)
checks.append(f)
with self.test_session() as sess:
for dtype in dtypes:
for x in map(dtype, values):
for y in map(dtype, values):
for fx in functions:
for fy in functions:
tf_x = fx(x)
tf_y = fy(y)
div = x / y
tf_div = tf_x / tf_y
check(div, tf_div)
floordiv = x // y
tf_floordiv = tf_x // tf_y
check(floordiv, tf_floordiv)
# Do only one sess.run for speed
for f, (x, y) in zip(checks, sess.run(tensors)):
f(x, y)
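
# Editor's note (illustration, not part of the original test): with
# ``from __future__ import division`` in effect, ``/`` is true division and
# ``//`` is floor division; testDivision checks the TensorFlow operators
# against exactly these Python semantics.
assert 7 / 2 == 3.5 and 7 // 2 == 3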
if __name__ == "__main__":
test.main()
|
| WhireCrow/openwrt-mt7620 | refs/heads/master | staging_dir/host/lib/python2.7/test/test_zlib.py | 29 |
import unittest
from test.test_support import TESTFN, run_unittest, import_module, unlink, requires
import binascii
import random
from test.test_support import precisionbigmemtest, _1G, _4G
import sys
try:
import mmap
except ImportError:
mmap = None
zlib = import_module('zlib')
class ChecksumTestCase(unittest.TestCase):
# checksum test cases
def test_crc32start(self):
self.assertEqual(zlib.crc32(""), zlib.crc32("", 0))
self.assertTrue(zlib.crc32("abc", 0xffffffff))
def test_crc32empty(self):
self.assertEqual(zlib.crc32("", 0), 0)
self.assertEqual(zlib.crc32("", 1), 1)
self.assertEqual(zlib.crc32("", 432), 432)
def test_adler32start(self):
self.assertEqual(zlib.adler32(""), zlib.adler32("", 1))
self.assertTrue(zlib.adler32("abc", 0xffffffff))
def test_adler32empty(self):
self.assertEqual(zlib.adler32("", 0), 0)
self.assertEqual(zlib.adler32("", 1), 1)
self.assertEqual(zlib.adler32("", 432), 432)
def assertEqual32(self, seen, expected):
# 32-bit values masked -- checksums on 32- vs 64- bit machines
# This is important if bit 31 (0x08000000L) is set.
self.assertEqual(seen & 0x0FFFFFFFFL, expected & 0x0FFFFFFFFL)
def test_penguins(self):
self.assertEqual32(zlib.crc32("penguin", 0), 0x0e5c1a120L)
self.assertEqual32(zlib.crc32("penguin", 1), 0x43b6aa94)
self.assertEqual32(zlib.adler32("penguin", 0), 0x0bcf02f6)
self.assertEqual32(zlib.adler32("penguin", 1), 0x0bd602f7)
self.assertEqual(zlib.crc32("penguin"), zlib.crc32("penguin", 0))
self.assertEqual(zlib.adler32("penguin"),zlib.adler32("penguin",1))
def test_abcdefghijklmnop(self):
"""test issue1202 compliance: signed crc32, adler32 in 2.x"""
foo = 'abcdefghijklmnop'
# explicitly test signed behavior
self.assertEqual(zlib.crc32(foo), -1808088941)
self.assertEqual(zlib.crc32('spam'), 1138425661)
self.assertEqual(zlib.adler32(foo+foo), -721416943)
self.assertEqual(zlib.adler32('spam'), 72286642)
def test_same_as_binascii_crc32(self):
foo = 'abcdefghijklmnop'
self.assertEqual(binascii.crc32(foo), zlib.crc32(foo))
self.assertEqual(binascii.crc32('spam'), zlib.crc32('spam'))
def test_negative_crc_iv_input(self):
# The range of valid input values for the crc state should be
        # -2**31 through 2**32-1 to allow inputs artificially constrained
# to a signed 32-bit integer.
self.assertEqual(zlib.crc32('ham', -1), zlib.crc32('ham', 0xffffffffL))
self.assertEqual(zlib.crc32('spam', -3141593),
zlib.crc32('spam', 0xffd01027L))
self.assertEqual(zlib.crc32('spam', -(2**31)),
zlib.crc32('spam', (2**31)))
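
# Editor's sketch (illustration, not part of the original suite): the signed
# results asserted above are Python 2 behaviour; masking yields a stable
# unsigned checksum across platforms, as the zlib module docs recommend.
def unsigned_crc32(data, value=0):
    # Mask to 32 bits for a version- and platform-independent unsigned value.
    return zlib.crc32(data, value) & 0xffffffff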
class ExceptionTestCase(unittest.TestCase):
# make sure we generate some expected errors
def test_badlevel(self):
# specifying compression level out of range causes an error
        # (but -1 is Z_DEFAULT_COMPRESSION and apparently zlib
# accepts 0 too)
self.assertRaises(zlib.error, zlib.compress, 'ERROR', 10)
def test_badcompressobj(self):
# verify failure on building compress object with bad params
self.assertRaises(ValueError, zlib.compressobj, 1, zlib.DEFLATED, 0)
# specifying total bits too large causes an error
self.assertRaises(ValueError,
zlib.compressobj, 1, zlib.DEFLATED, zlib.MAX_WBITS + 1)
def test_baddecompressobj(self):
# verify failure on building decompress object with bad params
self.assertRaises(ValueError, zlib.decompressobj, -1)
def test_decompressobj_badflush(self):
# verify failure on calling decompressobj.flush with bad params
self.assertRaises(ValueError, zlib.decompressobj().flush, 0)
self.assertRaises(ValueError, zlib.decompressobj().flush, -1)
class BaseCompressTestCase(object):
def check_big_compress_buffer(self, size, compress_func):
_1M = 1024 * 1024
fmt = "%%0%dx" % (2 * _1M)
        # Generate 10MB worth of random data, and expand it by repeating it.
# The assumption is that zlib's memory is not big enough to exploit
# such spread out redundancy.
data = ''.join([binascii.a2b_hex(fmt % random.getrandbits(8 * _1M))
for i in range(10)])
data = data * (size // len(data) + 1)
try:
compress_func(data)
finally:
# Release memory
data = None
def check_big_decompress_buffer(self, size, decompress_func):
data = 'x' * size
try:
compressed = zlib.compress(data, 1)
finally:
# Release memory
data = None
data = decompress_func(compressed)
# Sanity check
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip('x')), 0)
finally:
data = None
class CompressTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression in one go (whole message compression)
def test_speech(self):
x = zlib.compress(HAMLET_SCENE)
self.assertEqual(zlib.decompress(x), HAMLET_SCENE)
def test_speech128(self):
# compress more data
data = HAMLET_SCENE * 128
x = zlib.compress(data)
self.assertEqual(zlib.decompress(x), data)
def test_incomplete_stream(self):
        # A useful error message is given
x = zlib.compress(HAMLET_SCENE)
self.assertRaisesRegexp(zlib.error,
"Error -5 while decompressing data: incomplete or truncated stream",
zlib.decompress, x[:-1])
# Memory use of the following functions takes into account overallocation
@precisionbigmemtest(size=_1G + 1024 * 1024, memuse=3)
def test_big_compress_buffer(self, size):
compress = lambda s: zlib.compress(s, 1)
self.check_big_compress_buffer(size, compress)
@precisionbigmemtest(size=_1G + 1024 * 1024, memuse=2)
def test_big_decompress_buffer(self, size):
self.check_big_decompress_buffer(size, zlib.decompress)
class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression object
def test_pair(self):
# straightforward compress/decompress objects
data = HAMLET_SCENE * 128
co = zlib.compressobj()
x1 = co.compress(data)
x2 = co.flush()
self.assertRaises(zlib.error, co.flush) # second flush should not work
dco = zlib.decompressobj()
y1 = dco.decompress(x1 + x2)
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
def test_compressoptions(self):
# specify lots of options to compressobj()
level = 2
method = zlib.DEFLATED
wbits = -12
memlevel = 9
strategy = zlib.Z_FILTERED
co = zlib.compressobj(level, method, wbits, memlevel, strategy)
x1 = co.compress(HAMLET_SCENE)
x2 = co.flush()
dco = zlib.decompressobj(wbits)
y1 = dco.decompress(x1 + x2)
y2 = dco.flush()
self.assertEqual(HAMLET_SCENE, y1 + y2)
def test_compressincremental(self):
# compress object in steps, decompress object as one-shot
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = ''.join(bufs)
dco = zlib.decompressobj()
y1 = dco.decompress(''.join(bufs))
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
def test_decompinc(self, flush=False, source=None, cx=256, dcx=64):
# compress object in steps, decompress object in steps
source = source or HAMLET_SCENE
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = ''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf))
dco = zlib.decompressobj()
bufs = []
for i in range(0, len(combuf), dcx):
bufs.append(dco.decompress(combuf[i:i+dcx]))
self.assertEqual('', dco.unconsumed_tail, ########
"(A) uct should be '': not %d long" %
len(dco.unconsumed_tail))
if flush:
bufs.append(dco.flush())
else:
while True:
chunk = dco.decompress('')
if chunk:
bufs.append(chunk)
else:
break
self.assertEqual('', dco.unconsumed_tail, ########
"(B) uct should be '': not %d long" %
len(dco.unconsumed_tail))
self.assertEqual(data, ''.join(bufs))
# Failure means: "decompressobj with init options failed"
def test_decompincflush(self):
self.test_decompinc(flush=True)
def test_decompimax(self, source=None, cx=256, dcx=64):
# compress in steps, decompress in length-restricted steps
source = source or HAMLET_SCENE
# Check a decompression object with max_length specified
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = ''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
#max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, dcx)
self.assertFalse(len(chunk) > dcx,
'chunk too big (%d>%d)' % (len(chunk), dcx))
bufs.append(chunk)
cb = dco.unconsumed_tail
bufs.append(dco.flush())
self.assertEqual(data, ''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlen(self, flush=False):
# Check a decompression object with max_length specified
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = ''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, max_length)
self.assertFalse(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
cb = dco.unconsumed_tail
if flush:
bufs.append(dco.flush())
else:
while chunk:
chunk = dco.decompress('', max_length)
self.assertFalse(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
self.assertEqual(data, ''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlenflush(self):
self.test_decompressmaxlen(flush=True)
def test_maxlenmisc(self):
# Misc tests of max_length
dco = zlib.decompressobj()
self.assertRaises(ValueError, dco.decompress, "", -1)
self.assertEqual('', dco.unconsumed_tail)
def test_clear_unconsumed_tail(self):
# Issue #12050: calling decompress() without providing max_length
# should clear the unconsumed_tail attribute.
cdata = "x\x9cKLJ\x06\x00\x02M\x01" # "abc"
dco = zlib.decompressobj()
ddata = dco.decompress(cdata, 1)
ddata += dco.decompress(dco.unconsumed_tail)
self.assertEqual(dco.unconsumed_tail, "")
def test_flushes(self):
# Test flush() with the various options, using all the
# different levels in order to provide more variations.
sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH']
sync_opt = [getattr(zlib, opt) for opt in sync_opt
if hasattr(zlib, opt)]
data = HAMLET_SCENE * 8
for sync in sync_opt:
for level in range(10):
obj = zlib.compressobj( level )
a = obj.compress( data[:3000] )
b = obj.flush( sync )
c = obj.compress( data[3000:] )
d = obj.flush()
self.assertEqual(zlib.decompress(''.join([a,b,c,d])),
data, ("Decompress failed: flush "
"mode=%i, level=%i") % (sync, level))
del obj
def test_odd_flush(self):
# Test for odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
import random
if hasattr(zlib, 'Z_SYNC_FLUSH'):
# Testing on 17K of "random" data
# Create compressor and decompressor objects
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
dco = zlib.decompressobj()
# Try 17K of data
# generate random data stream
try:
# In 2.3 and later, WichmannHill is the RNG of the bug report
gen = random.WichmannHill()
except AttributeError:
try:
# 2.2 called it Random
gen = random.Random()
except AttributeError:
# others might simply have a single RNG
gen = random
gen.seed(1)
data = genblock(1, 17 * 1024, generator=gen)
# compress, sync-flush, and decompress
first = co.compress(data)
second = co.flush(zlib.Z_SYNC_FLUSH)
expanded = dco.decompress(first + second)
# if decompressed data is different from the input data, choke.
self.assertEqual(expanded, data, "17K random source doesn't match")
def test_empty_flush(self):
# Test that calling .flush() on unused objects works.
# (Bug #1083110 -- calling .flush() on decompress objects
# caused a core dump.)
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
self.assertTrue(co.flush()) # Returns a zlib header
dco = zlib.decompressobj()
self.assertEqual(dco.flush(), "") # Returns nothing
def test_decompress_incomplete_stream(self):
# This is 'foo', deflated
x = 'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E'
# For the record
self.assertEqual(zlib.decompress(x), 'foo')
self.assertRaises(zlib.error, zlib.decompress, x[:-5])
# Omitting the stream end works with decompressor objects
# (see issue #8672).
dco = zlib.decompressobj()
y = dco.decompress(x[:-5])
y += dco.flush()
self.assertEqual(y, 'foo')
if hasattr(zlib.compressobj(), "copy"):
def test_compresscopy(self):
# Test copying a compression object
data0 = HAMLET_SCENE
data1 = HAMLET_SCENE.swapcase()
c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
bufs0 = []
bufs0.append(c0.compress(data0))
c1 = c0.copy()
bufs1 = bufs0[:]
bufs0.append(c0.compress(data0))
bufs0.append(c0.flush())
s0 = ''.join(bufs0)
bufs1.append(c1.compress(data1))
bufs1.append(c1.flush())
s1 = ''.join(bufs1)
self.assertEqual(zlib.decompress(s0),data0+data0)
self.assertEqual(zlib.decompress(s1),data0+data1)
def test_badcompresscopy(self):
# Test copying a compression object in an inconsistent state
c = zlib.compressobj()
c.compress(HAMLET_SCENE)
c.flush()
self.assertRaises(ValueError, c.copy)
if hasattr(zlib.decompressobj(), "copy"):
def test_decompresscopy(self):
# Test copying a decompression object
data = HAMLET_SCENE
comp = zlib.compress(data)
d0 = zlib.decompressobj()
bufs0 = []
bufs0.append(d0.decompress(comp[:32]))
d1 = d0.copy()
bufs1 = bufs0[:]
bufs0.append(d0.decompress(comp[32:]))
s0 = ''.join(bufs0)
bufs1.append(d1.decompress(comp[32:]))
s1 = ''.join(bufs1)
self.assertEqual(s0,s1)
self.assertEqual(s0,data)
def test_baddecompresscopy(self):
# Test copying a compression object in an inconsistent state
data = zlib.compress(HAMLET_SCENE)
d = zlib.decompressobj()
d.decompress(data)
d.flush()
self.assertRaises(ValueError, d.copy)
# Memory use of the following functions takes into account overallocation
@precisionbigmemtest(size=_1G + 1024 * 1024, memuse=3)
def test_big_compress_buffer(self, size):
c = zlib.compressobj(1)
compress = lambda s: c.compress(s) + c.flush()
self.check_big_compress_buffer(size, compress)
@precisionbigmemtest(size=_1G + 1024 * 1024, memuse=2)
def test_big_decompress_buffer(self, size):
d = zlib.decompressobj()
decompress = lambda s: d.decompress(s) + d.flush()
self.check_big_decompress_buffer(size, decompress)
def genblock(seed, length, step=1024, generator=random):
"""length-byte stream of random data from a seed (in step-byte blocks)."""
if seed is not None:
generator.seed(seed)
randint = generator.randint
if length < step or step < 2:
step = length
blocks = []
for i in range(0, length, step):
blocks.append(''.join([chr(randint(0,255))
for x in range(step)]))
return ''.join(blocks)[:length]
def choose_lines(source, number, seed=None, generator=random):
"""Return a list of number lines randomly chosen from the source"""
if seed is not None:
generator.seed(seed)
sources = source.split('\n')
return [generator.choice(sources) for n in range(number)]
HAMLET_SCENE = """
LAERTES
O, fear me not.
I stay too long: but here my father comes.
Enter POLONIUS
A double blessing is a double grace,
Occasion smiles upon a second leave.
LORD POLONIUS
Yet here, Laertes! aboard, aboard, for shame!
The wind sits in the shoulder of your sail,
And you are stay'd for. There; my blessing with thee!
And these few precepts in thy memory
See thou character. Give thy thoughts no tongue,
Nor any unproportioned thought his act.
Be thou familiar, but by no means vulgar.
Those friends thou hast, and their adoption tried,
Grapple them to thy soul with hoops of steel;
But do not dull thy palm with entertainment
Of each new-hatch'd, unfledged comrade. Beware
Of entrance to a quarrel, but being in,
Bear't that the opposed may beware of thee.
Give every man thy ear, but few thy voice;
Take each man's censure, but reserve thy judgment.
Costly thy habit as thy purse can buy,
But not express'd in fancy; rich, not gaudy;
For the apparel oft proclaims the man,
And they in France of the best rank and station
Are of a most select and generous chief in that.
Neither a borrower nor a lender be;
For loan oft loses both itself and friend,
And borrowing dulls the edge of husbandry.
This above all: to thine ownself be true,
And it must follow, as the night the day,
Thou canst not then be false to any man.
Farewell: my blessing season this in thee!
LAERTES
Most humbly do I take my leave, my lord.
LORD POLONIUS
The time invites you; go; your servants tend.
LAERTES
Farewell, Ophelia; and remember well
What I have said to you.
OPHELIA
'Tis in my memory lock'd,
And you yourself shall keep the key of it.
LAERTES
Farewell.
"""
def test_main():
run_unittest(
ChecksumTestCase,
ExceptionTestCase,
CompressTestCase,
CompressObjectTestCase
)
if __name__ == "__main__":
test_main()
|
| mediafactory/tryton_core_daemon | refs/heads/stable-2.4 | trytond/backend/sqlite/table.py | 1 |
#This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
from trytond.backend.table import TableHandlerInterface
import logging
import re
class TableHandler(TableHandlerInterface):
def __init__(self, cursor, model, module_name=None, history=False):
super(TableHandler, self).__init__(cursor, model,
module_name=module_name, history=history)
self._columns = {}
self._constraints = []
self._fk_deltypes = {}
self._indexes = []
self._field2module = {}
self._model = model
# Create new table if necessary
if not self.table_exist(self.cursor, self.table_name):
if not self.history:
self.cursor.execute('CREATE TABLE "%s" ' \
'(id INTEGER PRIMARY KEY AUTOINCREMENT)' % \
self.table_name)
else:
self.cursor.execute('CREATE TABLE "%s" ' \
'(__id INTEGER PRIMARY KEY AUTOINCREMENT, ' \
'id INTEGER)' % self.table_name)
self._update_definitions()
@staticmethod
def table_exist(cursor, table_name):
cursor.execute("SELECT sql FROM sqlite_master " \
"WHERE type = 'table' AND name = %s",
(table_name,))
res = cursor.fetchone()
if not res:
return False
sql, = res
        # Migration from 1.6: add AUTOINCREMENT
        if 'AUTOINCREMENT' not in sql.upper():
temp_sql = sql.replace(table_name, '_temp_%s' % table_name)
cursor.execute(temp_sql)
cursor.execute('PRAGMA table_info("' + table_name + '")')
columns = ['"%s"' % column for _, column, _, _, _, _
in cursor.fetchall()]
cursor.execute(('INSERT INTO "_temp_%s" '
'(' + ','.join(columns) + ') '
'SELECT ' + ','.join(columns) +
' FROM "%s"') % (table_name, table_name))
cursor.execute('DROP TABLE "%s"' % table_name)
new_sql = sql.replace('PRIMARY KEY',
'PRIMARY KEY AUTOINCREMENT')
cursor.execute(new_sql)
cursor.execute(('INSERT INTO "%s" '
'(' + ','.join(columns) + ') '
'SELECT ' + ','.join(columns) +
' FROM "_temp_%s"') % (table_name, table_name))
cursor.execute('DROP TABLE "_temp_%s"' % table_name)
return True
@staticmethod
def table_rename(cursor, old_name, new_name):
if TableHandler.table_exist(cursor, old_name) and \
not TableHandler.table_exist(cursor, new_name):
cursor.execute('ALTER TABLE "%s" RENAME TO "%s"' % \
(old_name, new_name))
#Rename history table
old_history = old_name + "__history"
new_history = new_name + "__history"
if TableHandler.table_exist(cursor, old_history) and \
not TableHandler.table_exist(cursor, new_history):
cursor.execute('ALTER TABLE "%s" RENAME TO "%s"' % \
(old_history, new_history))
@staticmethod
def sequence_exist(cursor, sequence_name):
return True
@staticmethod
def sequence_rename(cursor, old_name, new_name):
pass
def column_exist(self, column_name):
return column_name in self._columns
def column_rename(self, old_name, new_name, exception=False):
if self.column_exist(old_name) and \
not self.column_exist(new_name):
temp_table = '_temp_%s' % self.table_name
TableHandler.table_rename(self.cursor, self.table_name,
temp_table)
new_table = TableHandler(self.cursor, self._model,
history=self.history)
            for column, definition in self._columns.iteritems():
                if column == old_name:
                    column = new_name
                new_table.add_raw_column(column, definition['typname'], False,
                        field_size=definition['size'])
new_columns = new_table._columns.keys()
old_columns = [x if x != old_name else new_name
for x in new_columns]
self.cursor.execute(('INSERT INTO "%s" (' + \
','.join('"%s"' % x for x in new_columns) + \
') SELECT ' + \
','.join('"%s"' % x for x in old_columns) + ' ' + \
'FROM "%s"') % (self.table_name, temp_table))
self.cursor.execute('DROP TABLE "%s"' % temp_table)
elif exception and self.column_exist(new_name):
raise Exception('Unable to rename column %s.%s to %s.%s: ' \
                    '%s.%s already exists!' % \
(self.table_name, old_name, self.table_name, new_name,
self.table_name, new_name))
def _update_definitions(self):
# Fetch columns definitions from the table
self.cursor.execute('PRAGMA table_info("' + self.table_name + '")')
self._columns = {}
for _, column, type_, notnull, hasdef, _ in self.cursor.fetchall():
column = re.sub(r'^\"|\"$', '', column)
match = re.match(r'(\w+)(\((.*?)\))?', type_)
if match:
typname = match.group(1).upper()
size = match.group(3) and int(match.group(3)) or 0
else:
typname = type_.upper()
size = -1
self._columns[column] = {
'notnull': notnull,
'hasdef': hasdef,
'size': size,
'typname': typname,
}
# Fetch indexes defined for the table
try:
self.cursor.execute('PRAGMA index_list("' + self.table_name + '")')
        except IndexError:  # This sometimes raises IndexError; retry once
self.cursor.execute('PRAGMA index_list("' + self.table_name + '")')
self._indexes = [l[1] for l in self.cursor.fetchall()]
# Keep track of which module created each field
self._field2module = {}
if self.object_name is not None:
self.cursor.execute('SELECT f.name, f.module '\
'FROM ir_model_field f '\
'JOIN ir_model m on (f.model=m.id) '\
'WHERE m.model = %s',
(self.object_name,)
)
for line in self.cursor.fetchall():
self._field2module[line[0]] = line[1]
def alter_size(self, column_name, column_type):
logging.getLogger('init').warning(
'Unable to alter size of column %s ' \
'of table %s!' % \
(column_name, self.table_name))
def alter_type(self, column_name, column_type):
logging.getLogger('init').warning(
'Unable to alter type of column %s ' \
'of table %s!' % \
(column_name, self.table_name))
def db_default(self, column_name, value):
logging.getLogger('init').warning(
'Unable to set default on column %s ' \
'of table %s!' % \
(column_name, self.table_name))
def add_raw_column(self, column_name, column_type, column_format,
default_fun=None, field_size=None, migrate=True, string=''):
if self.column_exist(column_name):
if not migrate:
return
base_type = column_type[0].upper()
if base_type != self._columns[column_name]['typname']:
if (self._columns[column_name]['typname'], base_type) in [
('VARCHAR', 'TEXT'),
('TEXT', 'VARCHAR'),
('DATE', 'TIMESTAMP'),
('INTEGER', 'FLOAT'),
]:
self.alter_type(column_name, base_type)
else:
logging.getLogger('init').warning(
'Unable to migrate column %s on table %s ' \
'from %s to %s.' % \
(column_name, self.table_name,
self._columns[column_name]['typname'], base_type))
if base_type == 'VARCHAR' \
and self._columns[column_name]['typname'] == 'VARCHAR':
# Migrate size
                if field_size is None:
if self._columns[column_name]['size'] > 0:
self.alter_size(column_name, base_type)
elif self._columns[column_name]['size'] == field_size:
pass
elif self._columns[column_name]['size'] > 0 and \
self._columns[column_name]['size'] < field_size:
self.alter_size(column_name, column_type[1])
else:
logging.getLogger('init').warning(
'Unable to migrate column %s on table %s ' \
'from varchar(%s) to varchar(%s).' % \
(column_name, self.table_name,
self._columns[column_name]['size'] > 0 and \
self._columns[column_name]['size'] or "",
field_size))
return
column_type = column_type[1]
default = ''
self.cursor.execute(('ALTER TABLE "%s" ADD COLUMN "%s" %s' + default) %
(self.table_name, column_name, column_type))
if column_format:
# check if table is non-empty:
self.cursor.execute('SELECT 1 FROM "%s" limit 1' % self.table_name)
if self.cursor.fetchone():
# Populate column with default values:
default = None
if default_fun is not None:
default = default_fun()
self.cursor.execute('UPDATE "' + self.table_name + '" '\
'SET "' + column_name + '" = %s',
(column_format(default),))
self._update_definitions()
def add_fk(self, column_name, reference, on_delete=None):
logging.getLogger('init').warning(
'Unable to add foreign key on table %s!' % \
(self.table_name,))
def drop_fk(self, column_name, table=None):
logging.getLogger('init').warning(
'Unable to drop foreign key on table %s!' % \
(self.table_name,))
def index_action(self, column_name, action='add', table=None):
if isinstance(column_name, basestring):
column_name = [column_name]
index_name = self.table_name + "_" + '_'.join(column_name) + "_index"
if action == 'add':
if index_name in self._indexes:
return
self.cursor.execute('CREATE INDEX "' + index_name + '" ' \
'ON "' + self.table_name + '" ( ' + \
','.join('"' + x + '"' for x in column_name) + \
')')
self._update_definitions()
elif action == 'remove':
if len(column_name) == 1:
if self._field2module.get(column_name[0], self.module_name) \
!= self.module_name:
return
if index_name in self._indexes:
self.cursor.execute('DROP INDEX "%s" ' % (index_name,))
self._update_definitions()
else:
raise Exception('Index action not supported!')
def not_null_action(self, column_name, action='add'):
if not self.column_exist(column_name):
return
if action == 'add':
logging.getLogger('init').warning(
'Unable to set not null on column %s ' \
'of table %s!' % \
(column_name, self.table_name))
elif action == 'remove':
logging.getLogger('init').warning(
'Unable to remove not null on column %s ' \
'of table %s!' % \
(column_name, self.table_name))
else:
raise Exception('Not null action not supported!')
def add_constraint(self, ident, constraint, exception=False):
logging.getLogger('init').warning(
'Unable to add constraint on table %s!' % \
(self.table_name,))
def drop_constraint(self, ident, exception=False, table=None):
logging.getLogger('init').warning(
'Unable to drop constraint on table %s!' % \
(self.table_name,))
def drop_column(self, column_name, exception=False):
logging.getLogger('init').warning(
'Unable to drop \'%s\' column on table %s!' % \
(column_name, self.table_name))
@staticmethod
def drop_table(cursor, model, table, cascade=False):
cursor.execute('DELETE from ir_model_data where '\
'model = \'%s\'' % model)
query = 'DROP TABLE "%s"' % table
if cascade:
query = query + ' CASCADE'
cursor.execute(query)
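
# Editor's usage sketch (illustration, not part of the original module). The
# static helpers expect a Tryton database cursor whose paramstyle matches the
# '%s' placeholders used above, so a raw sqlite3 cursor will not work as-is:
#
#     TableHandler.table_exist(cursor, 'ir_model')          # -> True or False
#     TableHandler.table_rename(cursor, 'old_table', 'new_table')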
|
| yancz1989/cancer | refs/heads/master | tf_upgrade.py | 1 |
# -*- coding: utf-8 -*-
# @Author: yancz1989
# @Date: 2017-02-19 08:22:23
# @Last Modified by: yancz1989
# @Last Modified time: 2017-02-19 08:22:51
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from pre-1.0 TensorFlow to 1.0 TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import ast
import collections
import os
import shutil
import sys
import tempfile
import traceback
class APIChangeSpec(object):
"""List of maps that describe what changed in the API."""
def __init__(self):
# Maps from a function name to a dictionary that describes how to
# map from an old argument keyword to the new argument keyword.
self.function_keyword_renames = {
"tf.count_nonzero": {
"reduction_indices": "axis"
},
"tf.reduce_all": {
"reduction_indices": "axis"
},
"tf.reduce_any": {
"reduction_indices": "axis"
},
"tf.reduce_max": {
"reduction_indices": "axis"
},
"tf.reduce_mean": {
"reduction_indices": "axis"
},
"tf.reduce_min": {
"reduction_indices": "axis"
},
"tf.reduce_prod": {
"reduction_indices": "axis"
},
"tf.reduce_sum": {
"reduction_indices": "axis"
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis"
},
"tf.expand_dims": {
"dim": "axis"
},
"tf.argmax": {
"dimension": "axis"
},
"tf.argmin": {
"dimension": "axis"
},
"tf.reduce_join": {
"reduction_indices": "axis"
},
"tf.sparse_concat": {
"concat_dim": "axis"
},
"tf.sparse_split": {
"split_dim": "axis"
},
"tf.sparse_reduce_sum": {
"reduction_axes": "axis"
},
"tf.reverse_sequence": {
"seq_dim": "seq_axis",
"batch_dim": "batch_axis"
},
"tf.sparse_reduce_sum_sparse": {
"reduction_axes": "axis"
},
"tf.squeeze": {
"squeeze_dims": "axis"
},
"tf.split": {
"split_dim": "axis",
"num_split": "num_or_size_splits"
},
"tf.concat": {
"concat_dim": "axis"
},
}
# Mapping from function to the new name of the function
self.function_renames = {
"tf.inv": "tf.reciprocal",
"tf.contrib.deprecated.scalar_summary": "tf.summary.scalar",
"tf.contrib.deprecated.histogram_summary": "tf.summary.histogram",
"tf.listdiff": "tf.setdiff1d",
"tf.list_diff": "tf.setdiff1d",
"tf.mul": "tf.multiply",
"tf.neg": "tf.negative",
"tf.sub": "tf.subtract",
"tf.train.SummaryWriter": "tf.summary.FileWriter",
"tf.scalar_summary": "tf.summary.scalar",
"tf.histogram_summary": "tf.summary.histogram",
"tf.audio_summary": "tf.summary.audio",
"tf.image_summary": "tf.summary.image",
"tf.merge_summary": "tf.summary.merge",
"tf.merge_all_summaries": "tf.summary.merge_all",
"tf.image.per_image_whitening": "tf.image.per_image_standardization",
"tf.all_variables": "tf.global_variables",
"tf.VARIABLES": "tf.GLOBAL_VARIABLES",
"tf.initialize_all_variables": "tf.global_variables_initializer",
"tf.initialize_variables": "tf.variables_initializer",
"tf.initialize_local_variables": "tf.local_variables_initializer",
"tf.batch_matrix_diag": "tf.matrix_diag",
"tf.batch_band_part": "tf.band_part",
"tf.batch_set_diag": "tf.set_diag",
"tf.batch_matrix_transpose": "tf.matrix_transpose",
"tf.batch_matrix_determinant": "tf.matrix_determinant",
"tf.batch_matrix_inverse": "tf.matrix_inverse",
"tf.batch_cholesky": "tf.cholesky",
"tf.batch_cholesky_solve": "tf.cholesky_solve",
"tf.batch_matrix_solve": "tf.matrix_solve",
"tf.batch_matrix_triangular_solve": "tf.matrix_triangular_solve",
"tf.batch_matrix_solve_ls": "tf.matrix_solve_ls",
"tf.batch_self_adjoint_eig": "tf.self_adjoint_eig",
"tf.batch_self_adjoint_eigvals": "tf.self_adjoint_eigvals",
"tf.batch_svd": "tf.svd",
"tf.batch_fft": "tf.fft",
"tf.batch_ifft": "tf.ifft",
"tf.batch_ifft2d": "tf.ifft2d",
"tf.batch_fft3d": "tf.fft3d",
"tf.batch_ifft3d": "tf.ifft3d",
"tf.select": "tf.where",
"tf.complex_abs": "tf.abs",
"tf.batch_matmul": "tf.matmul",
"tf.pack": "tf.stack",
"tf.unpack": "tf.unstack",
}
self.change_to_function = {
"tf.ones_initializer",
"tf.zeros_initializer",
}
# Functions that were reordered should be changed to the new keyword args
# for safety, if positional arguments are used. If you have reversed the
# positional arguments yourself, this could do the wrong thing.
self.function_reorders = {
"tf.split": ["axis", "num_or_size_splits", "value", "name"],
"tf.sparse_split": ["axis", "num_or_size_splits", "value", "name"],
"tf.concat": ["concat_dim", "values", "name"],
"tf.svd": ["tensor", "compute_uv", "full_matrices", "name"],
"tf.nn.softmax_cross_entropy_with_logits": [
"logits", "labels", "dim", "name"],
"tf.nn.sparse_softmax_cross_entropy_with_logits": [
"logits", "labels", "name"],
"tf.nn.sigmoid_cross_entropy_with_logits": [
"logits", "labels", "name"]
}
# Specially handled functions.
self.function_handle = {"tf.reverse": self._reverse_handler}
@staticmethod
def _reverse_handler(file_edit_recorder, node):
# TODO(aselle): Could check for a literal list of bools and try to convert
# them to indices.
comment = ("ERROR: tf.reverse has had its argument semantics changed\n"
"significantly the converter cannot detect this reliably, so you"
"need to inspect this usage manually.\n")
file_edit_recorder.add(comment,
node.lineno,
node.col_offset,
"tf.reverse",
"tf.reverse",
error="tf.reverse requires manual check.")
class FileEditTuple(collections.namedtuple(
"FileEditTuple", ["comment", "line", "start", "old", "new"])):
"""Each edit that is recorded by a FileEditRecorder.
Fields:
comment: A description of the edit and why it was made.
line: The line number in the file where the edit occurs (1-indexed).
    start: The column offset in the line where the edit begins (0-indexed).
old: text string to remove (this must match what was in file).
new: text string to add in place of `old`.
"""
__slots__ = ()
class FileEditRecorder(object):
"""Record changes that need to be done to the file."""
def __init__(self, filename):
# all edits are lists of chars
self._filename = filename
self._line_to_edit = collections.defaultdict(list)
self._errors = []
def process(self, text):
"""Process a list of strings, each corresponding to the recorded changes.
Args:
text: A list of lines of text (assumed to contain newlines)
Returns:
A tuple of the modified text and a textual description of what is done.
Raises:
ValueError: if substitution source location does not have expected text.
"""
change_report = ""
    # Iterate over each line
for line, edits in self._line_to_edit.items():
offset = 0
      # Sort by column so that edits are processed left to right, making the
      # indexing adjustments cumulative for edits that change the string
      # length.
edits.sort(key=lambda x: x.start)
# Extract each line to a list of characters, because mutable lists
# are editable, unlike immutable strings.
char_array = list(text[line - 1])
# Record a description of the change
change_report += "%r Line %d\n" % (self._filename, line)
change_report += "-" * 80 + "\n\n"
for e in edits:
change_report += "%s\n" % e.comment
change_report += "\n Old: %s" % (text[line - 1])
      # Build underline buffers marking where in the line each edit applies
change_list = [" "] * len(text[line - 1])
change_list_new = [" "] * len(text[line - 1])
# Iterate for each edit
for e in edits:
# Create effective start, end by accounting for change in length due
# to previous edits
start_eff = e.start + offset
end_eff = start_eff + len(e.old)
# Make sure the edit is changing what it should be changing
old_actual = "".join(char_array[start_eff:end_eff])
if old_actual != e.old:
raise ValueError("Expected text %r but got %r" %
("".join(e.old), "".join(old_actual)))
# Make the edit
char_array[start_eff:end_eff] = list(e.new)
# Create the underline highlighting of the before and after
change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
change_list_new[start_eff:end_eff] = "~" * len(e.new)
# Keep track of how to generate effective ranges
offset += len(e.new) - len(e.old)
# Finish the report comment
change_report += " %s\n" % "".join(change_list)
text[line - 1] = "".join(char_array)
change_report += " New: %s" % (text[line - 1])
change_report += " %s\n\n" % "".join(change_list_new)
return "".join(text), change_report, self._errors
def add(self, comment, line, start, old, new, error=None):
"""Add a new change that is needed.
Args:
comment: A description of what was changed
line: Line number (1 indexed)
start: Column offset (0 indexed)
old: old text
new: new text
error: this "edit" is something that cannot be fixed automatically
Returns:
None
"""
self._line_to_edit[line].append(
FileEditTuple(comment, line, start, old, new))
if error:
self._errors.append("%s:%d: %s" % (self._filename, line, error))
class TensorFlowCallVisitor(ast.NodeVisitor):
"""AST Visitor that finds TensorFlow Function calls.
Updates function calls from old API version to new API version.
"""
def __init__(self, filename, lines):
self._filename = filename
self._file_edit = FileEditRecorder(filename)
self._lines = lines
self._api_change_spec = APIChangeSpec()
def process(self, lines):
return self._file_edit.process(lines)
def generic_visit(self, node):
ast.NodeVisitor.generic_visit(self, node)
def _rename_functions(self, node, full_name):
function_renames = self._api_change_spec.function_renames
try:
new_name = function_renames[full_name]
self._file_edit.add("Renamed function %r to %r" % (full_name,
new_name),
node.lineno, node.col_offset, full_name, new_name)
except KeyError:
pass
def _get_attribute_full_path(self, node):
"""Traverse an attribute to generate a full name e.g. tf.foo.bar.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if the tree was not a simple form.
      i.e. `(foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _find_true_position(self, node):
"""Return correct line number and column offset for a given node.
    This is necessary mainly because ast.ListComp reports the location of the
    first token after the opening '[' of the list comprehension.
Args:
node: Node for which we wish to know the lineno and col_offset
"""
import re
    find_open = re.compile(r"^\s*(\[).*$")
    find_string_chars = re.compile(r"['\"]")
if isinstance(node, ast.ListComp):
# Strangely, ast.ListComp returns the col_offset of the first token
# after the '[' token which appears to be a bug. Workaround by
# explicitly finding the real start of the list comprehension.
line = node.lineno
col = node.col_offset
# loop over lines
      while True:
        # Reverse the preceding text so the regex can scan backwards for '['
text = self._lines[line-1]
reversed_preceding_text = text[:col][::-1]
# First find if a [ can be found with only whitespace between it and
# col.
m = find_open.match(reversed_preceding_text)
if m:
new_col_offset = col - m.start(1) - 1
return line, new_col_offset
else:
if (reversed_preceding_text=="" or
reversed_preceding_text.isspace()):
line = line - 1
prev_line = self._lines[line - 1]
# TODO(aselle):
# this is poor comment detection, but it is good enough for
# cases where the comment does not contain string literal starting/
# ending characters. If ast gave us start and end locations of the
# ast nodes rather than just start, we could use string literal
# node ranges to filter out spurious #'s that appear in string
# literals.
comment_start = prev_line.find("#")
if comment_start == -1:
col = len(prev_line) -1
elif find_string_chars.search(prev_line[comment_start:]) is None:
col = comment_start
else:
return None, None
else:
return None, None
    # Most other nodes return proper locations (`with`, notably, does not),
    # but a `with` statement cannot appear inside a call argument anyway.
return node.lineno, node.col_offset
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
# Find a simple attribute name path e.g. "tf.foo.bar"
full_name = self._get_attribute_full_path(node.func)
# Make sure the func is marked as being part of a call
node.func.is_function_for_call = True
if full_name and full_name.startswith("tf."):
# Call special handlers
function_handles = self._api_change_spec.function_handle
if full_name in function_handles:
function_handles[full_name](self._file_edit, node)
      # Examine any non-keyword arguments and turn them into keyword
      # arguments if reordering is required.
function_reorders = self._api_change_spec.function_reorders
function_keyword_renames = (
self._api_change_spec.function_keyword_renames)
if full_name in function_reorders:
reordered = function_reorders[full_name]
for idx, arg in enumerate(node.args):
lineno, col_offset = self._find_true_position(arg)
if lineno is None or col_offset is None:
self._file_edit.add(
"Failed to add keyword %r to reordered function %r"
% (reordered[idx], full_name), arg.lineno, arg.col_offset,
"", "",
error="A necessary keyword argument failed to be inserted.")
else:
keyword_arg = reordered[idx]
if (full_name in function_keyword_renames and
keyword_arg in function_keyword_renames[full_name]):
keyword_arg = function_keyword_renames[full_name][keyword_arg]
self._file_edit.add("Added keyword %r to reordered function %r"
% (reordered[idx], full_name), lineno,
col_offset, "", keyword_arg + "=")
# Examine each keyword argument and convert it to the final renamed form
renamed_keywords = ({} if full_name not in function_keyword_renames else
function_keyword_renames[full_name])
for keyword in node.keywords:
argkey = keyword.arg
argval = keyword.value
if argkey in renamed_keywords:
argval_lineno, argval_col_offset = self._find_true_position(argval)
if (argval_lineno is not None and argval_col_offset is not None):
# TODO(aselle): We should scan backward to find the start of the
# keyword key. Unfortunately ast does not give you the location of
# keyword keys, so we are forced to infer it from the keyword arg
# value.
key_start = argval_col_offset - len(argkey) - 1
key_end = key_start + len(argkey) + 1
if self._lines[argval_lineno - 1][key_start:key_end] == argkey + "=":
self._file_edit.add("Renamed keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval_lineno,
argval_col_offset - len(argkey) - 1,
argkey + "=", renamed_keywords[argkey] + "=")
continue
self._file_edit.add(
"Failed to rename keyword argument from %r to %r" %
(argkey, renamed_keywords[argkey]),
argval.lineno,
argval.col_offset - len(argkey) - 1,
"", "",
error="Failed to find keyword lexographically. Fix manually.")
ast.NodeVisitor.generic_visit(self, node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar].
Args:
node: Node that is of type ast.Attribute
"""
full_name = self._get_attribute_full_path(node)
if full_name and full_name.startswith("tf."):
self._rename_functions(node, full_name)
if full_name in self._api_change_spec.change_to_function:
if not hasattr(node, "is_function_for_call"):
new_text = full_name + "()"
self._file_edit.add("Changed %r to %r"%(full_name, new_text),
node.lineno, node.col_offset, full_name, new_text)
ast.NodeVisitor.generic_visit(self, node)
class TensorFlowCodeUpgrader(object):
"""Class that handles upgrading a set of Python files to TensorFlow 1.0."""
def __init__(self):
pass
def process_file(self, in_filename, out_filename):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
Returns:
A tuple representing number of files processed, log of actions, errors
"""
    # Write to a temporary file, in case we are doing an in-place modification.
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(
in_filename, in_file, out_filename, temp_file)
shutil.move(temp_file.name, out_filename)
return ret
# Broad exceptions are required here because ast throws whatever it wants.
# pylint: disable=broad-except
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
process_errors = []
text = "-" * 80 + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += "-" * 80 + "\n\n"
parsed_ast = None
lines = in_file.readlines()
try:
parsed_ast = ast.parse("".join(lines))
except Exception:
text += "Failed to parse %r\n\n" % in_filename
text += traceback.format_exc()
if parsed_ast:
visitor = TensorFlowCallVisitor(in_filename, lines)
visitor.visit(parsed_ast)
out_text, new_text, process_errors = visitor.process(lines)
text += new_text
if out_file:
out_file.write(out_text)
text += "\n"
return 1, text, process_errors
# pylint: enable=broad-except
def process_tree(self, root_directory, output_root_directory):
"""Processes upgrades on an entire tree of python files in place.
Note that only Python files. If you have custom code in other languages,
you will need to manually upgrade those.
Args:
root_directory: Directory to walk and process.
output_root_directory: Directory to use as base
Returns:
      A tuple of files processed, the report string for all files, and errors
"""
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." % (
output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" % (
root_directory, output_root_directory))
sys.exit(1)
# Collect list of files to process (we do this to correctly handle if the
# user puts the output directory in some sub directory of the input dir)
files_to_process = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if f.endswith(".py")]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(
output_root_directory, os.path.relpath(fullpath, root_directory))
files_to_process.append((fullpath, fullpath_output))
file_count = 0
tree_errors = []
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors += l_errors
report += l_report
return file_count, report, tree_errors
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Convert a TensorFlow Python file to 1.0
Simple usage:
tf_convert.py --infile foo.py --outfile bar.py
tf_convert.py --intree ~/code/old --outtree ~/code/new
""")
parser.add_argument(
"--infile",
dest="input_file",
help="If converting a single file, the name of the file "
"to convert")
parser.add_argument(
"--outfile",
dest="output_file",
help="If converting a single file, the output filename.")
parser.add_argument(
"--intree",
dest="input_tree",
help="If converting a whole tree of files, the directory "
"to read from (relative or absolute).")
parser.add_argument(
"--outtree",
dest="output_tree",
help="If converting a whole tree of files, the output "
"directory (relative or absolute).")
parser.add_argument(
"--reportfile",
dest="report_filename",
help=("The name of the file where the report log is "
"stored."
"(default: %(default)s)"),
default="report.txt")
args = parser.parse_args()
upgrade = TensorFlowCodeUpgrader()
report_text = None
report_filename = args.report_filename
  files_processed = 0
  errors = []
if args.input_file:
files_processed, report_text, errors = upgrade.process_file(
args.input_file, args.output_file)
files_processed = 1
elif args.input_tree:
files_processed, report_text, errors = upgrade.process_tree(
args.input_tree, args.output_tree)
else:
parser.print_help()
if report_text:
open(report_filename, "w").write(report_text)
print("TensorFlow 1.0 Upgrade Script")
print("-----------------------------")
print("Converted %d files\n" % files_processed)
print("Detected %d errors that require attention" % len(errors))
print("-" * 80)
print("\n".join(errors))
print("\nMake sure to read the detailed log %r\n" % report_filename)
|
bcpki/nonce2testblocks
|
refs/heads/master
|
qa/rpc-tests/receivedby.py
|
16
|
#!/usr/bin/env python
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def get_sub_array_from_array(object_array, to_match):
    '''
    Finds and returns a sub array from an array of arrays.
    to_match should be a unique identifier of a sub array
    '''
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
return item
return []
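# Illustrative usage sketch (hypothetical account name): pull one account's
# entry out of the listreceivedbyaccount result:
#
#   entry = get_sub_array_from_array(node.listreceivedbyaccount(),
#                                    {"account": "myaccount"})
#   # -> the first dict whose "account" field matches, or [] if none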
def check_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found in object_array
"""
if should_not_find == True:
expected = { }
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects was matched %s"%(str(to_match)))
class ReceivedByTest(BitcoinTestFramework):
def run_test(self, nodes):
'''
listreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = nodes[1].getnewaddress()
txid = nodes[0].sendtoaddress(addr, 0.1)
sync_mempools(nodes)
#Check not listed in listreceivedbyaddress because has 0 confirmations
check_array_result(nodes[1].listreceivedbyaddress(),
{"address":addr},
{ },
True)
        #Bury Tx under 10 blocks so it will be returned by listreceivedbyaddress
nodes[1].setgenerate(True, 10)
sync_blocks(nodes)
check_array_result(nodes[1].listreceivedbyaddress(),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confirmations < 10
check_array_result(nodes[1].listreceivedbyaddress(5),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confirmations > 10, should not find Tx
check_array_result(nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
#Empty Tx
addr = nodes[1].getnewaddress()
check_array_result(nodes[1].listreceivedbyaddress(0,True),
{"address":addr},
{"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
'''
getreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = nodes[1].getnewaddress()
txid = nodes[0].sendtoaddress(addr, 0.1)
sync_mempools(nodes)
#Check balance is 0 because of 0 confirmations
balance = nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Check balance is 0.1
balance = nodes[1].getreceivedbyaddress(addr,0)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        #Bury Tx under 10 blocks so it will be returned by the default getreceivedbyaddress
nodes[1].setgenerate(True, 10)
sync_blocks(nodes)
balance = nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
'''
listreceivedbyaccount + getreceivedbyaccount Test
'''
#set pre-state
addrArr = nodes[1].getnewaddress()
account = nodes[1].getaccount(addrArr)
received_by_account_json = get_sub_array_from_array(nodes[1].listreceivedbyaccount(),{"account":account})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
        balance_by_account = nodes[1].getreceivedbyaccount(account)
txid = nodes[0].sendtoaddress(addr, 0.1)
# listreceivedbyaccount should return received_by_account_json because of 0 confirmations
check_array_result(nodes[1].listreceivedbyaccount(),
{"account":account},
received_by_account_json)
        # getreceivedbyaccount should return the same balance because of 0 confirmations
balance = nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account:
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
nodes[1].setgenerate(True, 10)
sync_blocks(nodes)
# listreceivedbyaccount should return updated account balance
check_array_result(nodes[1].listreceivedbyaccount(),
{"account":account},
{"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
        # getreceivedbyaccount should return the updated balance
balance = nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account + Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
#Create a new account named "mynewaccount" that has a 0 balance
nodes[1].getaccountaddress("mynewaccount")
received_by_account_json = get_sub_array_from_array(nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
# Test includeempty of listreceivedbyaccount
if received_by_account_json["amount"] != Decimal("0.0"):
raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
# Test getreceivedbyaccount for 0 amount accounts
balance = nodes[1].getreceivedbyaccount("mynewaccount")
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
if __name__ == '__main__':
ReceivedByTest().main()
|
tgarc/python-sounddevice
|
refs/heads/master
|
sounddevice_build.py
|
1
|
from cffi import FFI
ffibuilder = FFI()
ffibuilder.set_source("_sounddevice", None)
ffibuilder.cdef("""
int Pa_GetVersion( void );
const char* Pa_GetVersionText( void );
typedef int PaError;
typedef enum PaErrorCode
{
paNoError = 0,
paNotInitialized = -10000,
paUnanticipatedHostError,
paInvalidChannelCount,
paInvalidSampleRate,
paInvalidDevice,
paInvalidFlag,
paSampleFormatNotSupported,
paBadIODeviceCombination,
paInsufficientMemory,
paBufferTooBig,
paBufferTooSmall,
paNullCallback,
paBadStreamPtr,
paTimedOut,
paInternalError,
paDeviceUnavailable,
paIncompatibleHostApiSpecificStreamInfo,
paStreamIsStopped,
paStreamIsNotStopped,
paInputOverflowed,
paOutputUnderflowed,
paHostApiNotFound,
paInvalidHostApi,
paCanNotReadFromACallbackStream,
paCanNotWriteToACallbackStream,
paCanNotReadFromAnOutputOnlyStream,
paCanNotWriteToAnInputOnlyStream,
paIncompatibleStreamHostApi,
paBadBufferPtr
} PaErrorCode;
const char *Pa_GetErrorText( PaError errorCode );
PaError Pa_Initialize( void );
PaError Pa_Terminate( void );
typedef int PaDeviceIndex;
#define paNoDevice -1
#define paUseHostApiSpecificDeviceSpecification -2
typedef int PaHostApiIndex;
PaHostApiIndex Pa_GetHostApiCount( void );
PaHostApiIndex Pa_GetDefaultHostApi( void );
typedef enum PaHostApiTypeId
{
paInDevelopment=0,
paDirectSound=1,
paMME=2,
paASIO=3,
paSoundManager=4,
paCoreAudio=5,
paOSS=7,
paALSA=8,
paAL=9,
paBeOS=10,
paWDMKS=11,
paJACK=12,
paWASAPI=13,
paAudioScienceHPI=14
} PaHostApiTypeId;
typedef struct PaHostApiInfo
{
int structVersion;
PaHostApiTypeId type;
const char *name;
int deviceCount;
PaDeviceIndex defaultInputDevice;
PaDeviceIndex defaultOutputDevice;
} PaHostApiInfo;
const PaHostApiInfo * Pa_GetHostApiInfo( PaHostApiIndex hostApi );
PaHostApiIndex Pa_HostApiTypeIdToHostApiIndex( PaHostApiTypeId type );
PaDeviceIndex Pa_HostApiDeviceIndexToDeviceIndex( PaHostApiIndex hostApi,
int hostApiDeviceIndex );
typedef struct PaHostErrorInfo{
PaHostApiTypeId hostApiType;
long errorCode;
const char *errorText;
}PaHostErrorInfo;
const PaHostErrorInfo* Pa_GetLastHostErrorInfo( void );
PaDeviceIndex Pa_GetDeviceCount( void );
PaDeviceIndex Pa_GetDefaultInputDevice( void );
PaDeviceIndex Pa_GetDefaultOutputDevice( void );
typedef double PaTime;
typedef unsigned long PaSampleFormat;
#define paFloat32 0x00000001
#define paInt32 0x00000002
#define paInt24 0x00000004
#define paInt16 0x00000008
#define paInt8 0x00000010
#define paUInt8 0x00000020
#define paCustomFormat 0x00010000
#define paNonInterleaved 0x80000000
typedef struct PaDeviceInfo
{
int structVersion;
const char *name;
PaHostApiIndex hostApi;
int maxInputChannels;
int maxOutputChannels;
PaTime defaultLowInputLatency;
PaTime defaultLowOutputLatency;
PaTime defaultHighInputLatency;
PaTime defaultHighOutputLatency;
double defaultSampleRate;
} PaDeviceInfo;
const PaDeviceInfo* Pa_GetDeviceInfo( PaDeviceIndex device );
typedef struct PaStreamParameters
{
PaDeviceIndex device;
int channelCount;
PaSampleFormat sampleFormat;
PaTime suggestedLatency;
void *hostApiSpecificStreamInfo;
} PaStreamParameters;
#define paFormatIsSupported 0
PaError Pa_IsFormatSupported( const PaStreamParameters *inputParameters,
const PaStreamParameters *outputParameters,
double sampleRate );
typedef void PaStream;
#define paFramesPerBufferUnspecified 0
typedef unsigned long PaStreamFlags;
#define paNoFlag 0
#define paClipOff 0x00000001
#define paDitherOff 0x00000002
#define paNeverDropInput 0x00000004
#define paPrimeOutputBuffersUsingStreamCallback 0x00000008
#define paPlatformSpecificFlags 0xFFFF0000
typedef struct PaStreamCallbackTimeInfo{
PaTime inputBufferAdcTime;
PaTime currentTime;
PaTime outputBufferDacTime;
} PaStreamCallbackTimeInfo;
typedef unsigned long PaStreamCallbackFlags;
#define paInputUnderflow 0x00000001
#define paInputOverflow 0x00000002
#define paOutputUnderflow 0x00000004
#define paOutputOverflow 0x00000008
#define paPrimingOutput 0x00000010
typedef enum PaStreamCallbackResult
{
paContinue=0,
paComplete=1,
paAbort=2
} PaStreamCallbackResult;
typedef int PaStreamCallback(
const void *input, void *output,
unsigned long frameCount,
const PaStreamCallbackTimeInfo* timeInfo,
PaStreamCallbackFlags statusFlags,
void *userData );
PaError Pa_OpenStream( PaStream** stream,
const PaStreamParameters *inputParameters,
const PaStreamParameters *outputParameters,
double sampleRate,
unsigned long framesPerBuffer,
PaStreamFlags streamFlags,
PaStreamCallback *streamCallback,
void *userData );
PaError Pa_OpenDefaultStream( PaStream** stream,
int numInputChannels,
int numOutputChannels,
PaSampleFormat sampleFormat,
double sampleRate,
unsigned long framesPerBuffer,
PaStreamCallback *streamCallback,
void *userData );
PaError Pa_CloseStream( PaStream *stream );
typedef void PaStreamFinishedCallback( void *userData );
PaError Pa_SetStreamFinishedCallback( PaStream *stream,
PaStreamFinishedCallback* streamFinishedCallback );
PaError Pa_StartStream( PaStream *stream );
PaError Pa_StopStream( PaStream *stream );
PaError Pa_AbortStream( PaStream *stream );
PaError Pa_IsStreamStopped( PaStream *stream );
PaError Pa_IsStreamActive( PaStream *stream );
typedef struct PaStreamInfo
{
int structVersion;
PaTime inputLatency;
PaTime outputLatency;
double sampleRate;
} PaStreamInfo;
const PaStreamInfo* Pa_GetStreamInfo( PaStream *stream );
PaTime Pa_GetStreamTime( PaStream *stream );
double Pa_GetStreamCpuLoad( PaStream* stream );
PaError Pa_ReadStream( PaStream* stream,
void *buffer,
unsigned long frames );
PaError Pa_WriteStream( PaStream* stream,
const void *buffer,
unsigned long frames );
signed long Pa_GetStreamReadAvailable( PaStream* stream );
signed long Pa_GetStreamWriteAvailable( PaStream* stream );
PaHostApiTypeId Pa_GetStreamHostApiType( PaStream* stream );
PaError Pa_GetSampleSize( PaSampleFormat format );
void Pa_Sleep( long msec );
/* pa_mac_core.h */
typedef int32_t SInt32;
typedef struct
{
unsigned long size;
PaHostApiTypeId hostApiType;
unsigned long version;
unsigned long flags;
SInt32 const * channelMap;
unsigned long channelMapSize;
} PaMacCoreStreamInfo;
void PaMacCore_SetupStreamInfo( PaMacCoreStreamInfo *data, unsigned long flags );
void PaMacCore_SetupChannelMap( PaMacCoreStreamInfo *data, const SInt32 * const channelMap, unsigned long channelMapSize );
const char *PaMacCore_GetChannelName( int device, int channelIndex, bool input );
#define paMacCoreChangeDeviceParameters 0x01
#define paMacCoreFailIfConversionRequired 0x02
#define paMacCoreConversionQualityMin 0x0100
#define paMacCoreConversionQualityMedium 0x0200
#define paMacCoreConversionQualityLow 0x0300
#define paMacCoreConversionQualityHigh 0x0400
#define paMacCoreConversionQualityMax 0x0000
#define paMacCorePlayNice 0x00
#define paMacCorePro 0x01
#define paMacCoreMinimizeCPUButPlayNice 0x0100
#define paMacCoreMinimizeCPU 0x0101
/* pa_win_waveformat.h */
typedef unsigned long PaWinWaveFormatChannelMask;
/* pa_asio.h */
#define paAsioUseChannelSelectors 0x01
typedef struct PaAsioStreamInfo
{
unsigned long size;
PaHostApiTypeId hostApiType;
unsigned long version;
unsigned long flags;
int *channelSelectors;
} PaAsioStreamInfo;
/* pa_win_wasapi.h */
typedef enum PaWasapiFlags
{
paWinWasapiExclusive = 1,
paWinWasapiRedirectHostProcessor = 2,
paWinWasapiUseChannelMask = 4,
paWinWasapiPolling = 8,
paWinWasapiThreadPriority = 16
} PaWasapiFlags;
typedef void (*PaWasapiHostProcessorCallback) (
void *inputBuffer, long inputFrames,
void *outputBuffer, long outputFrames, void *userData);
typedef enum PaWasapiThreadPriority
{
eThreadPriorityNone = 0,
eThreadPriorityAudio,
eThreadPriorityCapture,
eThreadPriorityDistribution,
eThreadPriorityGames,
eThreadPriorityPlayback,
eThreadPriorityProAudio,
eThreadPriorityWindowManager
} PaWasapiThreadPriority;
typedef enum PaWasapiStreamCategory
{
eAudioCategoryOther = 0,
eAudioCategoryCommunications = 3,
eAudioCategoryAlerts = 4,
eAudioCategorySoundEffects = 5,
eAudioCategoryGameEffects = 6,
eAudioCategoryGameMedia = 7,
eAudioCategoryGameChat = 8,
eAudioCategorySpeech = 9,
eAudioCategoryMovie = 10,
eAudioCategoryMedia = 11
} PaWasapiStreamCategory;
typedef enum PaWasapiStreamOption
{
eStreamOptionNone = 0,
eStreamOptionRaw = 1,
eStreamOptionMatchFormat = 2
} PaWasapiStreamOption;
typedef struct PaWasapiStreamInfo
{
unsigned long size;
PaHostApiTypeId hostApiType;
unsigned long version;
unsigned long flags;
PaWinWaveFormatChannelMask channelMask;
PaWasapiHostProcessorCallback hostProcessorOutput;
PaWasapiHostProcessorCallback hostProcessorInput;
PaWasapiThreadPriority threadPriority;
PaWasapiStreamCategory streamCategory;
PaWasapiStreamOption streamOption;
} PaWasapiStreamInfo;
""")
ffibuilder.cdef("""
/* from stdio.h */
FILE* fopen(const char* path, const char* mode);
int fclose(FILE* fp);
FILE* stderr; /* GNU C library */
FILE* __stderrp; /* macOS */
""")
if __name__ == "__main__":
ffibuilder.compile(verbose=True)
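# With set_source("_sounddevice", None), cffi runs in out-of-line ABI mode:
# compile() emits a pure-Python "_sounddevice.py" module rather than a C
# extension.  A hedged usage sketch (the library name varies by platform):
#
#   from _sounddevice import ffi
#   lib = ffi.dlopen('portaudio')  # e.g. 'libportaudio.so.2' on Linux
#   print(ffi.string(lib.Pa_GetVersionText()).decode())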
|
blaze/distributed
|
refs/heads/master
|
distributed/http/worker/__init__.py
|
12133432
| |
idegtiarov/ceilometer
|
refs/heads/master
|
ceilometer/compute/virt/__init__.py
|
12133432
| |
peritus/robotframework-selenium2library
|
refs/heads/master
|
test/unit/keywords/__init__.py
|
12133432
| |
tomduijf/netdisco
|
refs/heads/master
|
netdisco/discoverables/netgear_router.py
|
2
|
""" Discovers Netgear routers. """
from . import SSDPDiscoverable
try:
from urlparse import urlparse # Py2
except ImportError:
from urllib.parse import urlparse # Py3
class Discoverable(SSDPDiscoverable):
""" Adds support for discovering Philips Hue bridges. """
def info_from_entry(self, entry):
""" Returns the most important info from a uPnP entry. """
url = urlparse(entry.values['location'])
return (entry.description['device']['modelNumber'], url.hostname)
def get_entries(self):
""" Get all the Hue bridge uPnP entries. """
return self.find_by_device_description({
"manufacturer": "NETGEAR, Inc.",
"deviceType": "urn:schemas-upnp-org:device:InternetGatewayDevice:1"
})
|
emilliman5/Bpipes
|
refs/heads/master
|
unique_read_filter.py
|
1
|
#!/usr/bin/env python
import csv
import sys
import re
f = csv.reader(sys.stdin, dialect="excel-tab")
of = csv.writer(sys.stdout, dialect="excel-tab")
last_read = None
XS=0
for line in f:
    # Pass SAM header lines (starting with "@") straight through.
    if(line[0][0] == "@"):
        of.writerow(line)
        continue
    if(last_read == None):
        last_read = line
        # Optional SAM fields look like "XS:i:30"; split(':')[0] is the tag
        # name and split(':')[2] its value.  Flag multi-mapping reads whose
        # secondary alignment score (XS) equals the primary score (AS) --
        # assumed here to sit in fields 11 and 12, bowtie2-style.
        if(re.split(':', line[12])[0] == "XS" and int(re.split(':', line[11])[2]) == int(re.split(':', line[12])[2])):
            XS = 1
    else:
        if(last_read[0] == line[0]):
            # Mate of the previous read: drop the pair if it was flagged as
            # multi-mapping, otherwise write both reads out.
            if(XS == 1 and re.split(':', line[12])[0] == "XS" and int(re.split(':', line[11])[2]) == int(re.split(':', line[12])[2])):
                last_read = None
                XS = 0
            else:
                of.writerow(last_read)
                of.writerow(line)
                XS = 0
                last_read = None
        else:
            # New read name: remember it and re-check the AS/XS tags (here
            # assumed in fields 12 and 13).
            last_read = line
            XS = 0
            if(re.split(':', line[13])[0] == "XS" and int(re.split(':', line[12])[2]) == int(re.split(':', line[13])[2])):
                XS = 1
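# Hedged usage sketch (assumes SAM records on stdin and bowtie2-style AS/XS
# optional tags at the field positions indexed above):
#
#   samtools view -h aligned.bam | python unique_read_filter.py > unique.sam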
|
aldebaran/openni2
|
refs/heads/master
|
Packaging/ReleaseVersion.py
|
32
|
#!/usr/bin/python
#/****************************************************************************
#* *
#* OpenNI 2.x Alpha *
#* Copyright (C) 2012 PrimeSense Ltd. *
#* *
#* This file is part of OpenNI. *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#****************************************************************************/
import os
import re
import sys
import shutil
import subprocess
import platform
import argparse
import stat
import UpdateVersion
if len(sys.argv) < 2 or sys.argv[1] in ('-h','--help'):
print "usage: " + sys.argv[0] + " <x86|x64|Arm|android> [UpdateVersion]"
sys.exit(1)
plat = sys.argv[1]
origDir = os.getcwd()
shouldUpdate = 0
if len(sys.argv) >= 3 and sys.argv[2] == 'UpdateVersion':
shouldUpdate = 1
if shouldUpdate == 1:
# Increase Build
UpdateVersion.VERSION_BUILD += 1
UpdateVersion.update()
def get_reg_values(reg_key, value_list):
# open the reg key
try:
reg_key = win32api.RegOpenKeyEx(*reg_key)
except pywintypes.error as e:
raise Exception("Failed to open registry key!")
# Get the values
try:
values = [(win32api.RegQueryValueEx(reg_key, name), data_type) for name, data_type in value_list]
# values list of ((value, type), expected_type)
for (value, data_type), expected in values:
if data_type != expected:
raise Exception("Bad registry value type! Expected %d, got %d instead." % (expected, data_type))
# values okay, leave only values
values = [value for ((value, data_type), expected) in values]
except pywintypes.error as e:
raise Exception("Failed to get registry value!")
finally:
try:
win32api.RegCloseKey(reg_key)
except pywintypes.error as e:
# We don't care if reg key close failed...
pass
return tuple(values)
def calc_jobs_number():
    # Count physical cores (sysctl on macOS, /proc/cpuinfo elsewhere) and
    # run two jobs per core; fall back to 1 core if counting fails.
    cores = 1
    try:
        if platform.system() == 'Darwin':
            txt = subprocess.check_output(['sysctl', '-n', 'hw.physicalcpu'])
        else:
            txt = subprocess.check_output('grep -c "^processor" /proc/cpuinfo', shell=True)
        cores = int(txt)
    except Exception:
        pass
    return str(cores * 2)
# Create installer
strVersion = UpdateVersion.getVersionName()
print "Creating installer for OpenNI " + strVersion + " " + plat
finalDir = "Final"
if not os.path.isdir(finalDir):
os.mkdir(finalDir)
if plat == 'android':
if not 'NDK_ROOT' in os.environ:
print 'Please define NDK_ROOT!'
sys.exit(2)
ndkDir = os.environ['NDK_ROOT']
buildDir = 'AndroidBuild'
if os.path.isdir(buildDir):
shutil.rmtree(buildDir)
outputDir = 'OpenNI-android-' + strVersion
if os.path.isdir(outputDir):
shutil.rmtree(outputDir)
os.makedirs(buildDir + '/jni')
os.symlink('../../../', buildDir + '/jni/OpenNI2')
shutil.copy('../Android.mk', buildDir + '/jni')
shutil.copy('../Application.mk', buildDir + '/jni')
rc = subprocess.call([ ndkDir + '/ndk-build', '-C', buildDir, '-j8' ])
if rc != 0:
print 'Build failed!'
sys.exit(3)
finalFile = finalDir + '/' + outputDir + '.tar'
shutil.move(buildDir + '/libs/armeabi-v7a', outputDir)
# add config files
shutil.copy('../Config/OpenNI.ini', outputDir)
shutil.copy('../Config/OpenNI2/Drivers/PS1080.ini', outputDir)
print('Creating archive ' + finalFile)
subprocess.check_call(['tar', '-cf', finalFile, outputDir])
elif platform.system() == 'Windows':
import win32con,pywintypes,win32api,platform
(bits,linkage) = platform.architecture()
matchObject = re.search('64',bits)
is_64_bit_machine = matchObject is not None
if is_64_bit_machine:
MSVC_KEY = (win32con.HKEY_LOCAL_MACHINE, r"SOFTWARE\Wow6432Node\Microsoft\VisualStudio\10.0")
else:
MSVC_KEY = (win32con.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\VisualStudio\10.0")
MSVC_VALUES = [("InstallDir", win32con.REG_SZ)]
VS_INST_DIR = get_reg_values(MSVC_KEY, MSVC_VALUES)[0]
PROJECT_SLN = "..\OpenNI.sln"
    buildLog = origDir+'/build.Release.'+plat+'.txt'
    devenv_cmd = '\"'+VS_INST_DIR + 'devenv\" '+PROJECT_SLN + ' /Project Install /Rebuild "Release|'+plat+'\" /out '+buildLog
    print(devenv_cmd)
    subprocess.check_call(devenv_cmd, close_fds=True)
    # everything OK, can remove build log
    os.remove(buildLog)
outFile = 'OpenNI-Windows-' + plat + '-' + strVersion + '.msi'
finalFile = os.path.join(finalDir, outFile)
if os.path.exists(finalFile):
os.remove(finalFile)
shutil.move('Install/bin/' + plat + '/en-us/' + outFile, finalDir)
elif platform.system() == 'Linux' or platform.system() == 'Darwin':
devNull = open('/dev/null', 'w')
subprocess.check_call(['make', '-C', '../', '-j' + calc_jobs_number(), 'PLATFORM=' + plat, 'clean'], stdout=devNull, stderr=devNull)
devNull.close()
buildLog = open(origDir + '/build.release.' + plat + '.log', 'w')
subprocess.check_call(['make', '-C', '../', '-j' + calc_jobs_number(), 'PLATFORM=' + plat, 'release'], stdout=buildLog, stderr=buildLog)
buildLog.close()
# everything OK, can remove build log
os.remove(origDir + '/build.release.' + plat + '.log')
else:
print "Unknown OS"
sys.exit(2)
# also copy Release Notes and CHANGES documents
shutil.copy('../ReleaseNotes.txt', finalDir)
shutil.copy('../CHANGES.txt', finalDir)
print "Installer can be found under: " + finalDir
print "Done"
|
KhalidGit/flask
|
refs/heads/master
|
Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/pip/_vendor/distlib/util.py
|
190
|
#
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = '(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
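# Illustrative sketch (not part of distlib): per the grammar above, extras
# follow the name and constraints may be parenthesized, e.g.
#
#   r = parse_requirement('foo [bar, baz] (>= 1.2, < 2.0)')
#   # r.name == 'foo'; r.extras == ['bar', 'baz']
#   # r.constraints == [('>=', '1.2'), ('<', '2.0')]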
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
in os.environ):
result = os.environ['__PYVENV_LAUNCHER__']
else:
result = sys.executable
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
data = json.load(stream)
result = data['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
cp = configparser.ConfigParser()
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix':
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self):
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException('Invalid specification '
'%r' % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
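# Illustrative sketch: ENTRY_RE above accepts "name = prefix:suffix [flags]",
# so, hypothetically:
#
#   e = get_export_entry('console = mypkg.cli:main [extra]')
#   # e.name == 'console'; e.prefix == 'mypkg.cli'; e.suffix == 'main'
#   # e.flags == ['extra']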
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
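# For example (POSIX, a sketch): path_to_cache_dir('/home/user/lib')
# returns '--home--user--lib.cache'.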
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
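# Illustrative sketch:
#
#   parse_credentials('alice:s3cret@example.com')  # ('alice', 's3cret', 'example.com')
#   parse_credentials('example.com')               # (None, None, 'example.com')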
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
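# Illustrative sketch: parse_name_and_version('Twisted Core (12.0.0)')
# returns ('twisted core', '12.0.0') -- the name is lower-cased and
# stripped, per the regex above.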
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
if headers.get('Content-Type') != 'application/json':
logger.debug('Unexpected response for JSON request')
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
def get_project_data(name):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/project.json' % (name[0].upper(), name))
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/package-%s.json' % (name[0].upper(), name, version))
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base):
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
        Publish an event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError:
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError:
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
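# Illustrative sketch: after seq.add('build', 'test'), the `dot` property
# renders something like:
#
#   digraph G {
#     build -> test;
#   }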
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:
            raise ValueError('Unknown format for %r' % archive_filename)
    # map the format to a tarfile mode here rather than in the sniffing
    # branch above, so an explicitly passed format= works as well as an
    # inferred one (previously mode was unset for explicit tar formats)
    if format == 'tgz':
        mode = 'r:gz'
    elif format == 'tbz':
        mode = 'r:bz2'
    elif format == 'tar':
        mode = 'r'
    elif format != 'zip':
        raise ValueError('Unknown format: %r' % format)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
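# A usage sketch with placeholder paths (not from this module): the archive
# format is inferred from the extension, and check=True routes every member
# through check_path() so entries cannot escape dest_dir.
def _unarchive_demo(archive='/tmp/example-1.0.tar.gz', dest='/tmp/example-1.0'):
    unarchive(archive, dest, check=True)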
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
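# A sketch (paths are hypothetical): since zip_dir() returns a BytesIO, the
# archive can be written to disk, uploaded, or inspected without creating a
# temporary file first.
def _zip_dir_demo(directory='/tmp/example-pkg', out='/tmp/example-pkg.zip'):
    data = zip_dir(directory)
    with open(out, 'wb') as f:
        f.write(data.getvalue())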
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
        if ((duration <= 0) and self.max is None) or self.cur == self.min:  # explicit grouping; 'and' binds tighter than 'or'
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
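# A minimal sketch of driving Progress, e.g. from a download loop (the chunk
# sizes here are invented): update()/increment() feed it, and percentage,
# ETA and speed come back formatted for display.
def _progress_demo():
    p = Progress(maxval=1000).start()
    for chunk in (250, 250, 500):
        p.increment(chunk)
        sys.stderr.write('\r%s %s %s' % (p.percentage, p.ETA, p.speed))
    p.stop()
    sys.stderr.write('\n')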
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
        prefix, items, suffix = rich_path_glob  # avoid shadowing the builtin 'set'
        for item in items.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
            for path, dirs, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
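# A sketch of the extended syntax (paths hypothetical): '**' recurses into
# subdirectories and '{a,b}' expands to alternatives, on top of the usual
# fnmatch-style wildcards handled by the standard glob module.
def _iglob_demo():
    py_files = list(iglob('src/**/*.py'))         # recursive match
    docs = list(iglob('docs/{index,intro}.rst'))  # set expansion
    return py_files, docs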
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
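# A sketch of wiring the handler into an opener. Because HTTPSOnlyHandler
# subclasses HTTPHandler, build_opener() won't add the default HTTP handler
# alongside it, so plain http:// requests fail with URLError.
def _https_only_demo(ca_certs=None):
    try:
        from urllib.request import build_opener    # 3.x
    except ImportError:
        from urllib2 import build_opener            # 2.x
    return build_opener(HTTPSOnlyHandler(ca_certs))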
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
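# A sketch (the endpoint URL is illustrative only): passing timeout= is what
# activates the Transport/SafeTransport subclasses above; without it, the
# stock xmlrpclib behaviour is used unchanged.
def _server_proxy_demo():
    return ServerProxy('https://xmlrpc.example.com/RPC2', timeout=10.0)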
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
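# A round-trip sketch (the file name and row contents are placeholders): on
# 2.x the rows are transparently encoded/decoded as UTF-8, so callers such
# as RECORD-file writers can work purely with text.
def _csv_demo(fn='/tmp/RECORD.csv'):
    with CSVWriter(fn) as writer:
        writer.writerow(['pkg/__init__.py', 'sha256=...', '0'])
    with CSVReader(path=fn) as reader:
        return [row for row in reader]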
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
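# A sketch of the '()' convention handled by configure_custom(): the config
# below is entirely made up. '()' names a callable to resolve, '[]' holds
# positional args, remaining keys become keyword args, and a '.' dict would
# set attributes on the result afterwards.
def _configurator_demo():
    cfg = Configurator({
        'greeting': {
            '()': 'collections.OrderedDict',
            '[]': ([('hello', 'world')],),
        }
    })
    return cfg['greeting']   # instantiated lazily on first access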
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
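# A sketch of using the mixin (the command is arbitrary and assumes a
# 'python' on PATH): with verbose=False each output line prints a '.',
# which pairs naturally with the Progress class above for long commands.
class _EchoRunner(SubprocessMixin):
    pass

def _subprocess_demo():
    runner = _EchoRunner(verbose=True)
    p = runner.run_command(['python', '-c', 'print("hello")'])
    return p.returncode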
|
costypetrisor/scikit-learn
|
refs/heads/master
|
examples/exercises/digits_classification_exercise.py
|
350
|
"""
================================
Digits Classification Exercise
================================
A tutorial exercise regarding the use of classification techniques on
the Digits dataset.
This exercise is used in the :ref:`clf_tut` part of the
:ref:`supervised_learning_tut` section of the
:ref:`stat_learn_tut_index`.
"""
print(__doc__)
from sklearn import datasets, neighbors, linear_model
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
n_train = int(.9 * n_samples)  # slice bounds must be integers, not floats
X_train = X_digits[:n_train]
y_train = y_digits[:n_train]
X_test = X_digits[n_train:]
y_test = y_digits[n_train:]
knn = neighbors.KNeighborsClassifier()
logistic = linear_model.LogisticRegression()
print('KNN score: %f' % knn.fit(X_train, y_train).score(X_test, y_test))
print('LogisticRegression score: %f'
% logistic.fit(X_train, y_train).score(X_test, y_test))
|
progwriter/tmgen
|
refs/heads/master
|
docs/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# TMgen documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 25 23:18:51 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TMgen'
copyright = u'2016, Victor Heorhiadi'
author = u'Victor Heorhiadi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'TMgen v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TMgendoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TMgen.tex', u'TMgen Documentation',
u'Victor Heorhiadi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tmgen', u'TMgen Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TMgen', u'TMgen Documentation',
author, 'TMgen', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
vabs22/zulip
|
refs/heads/master
|
zerver/migrations/0054_realm_icon.py
|
19
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-15 06:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0053_emailchangestatus'),
]
operations = [
migrations.AddField(
model_name='realm',
name='icon_source',
field=models.CharField(
choices=[('G', 'Hosted by Gravatar'), ('U', 'Uploaded by administrator')],
default='G', max_length=1),
),
migrations.AddField(
model_name='realm',
name='icon_version',
field=models.PositiveSmallIntegerField(default=1),
),
]
|
JeffHoogland/mtg-totals
|
refs/heads/master
|
elm/mtg-totals.py
|
1
|
theirName = "Mat Bimonte"
myLifeFile = "/media/jeff/Storage/CrashTest/DataTextFiles/MyLife.txt"
theirLifeFile = "/media/jeff/Storage/CrashTest/DataTextFiles/TheirLife.txt"
import efl.elementary as elm
from efl.elementary.window import StandardWindow, Window, ELM_WIN_DIALOG_BASIC
from efl.elementary.background import Background
from efl.elementary.entry import Entry
from efl.elementary.box import Box
from efl.elementary.button import Button
from efl.evas import EVAS_HINT_EXPAND, EVAS_HINT_FILL, \
EVAS_CALLBACK_KEY_UP, EVAS_EVENT_FLAG_ON_HOLD
EXPAND_BOTH = EVAS_HINT_EXPAND, EVAS_HINT_EXPAND
EXPAND_HORIZ = EVAS_HINT_EXPAND, 0.0
FILL_BOTH = EVAS_HINT_FILL, EVAS_HINT_FILL
FILL_HORIZ = EVAS_HINT_FILL, 0.5
class MainWindow(StandardWindow):
def __init__(self):
StandardWindow.__init__(self, "lifetracker", "Life Tracker", size=(200, 200))
self.callback_delete_request_add(lambda o: elm.exit())
self.elm_event_callback_add(self.eventsCb)
self.buildSubs()
self.resetLifeTotals()
def buildSubs(self):
self.subWin = Window("lifetracker", ELM_WIN_DIALOG_BASIC, self, size=(300, 300))
self.subWin.title = "Life Tracker Assignment"
bg = Background(self.subWin, size_hint_weight=EXPAND_BOTH)
bg.show()
self.subWin.resize_object_add(bg)
self.subWin.callback_delete_request_add(lambda o: elm.exit())
self.ourWin = Window("lifetracker", ELM_WIN_DIALOG_BASIC, self, size=(300, 300))
self.ourWin.title = "Life Tracker Key Strokes"
bg = Background(self.ourWin, size_hint_weight=EXPAND_BOTH)
bg.show()
self.ourWin.resize_object_add(bg)
self.ourWin.callback_delete_request_add(lambda o: elm.exit())
self.ourWin.elm_event_callback_add(self.eventsCb)
self.ourLife = ourLabel = Entry(self.ourWin, editable=False)
ourLabel.size_hint_weight = EXPAND_BOTH
ourLabel.size_hint_align = FILL_BOTH
ourLabel.text_style_user_push("DEFAULT='font_size=20'")
ourLabel.text = "Up and Down for Their Life, Left and Right for Mine"
ourLabel.show()
self.ourEntry = ourEntry = Entry(self.subWin)
ourEntry.size_hint_weight = EXPAND_HORIZ
ourEntry.size_hint_align = (-1, 0)
ourEntry.single_line_set(True)
ourEntry.text_style_user_push("DEFAULT='font_size=50'")
ourEntry.callback_activated_add(self.ourLifeUpdate)
ourEntry.text = "20"
ourEntry.show()
self.theirEntry = theirEntry = Entry(self.subWin)
theirEntry.size_hint_weight = EXPAND_HORIZ
theirEntry.size_hint_align = (-1, 0)
theirEntry.single_line_set(True)
theirEntry.text_style_user_push("DEFAULT='font_size=50'")
theirEntry.callback_activated_add(self.theirLifeUpdate)
theirEntry.text = "20"
theirEntry.show()
resetBtn = Button(self.subWin)
resetBtn.text = "Reset life totals"
resetBtn.callback_pressed_add(self.resetLifeTotals)
resetBtn.show()
entryBox = Box(self.subWin)
entryBox.size_hint_weight = EXPAND_HORIZ
entryBox.pack_end(ourEntry)
entryBox.pack_end(theirEntry)
entryBox.pack_end(resetBtn)
entryBox.show()
self.ourWin.resize_object_add(ourLabel)
self.subWin.resize_object_add(entryBox)
self.ourWin.show()
self.subWin.show()
self.ourWin.center(True, True)
self.subWin.center(True, True)
def resetLifeTotals(self, obj=None):
self.ourLifeTotal = 20
self.ourEntry.text = str(self.ourLifeTotal)
self.updateLifeText("mine")
self.theirLifeTotal = 20
self.theirEntry.text = str(self.theirLifeTotal)
self.updateLifeText("theirs")
def ourLifeUpdate(self, obj):
self.ourLifeTotal = int(obj.text)
self.updateLifeText("mine")
def theirLifeUpdate(self, obj):
self.theirLifeTotal = int(obj.text)
self.updateLifeText("theirs")
def updateLifeText(self, who):
if who == "mine":
lifeText = str(self.ourLifeTotal)
while len(lifeText) < 3:
lifeText = " %s"%lifeText
with open(myLifeFile, 'w') as myfile: #file is a builtin, don't name your file 'file'
myfile.write(lifeText)
else:
lifeText = str(self.theirLifeTotal)
while len(lifeText) < 3:
lifeText = " %s"%lifeText
with open(theirLifeFile,'w') as myfile: #file is a builtin, don't name your file 'file'
myfile.write(lifeText)
def lifeChange(self, who, direction):
if direction == "up":
change = 1
else:
change = -1
if who == "mine":
self.ourLifeTotal += change
self.ourEntry.text = str(self.ourLifeTotal)
self.updateLifeText("mine")
else:
self.theirLifeTotal += change
self.theirEntry.text = str(self.theirLifeTotal)
self.updateLifeText("theirs")
def eventsCb(self, obj, src, event_type, event):
#print(obj)
#print(src)
#print(event.key.lower())
#print(event_type)
#print("")
if not event_type == EVAS_CALLBACK_KEY_UP:
return False
if event.keyname == "Up":
self.lifeChange("mine", "up")
elif event.keyname == "Down":
self.lifeChange("mine", "down")
elif event.keyname == "Right":
self.lifeChange("thiers", "up")
elif event.keyname == "Left":
self.lifeChange("theirs", "down")
event.event_flags = event.event_flags | EVAS_EVENT_FLAG_ON_HOLD
return True
if __name__ == "__main__":
elm.init()
GUI = MainWindow()
#GUI.show()
elm.run()
elm.shutdown()
|
CompPhysics/ComputationalPhysicsMSU
|
refs/heads/master
|
doc/Programs/PythonCodesLectureNotes/ising2dim.py
|
4
|
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import math, sys
def periodic (i, limit, add):
"""
Choose correct matrix index with periodic
boundary conditions
Input:
- i: Base index
- limit: Highest \"legal\" index
- add: Number to add or subtract from i
"""
return (i+limit+add) % limit
def monteCarlo(temp, NSpins, MCcycles):
"""
Calculate the energy and magnetization
(\"straight\" and squared) for a given temperature
Input:
- temp: Temperature to calculate for
- NSpins: dimension of square matrix
- MCcycles: Monte-carlo MCcycles (how many times do we
flip the matrix?)
Output:
- E_av: Energy of matrix averaged over MCcycles, normalized to spins**2
- E_variance: Variance of energy, same normalization * temp**2
- M_av: Magnetic field of matrix, averaged over MCcycles, normalized to spins**2
- M_variance: Variance of magnetic field, same normalization * temp
- Mabs: Absolute value of magnetic field, averaged over MCcycles
"""
#Setup spin matrix, initialize to ground state
spin_matrix = np.zeros( (NSpins,NSpins), np.int8) + 1
#Create and initialize variables
E = M = 0
E_av = E2_av = M_av = M2_av = Mabs_av = 0
#Setup array for possible energy changes
w = np.zeros(17,np.float64)
for de in range(-8,9,4): #include +8
w[de+8] = math.exp(-de/temp)
#Calculate initial magnetization:
M = spin_matrix.sum()
#Calculate initial energy
for j in range(NSpins):
for i in range(NSpins):
E -= spin_matrix.item(i,j)*\
(spin_matrix.item(periodic(i,NSpins,-1),j) + spin_matrix.item(i,periodic(j,NSpins,1)))
#Start metropolis MonteCarlo computation
for i in range(MCcycles):
#Metropolis
#Loop over all spins, pick a random spin each time
for s in range(NSpins**2):
x = int(np.random.random()*NSpins)
y = int(np.random.random()*NSpins)
deltaE = 2*spin_matrix.item(x,y)*\
(spin_matrix.item(periodic(x,NSpins,-1), y) +\
spin_matrix.item(periodic(x,NSpins,1), y) +\
spin_matrix.item(x, periodic(y,NSpins,-1)) +\
spin_matrix.item(x, periodic(y,NSpins,1)))
if np.random.random() <= w[deltaE+8]:
#Accept!
spin_matrix[x,y] *= -1
M += 2*spin_matrix[x,y]
E += deltaE
#Update expectation values
E_av += E
E2_av += E**2
M_av += M
M2_av += M**2
Mabs_av += int(math.fabs(M))
#Normalize average values
E_av /= float(MCcycles);
E2_av /= float(MCcycles);
M_av /= float(MCcycles);
M2_av /= float(MCcycles);
Mabs_av /= float(MCcycles);
#Calculate variance and normalize to per-point and temp
E_variance = (E2_av-E_av*E_av)/float(NSpins*NSpins*temp*temp);
M_variance = (M2_av-M_av*M_av)/float(NSpins*NSpins*temp);
#Normalize returned averages to per-point
E_av /= float(NSpins*NSpins);
M_av /= float(NSpins*NSpins);
Mabs_av /= float(NSpins*NSpins);
return (E_av, E_variance, M_av, M_variance, Mabs_av)
# Main program
# temperature steps, initial temperature, final temperature
NumberTsteps = 20
InitialT = 1.5
FinalT = 2.5
Tsteps = (FinalT-InitialT)/NumberTsteps
Temp = np.zeros(NumberTsteps)
for T in range(NumberTsteps):
Temp[T] = InitialT+T*Tsteps
# Declare arrays that hold averages
Energy = np.zeros(NumberTsteps); Magnetization = np.zeros(NumberTsteps)
SpecificHeat = np.zeros(NumberTsteps); Susceptibility = np.zeros(NumberTsteps)
MagnetizationAbs = np.zeros(NumberTsteps)
# Define number of spins
NSpins = 20
# Define number of Monte Carlo cycles
MCcycles = 100000
# Perform the simulations over a range of temperatures
for T in range(NumberTsteps):
(Energy[T], SpecificHeat[T], Magnetization[T], Susceptibility[T], MagnetizationAbs[T]) = monteCarlo(Temp[T],NSpins,MCcycles)
# And finally plot
f = plt.figure(figsize=(18, 10)); # plot the calculated values
sp = f.add_subplot(2, 2, 1 );
plt.plot(Temp, Energy, 'o', color="green");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Energy ", fontsize=20);
sp = f.add_subplot(2, 2, 2 );
plt.plot(Temp, abs(Magnetization), 'o', color="red");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Magnetization ", fontsize=20);
sp = f.add_subplot(2, 2, 3 );
plt.plot(Temp, SpecificHeat, 'o', color="blue");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Specific Heat ", fontsize=20);
sp = f.add_subplot(2, 2, 4 );
plt.plot(Temp, Susceptibility, 'o', color="black");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Susceptibility", fontsize=20);
plt.show()
|
chimkentec/KodiMODo_rep
|
refs/heads/master
|
plugin.video.youtube/resources/lib/youtube/helper/yt_setup_wizard.py
|
10
|
__author__ = 'bromix'
def _process_language(provider, context):
if not context.get_ui().on_yes_no_input(context.localize(provider.LOCAL_MAP['youtube.setup_wizard.adjust']),
context.localize(provider.LOCAL_MAP[
'youtube.setup_wizard.adjust.language_and_region'])):
return
client = provider.get_client(context)
kodi_language = context.get_language()
json_data = client.get_supported_languages(kodi_language)
items = json_data['items']
language_list = []
for item in items:
language_id = item['id'].split('-')[0]
language_name = item['snippet']['name']
language_list.append((language_name, language_id))
pass
language_list = sorted(language_list, key=lambda x: x[0])
language_id = context.get_ui().on_select(
context.localize(provider.LOCAL_MAP['youtube.setup_wizard.select_language']), language_list)
if language_id == -1:
return
json_data = client.get_supported_regions(language=language_id)
items = json_data['items']
region_list = []
for item in items:
region_id = item['id']
region_name = item['snippet']['name']
region_list.append((region_name, region_id))
pass
region_list = sorted(region_list, key=lambda x: x[0])
region_id = context.get_ui().on_select(context.localize(provider.LOCAL_MAP['youtube.setup_wizard.select_region']),
region_list)
if region_id == -1:
return
# set new language id
language_id = language_id + '-' + region_id
context.get_settings().set_string('youtube.language', language_id)
provider.reset_client()
pass
def process(provider, context):
_process_language(provider, context)
pass
|
dagmartin/capirca
|
refs/heads/master
|
lib/policyreader.py
|
7
|
#!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility to provide exploration of policy definition files.
Allows read only access of policy definition files. The library
creates a Policy object, which has filters containing terms.
This library does no expansion on the tokens directly, such as in policy.py.
TODO: This library is currently incomplete, and does not allow access to
every argument of a policy term.
"""
__author__ = 'watson@google.com (Tony Watson)'
import naming
class FileOpenError(Exception):
"""Trouble opening a file."""
class Filter(object):
"""Simple filter with a name a list of terms."""
def __init__(self, filtername=''):
self.name = filtername
self.term = []
def __str__(self):
rval = []
title = 'Filter: %s' % str(self.name)
rval.append('\n%s' % title)
rval.append('-' * len(title))
for term in self.term:
rval.append(str(term))
return '\n\n'.join(rval)
class Term(object):
"""Simple term with a name a list of attributes."""
def __init__(self, termname=''):
self.name = termname
self.source = []
self.destination = []
self.sport = []
self.dport = []
self.action = []
self.option = []
self.protocol = []
def __str__(self):
rval = []
rval.append(' Term: %s' % self.name)
rval.append(' Source-address:: %s' % ' '.join(self.source))
rval.append(' Destination-address:: %s' % ' '.join(self.destination))
rval.append(' Source-port:: %s' % ' '.join(self.sport))
rval.append(' Destination-port:: %s' % ' '.join(self.dport))
rval.append(' Protocol:: %s' % ' '.join(self.protocol))
rval.append(' Option:: %s' % ' '.join(self.option))
rval.append(' Action:: %s' % ' '.join(self.action))
return '\n'.join(rval)
class Policy(object):
"""Holds basic attributes of an unexpanded policy definition file."""
def __init__(self, filename, defs_data=None):
"""Build policy object and naming definitions from provided filenames.
Args:
filename: location of a .pol file
defs_data: location of naming definitions directory, if any
"""
self.defs = naming.Naming(defs_data)
self.filter = []
try:
self.data = open(filename, 'r').readlines()
except IOError, error_info:
info = str(filename) + ' cannot be opened'
raise FileOpenError('%s\n%s' % (info, error_info))
indent = 0
in_header = False
in_term = False
filt = Filter()
term = Term()
in_string = False
for line in self.data:
words = line.strip().split()
quotes = len(line.split('"')) + 1
if quotes % 2: # are we in or out of double quotes
in_string = not in_string # flip status of quote status
if not in_string:
if '{' in words:
indent += 1
if words:
if words[0] == 'header':
in_header = True
if words[0] == 'term':
in_term = True
term = Term(words[1])
if in_header and words[0] == 'target::':
if filt.name != words[2]: # avoid empty dupe filters due to
filt = Filter(words[2]) # multiple target header lines
if in_term:
if words[0] == 'source-address::':
term.source.extend(words[1:])
if words[0] == 'destination-address::':
term.destination.extend(words[1:])
if words[0] == 'source-port::':
term.sport.extend(words[1:])
if words[0] == 'destination-port::':
term.dport.extend(words[1:])
if words[0] == 'action::':
term.action.extend(words[1:])
if words[0] == 'protocol::':
term.protocol.extend(words[1:])
if words[0] == 'option::':
term.option.extend(words[1:])
if '}' in words:
indent -= 1
if in_header:
self.filter.append(filt)
in_header = False
if in_term:
filt.term.append(term)
in_term = False
def __str__(self):
return '\n'.join(str(next) for next in self.filter)
def Matches(self, src=None, dst=None, dport=None, sport=None,
filtername=None):
"""Return list of term names that match specific attributes.
Args:
src: source ip address '12.1.1.1'
dst: destination ip address '10.1.1.1'
dport: any port/protocol combo, such as '80/tcp' or '53/udp'
sport: any port/protocol combo, such as '80/tcp' or '53/udp'
filtername: a filter name or None to search all filters
Returns:
results: list of lists, each list is index to filter & term in the policy
Example:
p=policyreader.Policy('policy_path', 'definitions_path')
p.Matches(dst='209.85.216.5', dport='25/tcp')
[[0, 26]]
print p.filter[0].term[26].name
for match in p.Matches(dst='209.85.216.5'):
print p.filter[match[0]].term[match[1]].name
"""
rval = []
results = []
filter_list = []
dport_parents = None
sport_parents = None
destination_parents = None
source_parents = None
if dport:
dport_parents = self.defs.GetServiceParents(dport)
if sport:
sport_parents = self.defs.GetServiceParents(sport)
if dst:
destination_parents = self.defs.GetIpParents(dst)
try:
destination_parents.remove('ANY')
destination_parents.remove('RESERVED')
except ValueError:
pass # ignore and continue
if src:
source_parents = self.defs.GetIpParents(src)
try:
source_parents.remove('ANY')
source_parents.remove('RESERVED')
except ValueError:
pass # ignore and continue
if not filtername:
filter_list = self.filter
else:
for idx, next in enumerate(self.filter):
if filtername == next.name:
filter_list = [self.filter[idx]]
if not filter_list:
      raise ValueError('invalid filter name: %s' % filtername)
for findex, xfilter in enumerate(filter_list):
mterms = []
mterms.append(set()) # dport
mterms.append(set()) # sport
mterms.append(set()) # dst
mterms.append(set()) # src
for tindex, term in enumerate(xfilter.term):
if dport_parents:
for token in dport_parents:
if token in term.dport:
mterms[0].add(tindex)
else:
mterms[0].add(tindex)
if sport_parents:
for token in sport_parents:
if token in term.sport:
mterms[1].add(tindex)
else:
mterms[1].add(tindex)
if destination_parents:
for token in destination_parents:
if token in term.destination:
mterms[2].add(tindex)
else:
mterms[2].add(tindex)
if source_parents:
for token in source_parents:
if token in term.source:
mterms[3].add(tindex)
else:
mterms[3].add(tindex)
rval.append(list(mterms[0] & mterms[1] & mterms[2] & mterms[3]))
for findex, fresult in enumerate(rval):
for next in list(fresult):
results.append([findex, next])
return results
|
defance/edx-platform
|
refs/heads/master
|
common/test/acceptance/pages/studio/settings_group_configurations.py
|
10
|
"""
Course Group Configurations page.
"""
from bok_choy.promise import EmptyPromise
from ..common.utils import confirm_prompt
from .course_page import CoursePage
class GroupConfigurationsPage(CoursePage):
"""
Course Group Configurations page.
"""
url_path = "group_configurations"
experiment_groups_css = ".experiment-groups"
content_groups_css = ".content-groups"
def is_browser_on_page(self):
"""
Verify that the browser is on the page and it is not still loading.
"""
return all([
self.q(css='body.view-group-configurations').present,
self.q(css='div.ui-loading.is-hidden').present
])
@property
def experiment_group_configurations(self):
"""
Return list of the experiment group configurations for the course.
"""
return self._get_groups(self.experiment_groups_css)
@property
def content_groups(self):
"""
Return list of the content groups for the course.
"""
return self._get_groups(self.content_groups_css)
def _get_groups(self, prefix):
"""
Return list of the group-configurations-list-item's of specified type for the course.
"""
css = prefix + ' .wrapper-collection'
return [GroupConfiguration(self, prefix, index) for index in xrange(len(self.q(css=css)))]
def create_experiment_group_configuration(self):
"""
Creates new group configuration.
"""
self.q(css=self.experiment_groups_css + " .new-button").first.click()
def create_first_content_group(self):
"""
Creates new content group when there are none initially defined.
"""
self.q(css=self.content_groups_css + " .new-button").first.click()
def add_content_group(self):
"""
Creates new content group when at least one already exists
"""
self.q(css=self.content_groups_css + " .action-add").first.click()
@property
def no_experiment_groups_message_is_present(self):
return self._no_content_message(self.experiment_groups_css).present
@property
def no_content_groups_message_is_present(self):
return self._no_content_message(self.content_groups_css).present
@property
def no_experiment_groups_message_text(self):
return self._no_content_message(self.experiment_groups_css).text[0]
@property
def no_content_groups_message_text(self):
return self._no_content_message(self.content_groups_css).text[0]
def _no_content_message(self, prefix):
"""
Returns the message about "no content" for the specified type.
"""
return self.q(css='.wrapper-content ' + prefix + ' .no-content')
@property
def experiment_group_sections_present(self):
"""
Returns whether or not anything related to content experiments is present.
"""
return self.q(css=self.experiment_groups_css).present or self.q(css=".experiment-groups-doc").present
class GroupConfiguration(object):
"""
Group Configuration wrapper.
"""
def __init__(self, page, prefix, index):
self.page = page
self.SELECTOR = prefix + ' .wrapper-collection-{}'.format(index)
self.index = index
def get_selector(self, css=''):
return ' '.join([self.SELECTOR, css])
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.get_selector(css=selector))
def toggle(self):
"""
Expand/collapse group configuration.
"""
self.find_css('a.group-toggle').first.click()
@property
def is_expanded(self):
"""
Group configuration usage information is expanded.
"""
return self.find_css('a.group-toggle.hide-groups').present
def add_group(self):
"""
Add new group.
"""
self.find_css('button.action-add-group').first.click()
def get_text(self, css):
"""
Return text for the defined by css locator.
"""
return self.find_css(css).first.text[0]
def click_outline_anchor(self):
"""
Click on the `Course Outline` link.
"""
self.find_css('p.group-configuration-usage-text a').first.click()
def click_unit_anchor(self, index=0):
"""
Click on the link to the unit.
"""
self.find_css('li.group-configuration-usage-unit a').nth(index).click()
def edit(self):
"""
Open editing view for the group configuration.
"""
self.find_css('.action-edit .edit').first.click()
@property
def delete_button_is_disabled(self):
return self.find_css('.actions .delete.is-disabled').present
@property
def delete_button_is_present(self):
"""
Returns whether or not the delete icon is present.
"""
return self.find_css('.actions .delete').present
def delete(self):
"""
Delete the group configuration.
"""
self.find_css('.actions .delete').first.click()
confirm_prompt(self.page)
def save(self):
"""
Save group configuration.
"""
self.find_css('.action-primary').first.click()
self.page.wait_for_ajax()
def cancel(self):
"""
Cancel group configuration.
"""
self.find_css('.action-secondary').first.click()
@property
def mode(self):
"""
Return group configuration mode.
"""
if self.find_css('.collection-edit').present:
return 'edit'
elif self.find_css('.collection').present:
return 'details'
@property
def id(self):
"""
Return group configuration id.
"""
return self.get_text('.group-configuration-id .group-configuration-value')
@property
def validation_message(self):
"""
Return validation message.
"""
return self.get_text('.message-status.error')
@property
def usages(self):
"""
Return list of usages.
"""
css = '.group-configuration-usage-unit'
return self.find_css(css).text
@property
def name(self):
"""
Return group configuration name.
"""
return self.get_text('.title')
@name.setter
def name(self, value):
"""
Set group configuration name.
"""
self.find_css('.collection-name-input').first.fill(value)
@property
def description(self):
"""
Return group configuration description.
"""
return self.get_text('.group-configuration-description')
@description.setter
def description(self, value):
"""
Set group configuration description.
"""
self.find_css('.group-configuration-description-input').first.fill(value)
@property
def groups(self):
"""
Return list of groups.
"""
def group_selector(group_index):
return self.get_selector('.group-{} '.format(group_index))
return [Group(self.page, group_selector(index)) for index, element in enumerate(self.find_css('.group'))]
@property
def delete_note(self):
"""
Return delete note for the group configuration.
"""
return self.find_css('.wrapper-delete-button').first.attrs('data-tooltip')[0]
@property
def details_error_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .fa-exclamation-circle').present
@property
def details_warning_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .fa-warning').present
@property
def details_message_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .group-configuration-validation-message').present
@property
def details_message_text(self):
return self.find_css('.wrapper-group-configuration-usages .group-configuration-validation-message').text[0]
@property
def edit_warning_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-validation .fa-warning').present
@property
def edit_warning_message_is_present(self):
return self.find_css('.wrapper-group-configuration-validation .group-configuration-validation-text').present
@property
def edit_warning_message_text(self):
return self.find_css('.wrapper-group-configuration-validation .group-configuration-validation-text').text[0]
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
class Group(object):
"""
Group wrapper.
"""
def __init__(self, page, prefix_selector):
self.page = page
self.prefix = prefix_selector
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.prefix + selector)
@property
def name(self):
"""
Return the name of the group .
"""
css = '.group-name'
return self.find_css(css).first.text[0]
@name.setter
def name(self, value):
"""
Set the name for the group.
"""
css = '.group-name'
self.find_css(css).first.fill(value)
@property
def allocation(self):
"""
Return allocation for the group.
"""
css = '.group-allocation'
return self.find_css(css).first.text[0]
def remove(self):
"""
Remove the group.
"""
css = '.action-close'
return self.find_css(css).first.click()
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
|
mrbox/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_slice.py
|
428
|
from django.template.defaultfilters import slice_filter
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class SliceTests(SimpleTestCase):
@setup({'slice01': '{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}'})
def test_slice01(self):
output = self.engine.render_to_string('slice01', {'a': 'a&b', 'b': mark_safe('a&b')})
        self.assertEqual(output, '&amp;b &b')
@setup({'slice02': '{% autoescape off %}{{ a|slice:"1:3" }} {{ b|slice:"1:3" }}{% endautoescape %}'})
def test_slice02(self):
output = self.engine.render_to_string('slice02', {'a': 'a&b', 'b': mark_safe('a&b')})
self.assertEqual(output, '&b &b')
class FunctionTests(SimpleTestCase):
def test_zero_length(self):
self.assertEqual(slice_filter('abcdefg', '0'), '')
def test_index(self):
self.assertEqual(slice_filter('abcdefg', '1'), 'a')
def test_negative_index(self):
self.assertEqual(slice_filter('abcdefg', '-1'), 'abcdef')
def test_range(self):
self.assertEqual(slice_filter('abcdefg', '1:2'), 'b')
def test_range_multiple(self):
self.assertEqual(slice_filter('abcdefg', '1:3'), 'bc')
def test_range_step(self):
self.assertEqual(slice_filter('abcdefg', '0::2'), 'aceg')
|
xwolf12/scikit-learn
|
refs/heads/master
|
sklearn/metrics/tests/__init__.py
|
12133432
| |
andela-ooladayo/django
|
refs/heads/master
|
tests/migrations/related_models_app/__init__.py
|
12133432
| |
pczerkas/aodh
|
refs/heads/master
|
aodh/api/controllers/__init__.py
|
12133432
| |
pdellaert/ansible
|
refs/heads/devel
|
test/units/module_utils/network/avi/__init__.py
|
12133432
| |
kmike/gensim
|
refs/heads/develop
|
gensim/test/test_models.py
|
21
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import os
import os.path
import tempfile
import numpy
import scipy.linalg
from gensim.corpora import mmcorpus, Dictionary
from gensim.models import lsimodel, ldamodel, tfidfmodel, rpmodel, logentropy_model, ldamulticore
from gensim.models.wrappers import ldamallet
from gensim import matutils
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_models.tst')
class TestLsiModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
def testTransform(self):
"""Test lsi[vector] transformation."""
# create the transformation model
model = lsimodel.LsiModel(self.corpus, num_topics=2)
# make sure the decomposition is enough accurate
u, s, vt = scipy.linalg.svd(matutils.corpus2dense(self.corpus, self.corpus.num_terms), full_matrices=False)
self.assertTrue(numpy.allclose(s[:2], model.projection.s)) # singular values must match
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = numpy.array([-0.6594664, 0.142115444]) # scaled LSI version
# expected = numpy.array([-0.1973928, 0.05591352]) # non-scaled LSI version
self.assertTrue(numpy.allclose(abs(vec), abs(expected))) # transformed entries must be equal up to sign
def testCorpusTransform(self):
"""Test lsi[corpus] transformation."""
model = lsimodel.LsiModel(self.corpus, num_topics=2)
        got = numpy.vstack([matutils.sparse2full(doc, 2) for doc in model[self.corpus]])
expected = numpy.array([
[ 0.65946639, 0.14211544],
[ 2.02454305, -0.42088759],
[ 1.54655361, 0.32358921],
[ 1.81114125, 0.5890525 ],
[ 0.9336738 , -0.27138939],
[ 0.01274618, -0.49016181],
[ 0.04888203, -1.11294699],
[ 0.08063836, -1.56345594],
[ 0.27381003, -1.34694159]])
self.assertTrue(numpy.allclose(abs(got), abs(expected))) # must equal up to sign
def testOnlineTransform(self):
corpus = list(self.corpus)
doc = corpus[0] # use the corpus' first document for testing
# create the transformation model
model2 = lsimodel.LsiModel(corpus=corpus, num_topics=5) # compute everything at once
model = lsimodel.LsiModel(corpus=None, id2word=model2.id2word, num_topics=5) # start with no documents, we will add them later
# train model on a single document
model.add_documents([corpus[0]])
# transform the testing document with this partial transformation
transformed = model[doc]
vec = matutils.sparse2full(transformed, model.num_topics) # convert to dense vector, for easier equality tests
expected = numpy.array([-1.73205078, 0.0, 0.0, 0.0, 0.0]) # scaled LSI version
self.assertTrue(numpy.allclose(abs(vec), abs(expected), atol=1e-6)) # transformed entries must be equal up to sign
# train on another 4 documents
model.add_documents(corpus[1:5], chunksize=2) # train on 4 extra docs, in chunks of 2 documents, for the lols
# transform a document with this partial transformation
transformed = model[doc]
vec = matutils.sparse2full(transformed, model.num_topics) # convert to dense vector, for easier equality tests
expected = numpy.array([-0.66493785, -0.28314203, -1.56376302, 0.05488682, 0.17123269]) # scaled LSI version
self.assertTrue(numpy.allclose(abs(vec), abs(expected), atol=1e-6)) # transformed entries must be equal up to sign
# train on the rest of documents
model.add_documents(corpus[5:])
# make sure the final transformation is the same as if we had decomposed the whole corpus at once
vec1 = matutils.sparse2full(model[doc], model.num_topics)
vec2 = matutils.sparse2full(model2[doc], model2.num_topics)
self.assertTrue(numpy.allclose(abs(vec1), abs(vec2), atol=1e-5)) # the two LSI representations must equal up to sign
def testPersistence(self):
fname = testfile()
model = lsimodel.LsiModel(self.corpus, num_topics=2)
model.save(fname)
model2 = lsimodel.LsiModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = lsimodel.LsiModel(self.corpus, num_topics=2)
model.save(fname)
model2 = lsimodel.LsiModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
fname = testfile()
model = lsimodel.LsiModel(self.corpus, num_topics=2)
# test storing the internal arrays into separate files
model.save(fname, sep_limit=0)
# now load the external arrays via mmap
model2 = lsimodel.LsiModel.load(fname, mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.projection.u, numpy.memmap))
self.assertTrue(isinstance(model2.projection.s, numpy.memmap))
self.assertTrue(numpy.allclose(model.projection.u, model2.projection.u))
self.assertTrue(numpy.allclose(model.projection.s, model2.projection.s))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
fname = testfile() + '.gz'
model = lsimodel.LsiModel(self.corpus, num_topics=2)
# test storing the internal arrays into separate files
model.save(fname, sep_limit=0)
        # now load the external arrays via mmap -- except it turns out this
        # test doesn't exercise that, because there are no arrays to be mmapped!
        return
        self.assertRaises(IOError, lsimodel.LsiModel.load, fname, mmap='r')
#endclass TestLsiModel
class TestRpModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
def testTransform(self):
# create the transformation model
numpy.random.seed(13) # HACK; set fixed seed so that we always get the same random matrix (and can compare against expected results)
model = rpmodel.RpModel(self.corpus, num_topics=2)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = numpy.array([-0.70710677, 0.70710677])
self.assertTrue(numpy.allclose(vec, expected)) # transformed entries must be equal up to sign
def testPersistence(self):
fname = testfile()
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
#endclass TestRpModel
class TestLdaModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
self.class_ = ldamodel.LdaModel
def testTransform(self):
passed = False
# sometimes, LDA training gets stuck at a local minimum
# in that case try re-training the model from scratch, hoping for a
# better random initialization
for i in range(5): # restart at most 5 times
# create the transformation model
model = self.class_(id2word=dictionary, num_topics=2, passes=100)
model.update(self.corpus)
# transform one document
doc = list(corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = [0.13, 0.87]
passed = numpy.allclose(sorted(vec), sorted(expected), atol=1e-2) # must contain the same values, up to re-ordering
if passed:
break
logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
(i, sorted(vec), sorted(expected)))
self.assertTrue(passed)
def testTopTopics(self):
# create the transformation model
model = self.class_(id2word=dictionary, num_topics=2, passes=100)
model.update(self.corpus)
model.top_topics(self.corpus)
def testPasses(self):
# long message includes the original error message with a custom one
self.longMessage = True
# construct what we expect when passes aren't involved
test_rhots = list()
model = self.class_(id2word=dictionary, chunksize=1, num_topics=2)
final_rhot = lambda: pow(model.offset + (1 * model.num_updates) / model.chunksize, -model.decay)
# generate 5 updates to test rhot on
for x in range(5):
model.update(self.corpus)
test_rhots.append(final_rhot())
for passes in [1, 5, 10, 50, 100]:
model = self.class_(id2word=dictionary, chunksize=1, num_topics=2, passes=passes)
self.assertEqual(final_rhot(), 1.0)
# make sure the rhot matches the test after each update
for test_rhot in test_rhots:
model.update(self.corpus)
msg = ", ".join(map(str, [passes, model.num_updates, model.state.numdocs]))
self.assertAlmostEqual(final_rhot(), test_rhot, msg=msg)
self.assertEqual(model.state.numdocs, len(corpus) * len(test_rhots))
self.assertEqual(model.num_updates, len(corpus) * len(test_rhots))
# def testTopicSeeding(self):
# for topic in range(2):
# passed = False
# for i in range(5): # restart at most this many times, to mitigate LDA randomness
# # try seeding it both ways round, check you get the same
# # topics out but with which way round they are depending
# # on the way round they're seeded
# eta = numpy.ones((2, len(dictionary))) * 0.5
# system = dictionary.token2id[u'system']
# trees = dictionary.token2id[u'trees']
# # aggressively seed the word 'system', in one of the
# # two topics, 10 times higher than the other words
# eta[topic, system] *= 10.0
# model = self.class_(id2word=dictionary, num_topics=2, passes=200, eta=eta)
# model.update(self.corpus)
# topics = [dict((word, p) for p, word in model.show_topic(j, topn=None)) for j in range(2)]
# # check that the word 'system' in the topic we seeded got a high weight,
# # and the word 'trees' (the main word in the other topic) a low weight --
# # and vice versa for the other topic (which we didn't seed with 'system')
# passed = (
# (topics[topic][u'system'] > topics[topic][u'trees'])
# and
# (topics[1 - topic][u'system'] < topics[1 - topic][u'trees'])
# )
# if passed:
# break
# logging.warning("LDA failed to converge on attempt %i (got %s)", i, topics)
# self.assertTrue(passed)
def testPersistence(self):
fname = testfile()
model = self.class_(self.corpus, num_topics=2)
model.save(fname)
model2 = self.class_.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceIgnore(self):
fname = testfile()
model = ldamodel.LdaModel(self.corpus, num_topics=2)
model.save(fname, ignore='id2word')
model2 = ldamodel.LdaModel.load(fname)
self.assertTrue(model2.id2word is None)
model.save(fname, ignore=['id2word'])
model2 = ldamodel.LdaModel.load(fname)
self.assertTrue(model2.id2word is None)
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = self.class_(self.corpus, num_topics=2)
model.save(fname)
model2 = self.class_.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
fname = testfile()
model = self.class_(self.corpus, num_topics=2)
# simulate storing large arrays separately
        model.save(fname, sep_limit=0)
        # test loading the large model arrays with mmap
        model2 = self.class_.load(fname, mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.expElogbeta, numpy.memmap))
self.assertTrue(numpy.allclose(model.expElogbeta, model2.expElogbeta))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
fname = testfile() + '.gz'
model = self.class_(self.corpus, num_topics=2)
# simulate storing large arrays separately
model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
self.assertRaises(IOError, self.class_.load, fname, mmap='r')
#endclass TestLdaModel
class TestLdaMulticore(TestLdaModel):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
self.class_ = ldamulticore.LdaMulticore
#endclass TestLdaMulticore
class TestLdaMallet(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
mallet_home = os.environ.get('MALLET_HOME', None)
self.mallet_path = os.path.join(mallet_home, 'bin', 'mallet') if mallet_home else None
def testTransform(self):
if not self.mallet_path:
return
passed = False
for i in range(5): # restart at most 5 times
# create the transformation model
model = ldamallet.LdaMallet(self.mallet_path, corpus, id2word=dictionary, num_topics=2, iterations=200)
# transform one document
doc = list(corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = [0.49, 0.51]
passed = numpy.allclose(sorted(vec), sorted(expected), atol=1e-2) # must contain the same values, up to re-ordering
if passed:
break
logging.warning("LDA failed to converge on attempt %i (got %s, expected %s)" %
(i, sorted(vec), sorted(expected)))
self.assertTrue(passed)
def testPersistence(self):
if not self.mallet_path:
return
fname = testfile()
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
model.save(fname)
model2 = ldamallet.LdaMallet.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
if not self.mallet_path:
return
fname = testfile() + '.gz'
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
model.save(fname)
model2 = ldamallet.LdaMallet.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
if not self.mallet_path:
return
fname = testfile()
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
# simulate storing large arrays separately
        model.save(fname, sep_limit=0)
        # test loading the large model arrays with mmap
        model2 = ldamodel.LdaModel.load(fname, mmap='r')
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(isinstance(model2.wordtopics, numpy.memmap))
self.assertTrue(numpy.allclose(model.wordtopics, model2.wordtopics))
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
if not self.mallet_path:
return
fname = testfile() + '.gz'
model = ldamallet.LdaMallet(self.mallet_path, self.corpus, num_topics=2, iterations=100)
# simulate storing large arrays separately
model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
self.assertRaises(IOError, ldamodel.LdaModel.load, fname, mmap='r')
#endclass TestLdaMallet
class TestTfidfModel(unittest.TestCase):
def setUp(self):
self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
def testTransform(self):
# create the transformation model
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
expected = [(0, 0.57735026918962573), (1, 0.57735026918962573), (2, 0.57735026918962573)]
self.assertTrue(numpy.allclose(transformed, expected))
def testInit(self):
# create the transformation model by analyzing a corpus
# uses the global `corpus`!
model1 = tfidfmodel.TfidfModel(corpus)
# make sure the dfs<->idfs transformation works
self.assertEqual(model1.dfs, dictionary.dfs)
self.assertEqual(model1.idfs, tfidfmodel.precompute_idfs(model1.wglobal, dictionary.dfs, len(corpus)))
# create the transformation model by directly supplying a term->docfreq
# mapping from the global var `dictionary`.
model2 = tfidfmodel.TfidfModel(dictionary=dictionary)
self.assertEqual(model1.idfs, model2.idfs)
def testPersistence(self):
fname = testfile()
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname)
self.assertTrue(model.idfs == model2.idfs)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
self.assertTrue(model.idfs == model2.idfs)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
#endclass TestTfidfModel
class TestLogEntropyModel(unittest.TestCase):
def setUp(self):
self.corpus_small = mmcorpus.MmCorpus(datapath('test_corpus_small.mm'))
self.corpus_ok = mmcorpus.MmCorpus(datapath('test_corpus_ok.mm'))
def testTransform(self):
# create the transformation model
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=False)
# transform one document
doc = list(self.corpus_ok)[0]
transformed = model[doc]
expected = [(0, 0.3748900964125389),
(1, 0.30730215324230725),
(3, 1.20941755462856)]
self.assertTrue(numpy.allclose(transformed, expected))
def testPersistence(self):
fname = testfile()
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=True)
model.save(fname)
model2 = logentropy_model.LogEntropyModel.load(fname)
self.assertTrue(model.entr == model2.entr)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec]))
def testPersistenceCompressed(self):
fname = testfile() + '.gz'
model = logentropy_model.LogEntropyModel(self.corpus_ok, normalize=True)
model.save(fname)
model2 = logentropy_model.LogEntropyModel.load(fname, mmap=None)
self.assertTrue(model.entr == model2.entr)
tstvec = []
self.assertTrue(numpy.allclose(model[tstvec], model2[tstvec]))
#endclass TestLogEntropyModel
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
BladeSmithJohn/nixysa
|
refs/heads/master
|
third_party/ply-3.1/test/lex_module_import.py
|
174
|
# -----------------------------------------------------------------------------
# lex_module_import.py
#
# A lexer defined in a module, but built in lex_module.py
# -----------------------------------------------------------------------------
tokens = (
'NAME','NUMBER',
'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
'LPAREN','RPAREN',
)
# Tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
def t_NUMBER(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %s" % t.value)
t.value = 0
return t
t_ignore = " \t"
def t_newline(t):
r'\n+'
t.lineno += t.value.count("\n")
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
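# Illustrative sketch (not part of the original test file): lex_module.py
# builds a lexer from this module's token definitions roughly like so --
#   import ply.lex as lex
#   import lex_module_import
#   lexer = lex.lex(module=lex_module_import)
#   lexer.input("x = 3 * 4 + 5")
#   for tok in iter(lexer.token, None):
#       print(tok)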
|
clumsy/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/gis/gdal/prototypes/geom.py
|
315
|
import re
from datetime import date
from ctypes import c_char, c_char_p, c_double, c_int, c_ubyte, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal, GEOJSON
from django.contrib.gis.gdal.prototypes.errcheck import check_bool, check_envelope
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, geom_output, int_output, \
srs_output, string_output, void_output
### Generation routines specific to this module ###
def env_func(f, argtypes):
"For getting OGREnvelopes."
f.argtypes = argtypes
f.restype = None
f.errcheck = check_envelope
return f
def pnt_func(f):
"For accessing point information."
return double_output(f, [c_void_p, c_int])
def topology_func(f):
f.argtypes = [c_void_p, c_void_p]
f.restype = c_int
    f.errcheck = check_bool
return f
### OGR_G ctypes function prototypes ###
# GeoJSON routines, if supported.
if GEOJSON:
from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True)
to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True)
else:
from_json = False
to_json = False
to_kml = False
# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)
# Geometry creation routines.
from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2)
from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1)
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])
# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])
# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)
# Geometry export routines.
to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB.
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)])
to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True)
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])
# Geometry spatial-reference related routines.
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])
# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False)
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p])
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(lgdal.OGR_G_GetPoint, [c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)
# Topology routines.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)
# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])
# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
|
hunterfu/it-manager
|
refs/heads/master
|
stock_tech/trade_system/trade_system.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import yaml
import sys
import getopt
import commands
#import pysqlite2.dbapi2 as sqlite
import sqlite3 as sqlite
import datetime
from pprint import pprint
def load_config():
    """
    Load the configuration file.
    """
    configFile = "%s/conf/%s" % (base_dir, "global.yaml")
    stream = open(configFile, 'r')
    data = yaml.load(stream)
    return data
def connect_db():
    """
    Stock position table (portfolio):
        symbol       stock symbol
        trade_date   trade date
        action       buy/sell
        quantity     number of shares
        open_price   opening price
        stop_price   stop-loss price
        gain_price   expected take-profit price
        comm         commission/fees
        is_close     whether the position has been closed
    Cash table (money):
        free_money   cash currently available for opening positions
                     (initially the starting capital)
        balance_date settlement date (one record per trade, or weekly on Sunday)
    """
db_file = "%s/db/%s" % (base_dir,"portfolio_db")
if os.path.isfile(db_file):
cx = sqlite.connect(db_file)
cu = cx.cursor()
return (cu,cx)
else:
cx = sqlite.connect(db_file)
cu = cx.cursor()
        # portfolio table
cu.execute('''
create table portfolio(
id integer primary key,
symbol varchar(20),
trade_date varchar(50),
action varchar(10),
quantity varchar(10),
open_price varchar(20),
stop_price varchar(20),
gain_price varchar(20),
comm varchar(20),
is_close varchar(20) DEFAULT 'no'
)''')
cu.execute('''
create table money(
balance_date varchar(20) primary key,
free_money varchar(20)
)''')
return (cu,cx)
def update_db(trade_data,action):
    """ Insert a trade record and return the cash amount it moves. """
(db_cursor,cx) = connect_db()
symbol = trade_data['symbol']
trade_date = trade_data['trade_date']
action = trade_data['action']
quantity = trade_data['quantity']
open_price = trade_data['open_price']
stop_price = trade_data['stop_price']
gain_price = trade_data['gain_price']
comm = trade_data['comm']
sql_cmd = 'insert into portfolio values(NULL,"%s","%s","%s","%s","%s","%s","%s","%s","no")' % (symbol,trade_date,action,quantity,open_price,stop_price,gain_price,comm)
db_cursor.execute(sql_cmd)
cx.commit()
    # when closing, mark the original position as closed (is_close = 'yes')
if action.lower() == "close":
sql_cmd = "update portfolio set is_close = 'yes' where id=%s" % (trade_data['stock_id'])
db_cursor.execute(sql_cmd)
cx.commit()
cx.close()
    # closing a position returns the realized proceeds
    if action.lower() == "close":
        trade_val = trade_data['gain_money']
    # otherwise compute the cash consumed by this trade (both long and short
    # openings deduct cash)
else:
trade_val = float(open_price) * float(quantity) + float(comm)
return trade_val
def trade_update_cash(trade_val,action="open"):
    """
    Link a trade to the cash account (opening a position reduces cash,
    closing one increases it).
    """
    # fetch the most recent cash balance
(db_cursor,cx) = connect_db()
sql_cmd ="select free_money from money order by balance_date desc limit 1"
db_cursor.execute(sql_cmd)
rs = db_cursor.fetchone()
current_free_money = rs[0]
    # opening a position, long or short, deducts cash; closing adds it back
if action.lower() == "close":
new_free_money = float(current_free_money) + trade_val
else:
new_free_money = float(current_free_money) - trade_val
cx.close()
    # update the cash table
update_cash(new_free_money)
def update_cash(cash_val):
    """
    Insert or update today's cash balance.
    """
(db_cursor,cx) = connect_db()
try:
sql_cmd = "insert into money values(date('now'),'%s')" % (cash_val)
db_cursor.execute(sql_cmd)
    except sqlite.IntegrityError:
sql_cmd = "update money set free_money = '%s' where balance_date=date('now')" % (cash_val)
db_cursor.execute(sql_cmd)
except Exception as inst:
print "exception type = %s,Error = %s" % (type(inst),inst)
cx.commit()
cx.close()
def get_all_cash():
    """
    Return all cash-balance records, newest first.
    (The original body was a broken copy of update_cash; this reconstruction
    follows the function name and the surviving SQL fragment.)
    """
    (db_cursor,cx) = connect_db()
    sql_cmd = "select free_money,balance_date from money order by balance_date desc"
    db_cursor.execute(sql_cmd)
    rs = db_cursor.fetchall()
    cx.close()
    return rs
def stop_atr(symbol,open_price,action):
    """
    ATR-based stops, computed automatically: stop-loss at 1.5x ATR and
    take-profit at 3x ATR from the open price.
    """
    symbol = symbol.upper()
    atr_val = get_atr_output(symbol)
    per_stock_loss = 1.5*float(atr_val)
    per_stock_gain = 3*float(atr_val)
if action.lower() == "buy":
stop_price = float(open_price) - per_stock_loss
gain_price = float(open_price) + per_stock_gain
elif action.lower() == "sell":
stop_price = float(open_price) + per_stock_loss
gain_price = float(open_price) - per_stock_gain
return (per_stock_loss,stop_price,gain_price)
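# Worked example (illustrative numbers only): with open_price = 10.0 and
# ATR = 0.4, a "buy" gets stop_price = 10.0 - 1.5*0.4 = 9.4 and
# gain_price = 10.0 + 3*0.4 = 11.2; a "sell" (short) mirrors this to
# stop_price = 10.6 and gain_price = 8.8.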
def get_atr_output(symbol,timeframe='day'):
    """
    Fetch the latest ATR value for a symbol via GeniusTrader's
    display_indicator.pl script.
    """
symbol = symbol.upper()
##if DEBUG: print "DEBUG : CURRENT pROCESS SYMBOL=%s" % symbol
#print "DEBUG : CURRENT pROCESS SYMBOL=%s" % symbol
script_dir = "/home/hua.fu/it-manager/stock_tech/GeniusTrader/Scripts"
if timeframe == 'day':
cmd = "cd %s;./display_indicator.pl --last-record --timeframe=%s \
--tight I:ATR %s|grep -P '\[\d+-\d+\-\d+]*.*'" % (script_dir,timeframe,symbol)
if timeframe == 'week':
cmd = "cd %s;./display_indicator.pl --last-record --timeframe=%s \
--tight I:ATR %s|grep -P '\[\d+-\d+]*.*'" % (script_dir,timeframe,symbol)
if timeframe == 'month':
cmd = "cd %s;./display_indicator.pl --last-record --timeframe=%s \
--tight I:ATR %s| grep -P '\[\d+\/\d+]*.*'" % (script_dir,timeframe,symbol)
#print "DEBUG indicator_cmd = %s" % cmd
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
return False
base_point = output.split("=")[1].strip()
return base_point
def auto_order(stock_symbol,open_price,stop_price,all_loss_money,commision,action,stock_num):
    """
    Build an order, computing stop-loss and take-profit automatically from ATR.
    """
trade_data = {}
stock_symbol = stock_symbol.upper()
(per_stock_loss,stop_price_atr,gain_price) = stop_atr(stock_symbol,open_price,action)
if action.lower() =="buy":
buy_tag = "Buy"
sell_tag = "Sell"
elif action.lower() =="sell":
buy_tag = "Sell"
sell_tag = "Buy"
if stop_price:
per_stock_loss = abs(float(open_price) - float(stop_price))
else:
stop_price = stop_price_atr
comm_money = commision * float(stock_num)
stock_loss_money = per_stock_loss * float(stock_num) + comm_money
if stock_loss_money > all_loss_money:
print "===== Warning : stock stop money > global loss money ,please attention !!! ======"
print "stock loss money = %s , global loss money = %s" %( stock_loss_money,all_loss_money)
gain_money = abs(gain_price - float(open_price)) * float(stock_num) - comm_money
trade_data['symbol'] = stock_symbol
#trade_data['trade_date'] = "2012-07-01"
#trade_data['action'] = "buy"
trade_data['quantity'] = stock_num
trade_data['open_price'] = open_price
trade_data['stop_price'] = stop_price
trade_data['gain_price'] = gain_price
trade_data['comm'] = comm_money
sell_stop_tag = "%s Stop" % (sell_tag)
print "%-10s\t%-15s\t%-15s\t%-10s" % ("Symbol","Action.","Price","Num")
print "=" * 60
print "%-10s\t%-15s\t%-15s\t%-10s" % (stock_symbol,buy_tag,open_price,stock_num)
print "%-10s\t%-15s\t%-15s\t%-10s" % (stock_symbol,sell_tag,gain_price,stock_num)
print "%-10s\t%-15s\t%-15s\t%-10s" % (stock_symbol,sell_stop_tag,stop_price,stock_num)
print "+" * 60
print "gain_money=%s\tloss_money=%s" % (gain_money,stock_loss_money)
print "\n"
return trade_data
def show_stock_list():
    """ List currently open positions and let the user pick one to close. """
    # print the symbol and other position details
(db_cursor,cx) = connect_db()
sql = "select * from portfolio where is_close='no' and action!='close'"
db_cursor.execute(sql)
rs = db_cursor.fetchall()
if len(rs) == 0:
print "No stock to close ,exit"
sys.exit()
print "%-5s\t%-10s\t%-8s\t%-10s\t%-5s" % ("No.","Symbol","OpenPrice","Action","Num")
print "=" * 65
index = len(rs)
for item in rs:
trade_id = item[0]
symbol = item[1]
trade_date = item[2]
action = item[3]
stock_num = item[4]
open_price = item[5]
print "%-5s\t%-10s\t%-8s\t%-10s\t%-5s" % (trade_id,symbol,open_price,action,stock_num)
#print "%2s)\t%s" % (index+1,"Restart ........")
user_input = None
while(True):
user_input = raw_input("\nPlease Choice Stock Id : ")
if not user_input : continue
#elif user_input.isdigit() and 1 <= int(user_input) <= index:
elif user_input.isdigit() :
break
stock_id = user_input
cx.close()
return stock_id
def close_order(stock_id,commision,trade_date):
    """
    Close a position.
    """
(db_cursor,cx) = connect_db()
sql = "select * from portfolio where id=%s" % (stock_id)
db_cursor.execute(sql)
rs = db_cursor.fetchone()
trade_id = rs[0]
symbol = rs[1]
#trade_date = rs[2]
action = rs[3]
stock_num = rs[4]
open_price = rs[5]
cx.close()
trade_data = {}
symbol = symbol.upper()
user_input = None
while(True):
user_input = raw_input("\nPlease Input Close Price: ")
if not user_input : continue
#elif user_input.isdigit():
break
close_price = user_input
#if action.lower() =="buy": # 卖出,得到现金
# per_stock_gain = float(close_price) - float(open_price) - float(commision)
#elif action.lower() =="sell": # 买回,减少现金
# per_stock_gain = float(open_price) - float(close_price) - float(commision)
per_stock_gain = abs(float(close_price) - float(open_price))
comm_money = float(commision) * float(stock_num)
#gain_money = float(per_stock_gain) * float(stock_num) - comm_money
if action.lower()=="buy": # 作多平仓
gain_money = float(close_price) * float(stock_num) - comm_money
else: # 做空平仓
gain_money = (float(open_price) + per_stock_gain) * float(stock_num) - comm_money
trade_data['symbol'] = symbol
trade_data['trade_date'] = trade_date
trade_data['action'] = "close"
trade_data['quantity'] = stock_num
trade_data['open_price'] = open_price
trade_data['stop_price'] = ""
trade_data['gain_price'] = close_price
trade_data['gain_money'] = gain_money
trade_data['stock_id'] = stock_id
trade_data['comm'] = comm_money
print "\n%-5s\t%-8s\t%-8s\t%-5s\t%-5s" % ("Symbol","OpenPrice","ClosePrice","Num","GainMoney")
print "=" * 65
print "%-5s\t%-8s\t%-8s\t%-5s\t%-5s" % (symbol,open_price,close_price,stock_num,gain_money)
print "+" * 65
user_input = raw_input("update trade db , Are you sure [Y/y]:")
if user_input.lower() == "y":
trade_val = update_db(trade_data,"close")
trade_update_cash(trade_val,"close")
def usage():
    print '''
    Usage: trade_system.py [options...]
    Options:
     -o/--open_price  : buy/sell price
     -s/--stop_price  : stop-loss price
     -c/--symbol_code : stock symbol
     -n/--stock_num   : buy or sell quantity
     -t/--trade_date  : trade date [format: 2010-07-01]
     -a/--action      : buy, sell or close (close out a position)
     -D/--Deposit     : set the initial cash amount
     -d/--debug       : run in debug mode
     -h/--help        : this help info page
    Example:
    # open a position at 10 with a stop-loss at 9.5
    trade_system.py -o 10 -s 9.5
    '''
def main():
""" main function """
global base_dir,DEBUG
DEBUG = False
base_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
#base_dir = /home/hua.fu/geniustrader
cache_dir = "%s/tmp" % (base_dir)
stock_db = "%s/db/stock_db" % (base_dir)
try:
opts, args = getopt.getopt(sys.argv[1:],'dho:s:c:n:t:a:D:')
except getopt.GetoptError:
usage()
sys.exit()
    # option defaults
    open_price = None   # opening price
    stop_price = None   # stop-loss price
stock_symbol = None
stock_num = 200
trade_date = datetime.datetime.now().strftime("%Y-%m-%d")
action = "buy"
init_cash = None
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
sys.exit()
elif opt == '-s':
stop_price = arg
elif opt == '-o':
open_price = arg
elif opt == '-c':
stock_symbol = arg
elif opt == '-n':
stock_num = arg
elif opt == '-t':
trade_date = arg
elif opt == '-a':
action = arg
elif opt == '-D':
init_cash = arg
elif opt == '-d':
DEBUG = True
    # load configuration
trade_config = load_config()
#pprint(trade_config)
stop_loss = trade_config['stop_loss']
stop_gain = trade_config['stock_stop_gain']
all_money = trade_config['int_all_money']
commision = trade_config['commision']
if init_cash:
update_cash(init_cash)
print "Setting Init Cash Complete"
sys.exit()
if action.lower() == "close":
stock_id = show_stock_list()
close_order(stock_id,commision,trade_date)
sys.exit()
if not open_price:
usage()
sys.exit()
    # risk budget per trade: stop_loss fraction (e.g. 2%) of total capital
loss_money = float(all_money) * float(stop_loss)
#if stock_num:
# trade_data = auto_order(stock_symbol,open_price,stop_price,loss_money,commision,stock_num)
#else:
# trade_data = auto_order(stock_symbol,open_price,stop_price,loss_money,commision)
trade_data = auto_order(stock_symbol,open_price,stop_price,loss_money,commision,action,stock_num)
trade_data['trade_date'] = trade_date
trade_data['action'] = action
pprint(trade_data)
user_input = raw_input("update trade db , Are you sure [Y/y]:")
if user_input.lower() == "y":
trade_val = update_db(trade_data,action)
trade_update_cash(trade_val)
if __name__ == "__main__":
main()
|
3dfxsoftware/cbss-addons
|
refs/heads/master
|
imsar_ui_customizations/imsar_ui.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# IMSAR LLC
# Author: Ben Olsen
# Copyright (C) 2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_bom_custom(osv.Model):
_inherit = "mrp.bom"
_columns = {
'type': fields.selection([('normal','Assembly BoM'),('phantom','Package BoM')], 'BoM Type', required=True,
help= "If a by-product is used in several products, it can be useful to create its own BoM. "\
"Though if you don't want separated production orders for this by-product, select Package as BoM type. "\
"If a Package BoM is used for a root product, it will be sold and shipped as a set of components, instead of being produced."),
}
|
66eli77/fle-home
|
refs/heads/master
|
fle_site/apps/about/migrations/0002_auto__add_organizationtype__add_supportingorganization.py
|
5
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'OrganizationType'
db.create_table('about_organizationtype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=150)),
))
db.send_create_signal('about', ['OrganizationType'])
# Adding model 'SupportingOrganization'
db.create_table('about_supportingorganization', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
('picture', self.gf('django.db.models.fields.files.ImageField')(default=None, max_length=100, blank=True)),
('description', self.gf('markupfield.fields.MarkupField')(rendered_field=True)),
('organization_type', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['about.OrganizationType'], blank=True)),
('description_markup_type', self.gf('django.db.models.fields.CharField')(default='html', max_length=30)),
('_description_rendered', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('about', ['SupportingOrganization'])
def backwards(self, orm):
# Deleting model 'OrganizationType'
db.delete_table('about_organizationtype')
# Deleting model 'SupportingOrganization'
db.delete_table('about_supportingorganization')
models = {
'about.boardmember': {
'Meta': {'object_name': 'BoardMember', '_ormbases': ['about.Person']},
'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['about.Person']", 'unique': 'True', 'primary_key': 'True'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
},
'about.internship': {
'Meta': {'object_name': 'Internship'},
'_description_rendered': ('django.db.models.fields.TextField', [], {}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True'}),
'description_markup_type': ('django.db.models.fields.CharField', [], {'default': "'html'", 'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'about.organizationtype': {
'Meta': {'object_name': 'OrganizationType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'about.person': {
'Meta': {'object_name': 'Person'},
'_bio_rendered': ('django.db.models.fields.TextField', [], {}),
'bio': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True'}),
'bio_markup_type': ('django.db.models.fields.CharField', [], {'default': "'html'", 'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'about.pressarticle': {
'Meta': {'object_name': 'PressArticle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['about.PressLogo']", 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'about.presslogo': {
'Meta': {'object_name': 'PressLogo'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'about.supportingorganization': {
'Meta': {'object_name': 'SupportingOrganization'},
'_description_rendered': ('django.db.models.fields.TextField', [], {}),
'description': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True'}),
'description_markup_type': ('django.db.models.fields.CharField', [], {'default': "'html'", 'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_type': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['about.OrganizationType']", 'blank': 'True'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'about.teammember': {
'Meta': {'object_name': 'TeamMember', '_ormbases': ['about.Person']},
'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['about.Person']", 'unique': 'True', 'primary_key': 'True'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
}
}
complete_apps = ['about']
|
mchristopher/PokemonGo-DesktopMap
|
refs/heads/master
|
app/pylibs/shared/pgoapi/protos/POGOProtos/Networking/Requests/Messages/LevelUpRewardsMessage_pb2.py
|
16
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/LevelUpRewardsMessage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Requests/Messages/LevelUpRewardsMessage.proto',
package='POGOProtos.Networking.Requests.Messages',
syntax='proto3',
serialized_pb=_b('\nCPOGOProtos/Networking/Requests/Messages/LevelUpRewardsMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\"&\n\x15LevelUpRewardsMessage\x12\r\n\x05level\x18\x01 \x01(\x05\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_LEVELUPREWARDSMESSAGE = _descriptor.Descriptor(
name='LevelUpRewardsMessage',
full_name='POGOProtos.Networking.Requests.Messages.LevelUpRewardsMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='level', full_name='POGOProtos.Networking.Requests.Messages.LevelUpRewardsMessage.level', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=112,
serialized_end=150,
)
DESCRIPTOR.message_types_by_name['LevelUpRewardsMessage'] = _LEVELUPREWARDSMESSAGE
LevelUpRewardsMessage = _reflection.GeneratedProtocolMessageType('LevelUpRewardsMessage', (_message.Message,), dict(
DESCRIPTOR = _LEVELUPREWARDSMESSAGE,
__module__ = 'POGOProtos.Networking.Requests.Messages.LevelUpRewardsMessage_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.LevelUpRewardsMessage)
))
_sym_db.RegisterMessage(LevelUpRewardsMessage)
# @@protoc_insertion_point(module_scope)
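# Illustrative usage (not part of the generated file): the generated class
# behaves like any other protobuf message --
#   msg = LevelUpRewardsMessage(level=5)
#   data = msg.SerializeToString()
#   parsed = LevelUpRewardsMessage.FromString(data)
#   assert parsed.level == 5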
|
pnkfelix/tamarin-redux
|
refs/heads/master
|
test/util/which.py
|
8
|
#!/usr/bin/env python
# Copyright (c) 2002-2005 ActiveState Corp.
# See LICENSE.txt for license details.
# Author:
# Trent Mick (TrentM@ActiveState.com)
# Home:
# http://trentm.com/projects/which/
r"""Find the full path to commands.
which(command, path=None, verbose=0, exts=None)
Return the full path to the first match of the given command on the
path.
whichall(command, path=None, verbose=0, exts=None)
Return a list of full paths to all matches of the given command on
the path.
whichgen(command, path=None, verbose=0, exts=None)
Return a generator which will yield full paths to all matches of the
given command on the path.
By default the PATH environment variable is searched (as well as, on
Windows, the AppPaths key in the registry), but a specific 'path' list
to search may be specified as well. On Windows, the PATHEXT environment
variable is applied as appropriate.
If "verbose" is true then a tuple of the form
(<fullpath>, <matched-where-description>)
is returned for each match. The latter element is a textual description
of where the match was found. For example:
from PATH element 0
from HKLM\SOFTWARE\...\perl.exe
"""
_cmdlnUsage = """
Show the full path of commands.
Usage:
which [<options>...] [<command-name>...]
Options:
-h, --help Print this help and exit.
-V, --version Print the version info and exit.
-a, --all Print *all* matching paths.
-v, --verbose Print out how matches were located and
show near misses on stderr.
-q, --quiet Just print out matches. I.e., do not print out
near misses.
-p <altpath>, --path=<altpath>
An alternative path (list of directories) may
be specified for searching.
-e <exts>, --exts=<exts>
Specify a list of extensions to consider instead
of the usual list (';'-separate list, Windows
only).
Show the full path to the program that would be run for each given
command name, if any. Which, like GNU's which, returns the number of
failed arguments, or -1 when no <command-name> was given.
Near misses include duplicates, non-regular files and (on Un*x)
files without executable access.
"""
__revision__ = "$Id: which.py 430 2005-08-20 03:11:58Z trentm $"
__version_info__ = (1, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
import os
import sys
import getopt
import stat
#---- exceptions
class WhichError(Exception):
pass
#---- internal support stuff
def _getRegisteredExecutable(exeName):
"""Windows allow application paths to be registered in the registry."""
registered = None
if sys.platform.startswith('win'):
if os.path.splitext(exeName)[1].lower() != '.exe':
exeName += '.exe'
if sys.version_info[0] < 3:
import _winreg as winreg
else:
import winreg
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" +\
exeName
value = winreg.QueryValue(winreg.HKEY_LOCAL_MACHINE, key)
registered = (value, "from HKLM\\"+key)
except winreg.error:
pass
if registered and not os.path.exists(registered[0]):
registered = None
return registered
def _samefile(fname1, fname2):
if sys.platform.startswith('win'):
return ( os.path.normpath(os.path.normcase(fname1)) ==\
os.path.normpath(os.path.normcase(fname2)) )
else:
return os.path.samefile(fname1, fname2)
def _cull(potential, matches, verbose=0):
"""Cull inappropriate matches. Possible reasons:
- a duplicate of a previous match
- not a disk file
- not executable (non-Windows)
If 'potential' is approved it is returned and added to 'matches'.
Otherwise, None is returned.
"""
for match in matches: # don't yield duplicates
if _samefile(potential[0], match[0]):
if verbose:
sys.stderr.write("duplicate: %s (%s)\n" % potential)
return None
else:
if not stat.S_ISREG(os.stat(potential[0]).st_mode):
if verbose:
sys.stderr.write("not a regular file: %s (%s)\n" % potential)
elif not os.access(potential[0], os.X_OK):
if verbose:
sys.stderr.write("no executable access: %s (%s)\n"\
% potential)
else:
matches.append(potential)
return potential
#---- module API
def whichgen(command, path=None, verbose=0, exts=None):
"""Return a generator of full paths to the given command.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
This method returns a generator which yields either full paths to
the given command or, if verbose, tuples of the form (<path to
command>, <where path found>).
"""
matches = []
if path is None:
usingGivenPath = 0
path = os.environ.get("PATH", "").split(os.pathsep)
if sys.platform.startswith("win"):
path.insert(0, os.curdir) # implied by Windows shell
else:
usingGivenPath = 1
# Windows has the concept of a list of extensions (PATHEXT env var).
if sys.platform.startswith("win"):
if exts is None:
exts = os.environ.get("PATHEXT", "").split(os.pathsep)
            # If '.exe' is not in exts then this is probably Win9x or a
            # bogus PATHEXT; use a reasonable default.
for ext in exts:
if ext.lower() == ".exe":
break
else:
exts = ['.COM', '.EXE', '.BAT']
elif not isinstance(exts, list):
raise TypeError("'exts' argument must be a list or None")
else:
if exts is not None:
raise WhichError("'exts' argument is not supported on "\
"platform '%s'" % sys.platform)
exts = []
# File name cannot have path separators because PATH lookup does not
# work that way.
if os.sep in command or os.altsep and os.altsep in command:
pass
else:
for i in range(len(path)):
dirName = path[i]
# On windows the dirName *could* be quoted, drop the quotes
if sys.platform.startswith("win") and len(dirName) >= 2\
and dirName[0] == '"' and dirName[-1] == '"':
dirName = dirName[1:-1]
for ext in ['']+exts:
absName = os.path.abspath(
os.path.normpath(os.path.join(dirName, command+ext)))
if os.path.isfile(absName):
if usingGivenPath:
fromWhere = "from given path element %d" % i
elif not sys.platform.startswith("win"):
fromWhere = "from PATH element %d" % i
elif i == 0:
fromWhere = "from current directory"
else:
fromWhere = "from PATH element %d" % (i-1)
match = _cull((absName, fromWhere), matches, verbose)
if match:
if verbose:
yield match
else:
yield match[0]
match = _getRegisteredExecutable(command)
if match is not None:
match = _cull(match, matches, verbose)
if match:
if verbose:
yield match
else:
yield match[0]
def which(command, path=None, verbose=0, exts=None):
"""Return the full path to the first match of the given command on
the path.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned. The second
element is a textual description of where the match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
If no match is found for the command, a WhichError is raised.
"""
try:
if sys.version_info[0] < 3:
match = whichgen(command, path, verbose, exts).next()
else:
match = next(whichgen(command, path, verbose, exts))
except StopIteration:
raise WhichError("Could not find '%s' on the path." % command)
return match
def whichall(command, path=None, verbose=0, exts=None):
"""Return a list of full paths to all matches of the given command
on the path.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
"""
return list( whichgen(command, path, verbose, exts) )
#---- mainline
def main(argv):
all = 0
verbose = 0
altpath = None
exts = None
try:
optlist, args = getopt.getopt(argv[1:], 'haVvqp:e:',
['help', 'all', 'version', 'verbose', 'quiet', 'path=', 'exts='])
except getopt.GetoptError:
sys.stderr.write("which: error: %s. Your invocation was: %s\n"\
% (sys.exc_info(), argv))
sys.stderr.write("Try 'which --help'.\n")
return 1
for opt, optarg in optlist:
if opt in ('-h', '--help'):
print(_cmdlnUsage)
return 0
elif opt in ('-V', '--version'):
print("which %s" % __version__)
return 0
elif opt in ('-a', '--all'):
all = 1
elif opt in ('-v', '--verbose'):
verbose = 1
elif opt in ('-q', '--quiet'):
verbose = 0
elif opt in ('-p', '--path'):
if optarg:
altpath = optarg.split(os.pathsep)
else:
altpath = []
elif opt in ('-e', '--exts'):
if optarg:
exts = optarg.split(os.pathsep)
else:
exts = []
if len(args) == 0:
return -1
failures = 0
for arg in args:
#print("debug: search for %r" % arg)
nmatches = 0
for match in whichgen(arg, path=altpath, verbose=verbose, exts=exts):
if verbose:
print("%s (%s)" % match)
else:
print(match)
nmatches += 1
if not all:
break
if not nmatches:
failures += 1
return failures
if __name__ == "__main__":
sys.exit( main(sys.argv) )
|
NullSoldier/django
|
refs/heads/master
|
tests/migrations/test_migrations_no_default/__init__.py
|
12133432
| |
berkeley-cocosci/Wallace
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
fujita/ryu
|
refs/heads/master
|
ryu/services/protocols/bgp/info_base/vrf4.py
|
52
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines data types and models required specifically for VRF (for IPv4)
support. Represents data structures for VRF not VPN/global.
(Inside VRF you have IPv4 prefixes and inside VPN you have VPNv4 prefixes)
"""
import logging
from ryu.lib.packet.bgp import RF_IPv4_UC
from ryu.lib.packet.bgp import RF_IPv4_VPN
from ryu.lib.packet.bgp import IPAddrPrefix
from ryu.lib.packet.bgp import LabelledVPNIPAddrPrefix
from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path
from ryu.services.protocols.bgp.info_base.vrf import VrfDest
from ryu.services.protocols.bgp.info_base.vrf import VrfNlriImportMap
from ryu.services.protocols.bgp.info_base.vrf import VrfPath
from ryu.services.protocols.bgp.info_base.vrf import VrfTable
LOG = logging.getLogger('bgpspeaker.info_base.vrf4')
class Vrf4Path(VrfPath):
"""Represents a way of reaching an IP destination with a VPN."""
ROUTE_FAMILY = RF_IPv4_UC
VPN_PATH_CLASS = Vpnv4Path
VPN_NLRI_CLASS = LabelledVPNIPAddrPrefix
class Vrf4Dest(VrfDest):
ROUTE_FAMILY = RF_IPv4_UC
class Vrf4Table(VrfTable):
"""Virtual Routing and Forwarding information base for IPv4."""
ROUTE_FAMILY = RF_IPv4_UC
VPN_ROUTE_FAMILY = RF_IPv4_VPN
NLRI_CLASS = IPAddrPrefix
VRF_PATH_CLASS = Vrf4Path
VRF_DEST_CLASS = Vrf4Dest
class Vrf4NlriImportMap(VrfNlriImportMap):
VRF_PATH_CLASS = Vrf4Path
NLRI_CLASS = IPAddrPrefix
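# Illustrative note (not part of the original module): the class attributes
# wire the VRF-side and VPN-side representations together --
#   Vrf4Table.NLRI_CLASS is IPAddrPrefix                 # plain IPv4 inside a VRF
#   Vrf4Path.VPN_NLRI_CLASS is LabelledVPNIPAddrPrefix   # labelled VPNv4 toward BGP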
|
campagnola/acq4
|
refs/heads/develop
|
acq4/modules/TaskRunner/analysisModules/Uncaging/__init__.py
|
3
|
from __future__ import print_function
from .interface import *
|
mattseymour/django
|
refs/heads/master
|
tests/indexes/models.py
|
15
|
from django.db import connection, models
class CurrentTranslation(models.ForeignObject):
"""
Creates virtual relation to the translation with model cache enabled.
"""
# Avoid validation
requires_unique_target = False
def __init__(self, to, on_delete, from_fields, to_fields, **kwargs):
# Disable reverse relation
kwargs['related_name'] = '+'
# Set unique to enable model cache.
kwargs['unique'] = True
super(CurrentTranslation, self).__init__(to, on_delete, from_fields, to_fields, **kwargs)
class ArticleTranslation(models.Model):
article = models.ForeignKey('indexes.Article', models.CASCADE)
article_no_constraint = models.ForeignKey('indexes.Article', models.CASCADE, db_constraint=False, related_name='+')
language = models.CharField(max_length=10, unique=True)
content = models.TextField()
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
# Add virtual relation to the ArticleTranslation model.
translation = CurrentTranslation(ArticleTranslation, models.CASCADE, ['id'], ['article'])
class Meta:
index_together = [
["headline", "pub_date"],
]
# Model for index_together being used only with single list
class IndexTogetherSingleList(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
index_together = ["headline", "pub_date"]
# Indexing a TextField on Oracle or MySQL results in index creation error.
if connection.vendor == 'postgresql':
class IndexedArticle(models.Model):
headline = models.CharField(max_length=100, db_index=True)
body = models.TextField(db_index=True)
slug = models.CharField(max_length=40, unique=True)
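# Illustrative query sketch (not part of the original test models): the
# CurrentTranslation virtual relation lets the translation be traversed
# like a normal ForeignKey, e.g.
#   Article.objects.filter(translation__language='en')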
|
XiaosongWei/chromium-crosswalk
|
refs/heads/master
|
native_client_sdk/src/tools/tests/oshelpers_test.py
|
159
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(PARENT_DIR)
import oshelpers
class RunError(subprocess.CalledProcessError):
def __init__(self, retcode, command, output, error_output):
subprocess.CalledProcessError.__init__(self, retcode, command)
self.output = output
self.error_output = error_output
def __str__(self):
msg = subprocess.CalledProcessError.__str__(self)
msg += '.\nstdout: """%s"""' % (self.output,)
msg += '.\nstderr: """%s"""' % (self.error_output,)
return msg
def RunCmd(cmd, args, cwd, env=None):
env = env or os.environ
command = [sys.executable, 'oshelpers.py', cmd] + args
process = subprocess.Popen(stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
args=command,
cwd=cwd,
env=env)
output, error_output = process.communicate()
retcode = process.returncode
if retcode:
raise RunError(retcode, command, output, error_output)
return output, error_output
class TestZip(unittest.TestCase):
def setUp(self):
# make zipname -> "testFooBar.zip"
self.zipname = self.id().split('.')[-1] + '.zip'
self.zipfile = None
self.tempdir = tempfile.mkdtemp()
shutil.copy(os.path.join(PARENT_DIR, 'oshelpers.py'),
self.tempdir)
def tearDown(self):
if self.zipfile:
self.zipfile.close()
shutil.rmtree(self.tempdir)
def GetTempPath(self, basename):
return os.path.join(self.tempdir, basename)
def MakeFile(self, rel_path, size):
with open(os.path.join(self.tempdir, rel_path), 'wb') as f:
f.write('0' * size)
return rel_path
def RunZip(self, *args):
return RunCmd('zip', list(args), cwd=self.tempdir)
def OpenZipFile(self):
self.zipfile = zipfile.ZipFile(self.GetTempPath(self.zipname), 'r')
def CloseZipFile(self):
self.zipfile.close()
self.zipfile = None
def GetZipInfo(self, path):
return self.zipfile.getinfo(oshelpers.OSMakeZipPath(path))
def testNothingToDo(self):
self.assertRaises(subprocess.CalledProcessError, self.RunZip,
self.zipname, 'nonexistent_file')
self.assertFalse(os.path.exists(self.zipname))
def testAddSomeFiles(self):
file1 = self.MakeFile('file1', 1024)
file2 = self.MakeFile('file2', 3354)
self.RunZip(self.zipname, file1, file2)
self.OpenZipFile()
self.assertEqual(len(self.zipfile.namelist()), 2)
self.assertEqual(self.GetZipInfo(file1).file_size, 1024)
self.assertEqual(self.GetZipInfo(file2).file_size, 3354)
# make sure files are added in order
self.assertEqual(self.zipfile.namelist()[0], file1)
def testAddFilesWithGlob(self):
self.MakeFile('file1', 1024)
self.MakeFile('file2', 3354)
self.RunZip(self.zipname, 'file*')
self.OpenZipFile()
self.assertEqual(len(self.zipfile.namelist()), 2)
def testAddDir(self):
os.mkdir(self.GetTempPath('dir1'))
self.RunZip(self.zipname, 'dir1')
self.OpenZipFile()
self.assertEqual(len(self.zipfile.namelist()), 1)
self.assertRaises(KeyError, self.zipfile.getinfo, 'dir1')
self.zipfile.getinfo('dir1/')
def testAddRecursive(self):
os.mkdir(self.GetTempPath('dir1'))
self.MakeFile(os.path.join('dir1', 'file1'), 256)
os.mkdir(self.GetTempPath(os.path.join('dir1', 'dir2')))
self.MakeFile(os.path.join('dir1', 'dir2', 'file2'), 1234)
self.RunZip(self.zipname, '-r', 'dir1')
self.OpenZipFile()
self.assertEqual(len(self.zipfile.namelist()), 4)
def testUpdate(self):
file1 = self.MakeFile('file1', 1223)
self.RunZip(self.zipname, file1)
self.OpenZipFile()
self.assertEqual(self.GetZipInfo(file1).file_size, 1223)
file1 = self.MakeFile('file1', 2334)
self.RunZip(self.zipname, file1)
self.OpenZipFile()
self.assertEqual(len(self.zipfile.namelist()), 1)
self.assertEqual(self.GetZipInfo(file1).file_size, 2334)
def testUpdateOneFileOutOfMany(self):
file1 = self.MakeFile('file1', 128)
file2 = self.MakeFile('file2', 256)
file3 = self.MakeFile('file3', 512)
file4 = self.MakeFile('file4', 1024)
self.RunZip(self.zipname, file1, file2, file3, file4)
self.OpenZipFile()
self.assertEqual(len(self.zipfile.namelist()), 4)
self.CloseZipFile()
file3 = self.MakeFile('file3', 768)
self.RunZip(self.zipname, file3)
self.OpenZipFile()
self.assertEqual(len(self.zipfile.namelist()), 4)
self.assertEqual(self.zipfile.namelist()[0], file1)
self.assertEqual(self.GetZipInfo(file1).file_size, 128)
self.assertEqual(self.zipfile.namelist()[1], file2)
self.assertEqual(self.GetZipInfo(file2).file_size, 256)
self.assertEqual(self.zipfile.namelist()[2], file3)
self.assertEqual(self.GetZipInfo(file3).file_size, 768)
self.assertEqual(self.zipfile.namelist()[3], file4)
self.assertEqual(self.GetZipInfo(file4).file_size, 1024)
def testUpdateSubdirectory(self):
os.mkdir(self.GetTempPath('dir1'))
file1 = self.MakeFile(os.path.join('dir1', 'file1'), 256)
os.mkdir(self.GetTempPath(os.path.join('dir1', 'dir2')))
self.MakeFile(os.path.join('dir1', 'dir2', 'file2'), 1234)
self.RunZip(self.zipname, '-r', 'dir1')
self.OpenZipFile()
self.assertEqual(len(self.zipfile.namelist()), 4)
self.assertEqual(self.GetZipInfo(file1).file_size, 256)
self.CloseZipFile()
self.MakeFile(file1, 2560)
self.RunZip(self.zipname, file1)
self.OpenZipFile()
self.assertEqual(len(self.zipfile.namelist()), 4)
self.assertEqual(self.GetZipInfo(file1).file_size, 2560)
def testAppend(self):
file1 = self.MakeFile('file1', 128)
file2 = self.MakeFile('file2', 256)
self.RunZip(self.zipname, file1, file2)
self.OpenZipFile()
self.assertEqual(len(self.zipfile.namelist()), 2)
self.CloseZipFile()
file3 = self.MakeFile('file3', 768)
self.RunZip(self.zipname, file3)
self.OpenZipFile()
self.assertEqual(len(self.zipfile.namelist()), 3)
class TestWhich(unittest.TestCase):
def setUp(self):
self.path_list = []
self.tempdir = tempfile.mkdtemp()
shutil.copy(os.path.join(PARENT_DIR, 'oshelpers.py'),
self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def Mkdir(self, path):
os.mkdir(os.path.join(self.tempdir, path))
def MakeExecutableFile(self, *path_components):
path = os.path.join(self.tempdir, *path_components)
if sys.platform == 'win32':
path += '.exe'
with open(path, 'w') as f:
f.write('')
os.chmod(path, 0755)
return path
def RunWhich(self, *args):
paths = os.pathsep.join(os.path.join(self.tempdir, p)
for p in self.path_list)
env = {'PATH': paths}
return RunCmd('which', list(args), cwd=self.tempdir, env=env)
def testNothing(self):
self.assertRaises(RunError, self.RunWhich, 'foo')
def testBasic(self):
self.Mkdir('bin')
bin_cp = self.MakeExecutableFile('bin', 'cp')
cp = os.path.basename(bin_cp)
self.path_list.append('bin')
output, _ = self.RunWhich(cp)
self.assertTrue(os.path.join(self.tempdir, 'bin', cp) in output)
def testMulti(self):
self.Mkdir('bin')
bin_cp = self.MakeExecutableFile('bin', 'cp')
bin_ls = self.MakeExecutableFile('bin', 'ls')
cp = os.path.basename(bin_cp)
ls = os.path.basename(bin_ls)
self.path_list.append('bin')
output, _ = self.RunWhich(cp, ls)
self.assertTrue(os.path.join(self.tempdir, 'bin', cp) in output)
self.assertTrue(os.path.join(self.tempdir, 'bin', ls) in output)
def testNonPath(self):
self.Mkdir('bin')
bin_cp = self.MakeExecutableFile('bin', 'cp')
cp = os.path.basename(bin_cp)
# Note, "bin" not added to PATH.
output, _ = self.RunWhich(bin_cp)
self.assertTrue(os.path.join('bin', cp) in output)
if __name__ == '__main__':
unittest.main()
|
Trust-Code/odoo8.0-addons
|
refs/heads/master
|
project_scrum/report/__init__.py
|
6
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sprint_burndown_story_point
import sprint_velocity
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
NL66278/OCB
|
refs/heads/8.0
|
addons/email_template/email_template.py
|
22
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import datetime
import dateutil.relativedelta as relativedelta
import logging
import lxml
import urlparse
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp import tools, api
from openerp.tools.translate import _
from urllib import urlencode, quote as quote
_logger = logging.getLogger(__name__)
def format_tz(pool, cr, uid, dt, tz=False, format=False, context=None):
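# Summarising the body below: parse the server datetime string ``dt``,
# shift it into the requested timezone, then render it either with the
# explicit ``format`` or with the date/time formats of the context
# language (falling back to '%B-%d-%Y' and '%I-%M %p').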
context = dict(context or {})
if tz:
context['tz'] = tz or pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz'] or "UTC"
timestamp = datetime.datetime.strptime(dt, tools.DEFAULT_SERVER_DATETIME_FORMAT)
ts = fields.datetime.context_timestamp(cr, uid, timestamp, context)
if format:
return ts.strftime(format)
else:
lang = context.get("lang")
lang_params = {}
if lang:
res_lang = pool.get('res.lang')
ids = res_lang.search(cr, uid, [("code", "=", lang)])
if ids:
lang_params = res_lang.read(cr, uid, ids[0], ["date_format", "time_format"])
format_date = lang_params.get("date_format", '%B-%d-%Y')
format_time = lang_params.get("time_format", '%I-%M %p')
fdate = ts.strftime(format_date)
ftime = ts.strftime(format_time)
return "%s %s (%s)" % (fdate, ftime, tz)
try:
# We use a jinja2 sandboxed environment to render mako templates.
# Note that the rendering does not cover all the mako syntax, in particular
# arbitrary Python statements are not accepted, and not all expressions are
# allowed: only "public" attributes (not starting with '_') of objects may
# be accessed.
# This is done on purpose: it prevents incidental or malicious execution of
# Python code that may break the security of the server.
from jinja2.sandbox import SandboxedEnvironment
mako_template_env = SandboxedEnvironment(
block_start_string="<%",
block_end_string="%>",
variable_start_string="${",
variable_end_string="}",
comment_start_string="<%doc>",
comment_end_string="</%doc>",
line_statement_prefix="%",
line_comment_prefix="##",
trim_blocks=True, # do not output newline after blocks
autoescape=True, # XML/HTML automatic escaping
)
mako_template_env.globals.update({
'str': str,
'quote': quote,
'urlencode': urlencode,
'datetime': datetime,
'len': len,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'filter': filter,
'reduce': reduce,
'map': map,
'round': round,
# dateutil.relativedelta is an old-style class and cannot be directly
# instantiated within a jinja2 expression, so a lambda "proxy"
# is needed, apparently.
'relativedelta': lambda *a, **kw : relativedelta.relativedelta(*a, **kw),
})
except ImportError:
_logger.warning("jinja2 not available, templating features will not work!")
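# A minimal sketch of rendering with this environment (hypothetical
# record, not part of this module):
#
#     tmpl = mako_template_env.from_string(u"Dear ${object.name},")
#     tmpl.render({'object': record})  # -> u"Dear Agrolait,"
#
# Expressions touching "private" attributes, e.g. ${object._table},
# are rejected by the sandbox as described above.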
class email_template(osv.osv):
"Templates for sending email"
_name = "email.template"
_description = 'Email Templates'
_order = 'name'
def default_get(self, cr, uid, fields, context=None):
res = super(email_template, self).default_get(cr, uid, fields, context)
if res.get('model'):
res['model_id'] = self.pool['ir.model'].search(cr, uid, [('model', '=', res.pop('model'))], context=context)[0]
return res
def _replace_local_links(self, cr, uid, html, context=None):
""" Post-processing of html content to replace local links to absolute
links, using web.base.url as base url. """
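# For example, with web.base.url set to 'http://localhost:8069'
# (illustrative value), <a href="/web/login"> becomes
# <a href="http://localhost:8069/web/login">.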
if not html:
return html
# form a tree
root = lxml.html.fromstring(html)
if not len(root) and root.text is None and root.tail is None:
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
(base_scheme, base_netloc, bpath, bparams, bquery, bfragment) = urlparse.urlparse(base_url)
def _process_link(url):
new_url = url
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if not scheme and not netloc:
new_url = urlparse.urlunparse((base_scheme, base_netloc, path, params, query, fragment))
return new_url
# check all nodes, replace :
# - img src -> check URL
# - a href -> check URL
for node in root.iter():
if node.tag == 'a' and node.get('href'):
node.set('href', _process_link(node.get('href')))
elif node.tag == 'img' and not node.get('src', 'data').startswith('data'):
node.set('src', _process_link(node.get('src')))
html = lxml.html.tostring(root, pretty_print=False, method='html')
# this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
if html.startswith('<div>') and html.endswith('</div>'):
html = html[5:-6]
return html
def render_post_process(self, cr, uid, html, context=None):
html = self._replace_local_links(cr, uid, html, context=context)
return html
def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
"""Render the given template text, replace mako expressions ``${expr}``
with the result of evaluating these expressions with
an evaluation context containing:
* ``user``: browse_record of the current user
* ``object``: browse_record of the document record this mail is
related to
* ``context``: the context passed to the mail composition wizard
:param str template: the template text to render
:param str model: model name of the document record this mail is related to.
:param int res_ids: list of ids of document records those mails are related to.
"""
if context is None:
context = {}
res_ids = filter(None, res_ids) # to avoid browsing [None] below
results = dict.fromkeys(res_ids, u"")
# try to load the template
try:
template = mako_template_env.from_string(tools.ustr(template))
except Exception:
_logger.exception("Failed to load template %r", template)
return results
# prepare template variables
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
records = self.pool[model].browse(cr, uid, res_ids, context=context) or [None]
variables = {
'format_tz': lambda dt, tz=False, format=False: format_tz(self.pool, cr, uid, dt, tz, format, context),
'user': user,
'ctx': context, # context kw would clash with mako internals
}
for record in records:
res_id = record.id if record else None
variables['object'] = record
try:
render_result = template.render(variables)
except Exception:
_logger.exception("Failed to render template %r using values %r", template, variables)
render_result = u""
if render_result == u"False":
render_result = u""
results[res_id] = render_result
if post_process:
for res_id, result in results.iteritems():
results[res_id] = self.render_post_process(cr, uid, result, context=context)
return results
def get_email_template_batch(self, cr, uid, template_id=False, res_ids=None, context=None):
if context is None:
context = {}
if res_ids is None:
res_ids = [None]
results = dict.fromkeys(res_ids, False)
if not template_id:
return results
template = self.browse(cr, uid, template_id, context)
langs = self.render_template_batch(cr, uid, template.lang, template.model, res_ids, context)
for res_id, lang in langs.iteritems():
if lang:
# Use translated template if necessary
ctx = context.copy()
ctx['lang'] = lang
template = self.browse(cr, uid, template.id, ctx)
else:
template = self.browse(cr, uid, int(template_id), context)
results[res_id] = template
return results
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
mod_name = False
if model_id:
mod_name = self.pool.get('ir.model').browse(cr, uid, model_id, context).model
return {'value': {'model': mod_name}}
_columns = {
'name': fields.char('Name'),
'model_id': fields.many2one('ir.model', 'Applies to', help="The kind of document with which this template can be used"),
'model': fields.related('model_id', 'model', type='char', string='Related Document Model',
select=True, store=True, readonly=True),
'lang': fields.char('Language',
help="Optional translation language (ISO code) to select when sending out an email. "
"If not set, the english version will be used. "
"This should usually be a placeholder expression "
"that provides the appropriate language, e.g. "
"${object.partner_id.lang}.",
placeholder="${object.partner_id.lang}"),
'user_signature': fields.boolean('Add Signature',
help="If checked, the user's signature will be appended to the text version "
"of the message"),
'subject': fields.char('Subject', translate=True, help="Subject (placeholders may be used here)",),
'email_from': fields.char('From',
help="Sender address (placeholders may be used here). If not set, the default "
"value will be the author's email alias if configured, or email address."),
'use_default_to': fields.boolean(
'Default recipients',
help="Default recipients of the record:\n"
"- partner (using id on a partner or the partner_id field) OR\n"
"- email (using email_from or email field)"),
'email_to': fields.char('To (Emails)', help="Comma-separated recipient addresses (placeholders may be used here)"),
'partner_to': fields.char('To (Partners)',
help="Comma-separated ids of recipient partners (placeholders may be used here)",
oldname='email_recipients'),
'email_cc': fields.char('Cc', help="Carbon copy recipients (placeholders may be used here)"),
'reply_to': fields.char('Reply-To', help="Preferred response address (placeholders may be used here)"),
'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing Mail Server', readonly=False,
help="Optional preferred server for outgoing mails. If not set, the highest "
"priority one will be used."),
'body_html': fields.html('Body', translate=True, sanitize=False, help="Rich-text/HTML version of the message (placeholders may be used here)"),
'report_name': fields.char('Report Filename', translate=True,
help="Name to use for the generated report file (may contain placeholders)\n"
"The extension can be omitted and will then come from the report type."),
'report_template': fields.many2one('ir.actions.report.xml', 'Optional report to print and attach'),
'ref_ir_act_window': fields.many2one('ir.actions.act_window', 'Sidebar action', readonly=True, copy=False,
help="Sidebar action to make this template available on records "
"of the related document model"),
'ref_ir_value': fields.many2one('ir.values', 'Sidebar Button', readonly=True, copy=False,
help="Sidebar button to open the sidebar action"),
'attachment_ids': fields.many2many('ir.attachment', 'email_template_attachment_rel', 'email_template_id',
'attachment_id', 'Attachments',
help="You may attach files to this template, to be added to all "
"emails created from this template"),
'auto_delete': fields.boolean('Auto Delete', help="Permanently delete this email after sending it, to save space"),
# Fake fields used to implement the placeholder assistant
'model_object_field': fields.many2one('ir.model.fields', string="Field",
help="Select target field from the related document model.\n"
"If it is a relationship field you will be able to select "
"a target field at the destination of the relationship."),
'sub_object': fields.many2one('ir.model', 'Sub-model', readonly=True,
help="When a relationship field is selected as first field, "
"this field shows the document model the relationship goes to."),
'sub_model_object_field': fields.many2one('ir.model.fields', 'Sub-field',
help="When a relationship field is selected as first field, "
"this field lets you select the target field within the "
"destination document model (sub-model)."),
'null_value': fields.char('Default Value', help="Optional value to use if the target field is empty"),
'copyvalue': fields.char('Placeholder Expression', help="Final placeholder expression, to be copy-pasted in the desired template field."),
}
_defaults = {
'auto_delete': True,
}
def create_action(self, cr, uid, ids, context=None):
action_obj = self.pool.get('ir.actions.act_window')
data_obj = self.pool.get('ir.model.data')
for template in self.browse(cr, uid, ids, context=context):
src_obj = template.model_id.model
model_data_id = data_obj._get_id(cr, uid, 'mail', 'email_compose_message_wizard_form')
res_id = data_obj.browse(cr, uid, model_data_id, context=context).res_id
button_name = _('Send Mail (%s)') % template.name
act_id = action_obj.create(cr, SUPERUSER_ID, {
'name': button_name,
'type': 'ir.actions.act_window',
'res_model': 'mail.compose.message',
'src_model': src_obj,
'view_type': 'form',
'context': "{'default_composition_mode': 'mass_mail', 'default_template_id' : %d, 'default_use_template': True}" % (template.id),
'view_mode':'form,tree',
'view_id': res_id,
'target': 'new',
'auto_refresh':1
}, context)
ir_values_id = self.pool.get('ir.values').create(cr, SUPERUSER_ID, {
'name': button_name,
'model': src_obj,
'key2': 'client_action_multi',
'value': "ir.actions.act_window,%s" % act_id,
'object': True,
}, context)
template.write({
'ref_ir_act_window': act_id,
'ref_ir_value': ir_values_id,
})
return True
def unlink_action(self, cr, uid, ids, context=None):
for template in self.browse(cr, uid, ids, context=context):
try:
if template.ref_ir_act_window:
self.pool.get('ir.actions.act_window').unlink(cr, SUPERUSER_ID, template.ref_ir_act_window.id, context)
if template.ref_ir_value:
ir_values_obj = self.pool.get('ir.values')
ir_values_obj.unlink(cr, SUPERUSER_ID, template.ref_ir_value.id, context)
except Exception:
raise osv.except_osv(_("Warning"), _("Deletion of the action record failed."))
return True
def unlink(self, cr, uid, ids, context=None):
self.unlink_action(cr, uid, ids, context=context)
return super(email_template, self).unlink(cr, uid, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
template = self.browse(cr, uid, id, context=context)
default = dict(default or {},
name=_("%s (copy)") % template.name)
return super(email_template, self).copy(cr, uid, id, default, context)
def build_expression(self, field_name, sub_field_name, null_value):
"""Returns a placeholder expression for use in a template field,
based on the values provided in the placeholder assistant.
:param field_name: main field name
:param sub_field_name: sub field name (M2O)
:param null_value: default value if the target value is empty
:return: final placeholder expression
"""
expression = ''
if field_name:
expression = "${object." + field_name
if sub_field_name:
expression += "." + sub_field_name
if null_value:
expression += " or '''%s'''" % null_value
expression += "}"
return expression
def onchange_sub_model_object_value_field(self, cr, uid, ids, model_object_field, sub_model_object_field=False, null_value=None, context=None):
result = {
'sub_object': False,
'copyvalue': False,
'sub_model_object_field': False,
'null_value': False
}
if model_object_field:
fields_obj = self.pool.get('ir.model.fields')
field_value = fields_obj.browse(cr, uid, model_object_field, context)
if field_value.ttype in ['many2one', 'one2many', 'many2many']:
res_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', field_value.relation)], context=context)
sub_field_value = False
if sub_model_object_field:
sub_field_value = fields_obj.browse(cr, uid, sub_model_object_field, context)
if res_ids:
result.update({
'sub_object': res_ids[0],
'copyvalue': self.build_expression(field_value.name, sub_field_value and sub_field_value.name or False, null_value or False),
'sub_model_object_field': sub_model_object_field or False,
'null_value': null_value or False
})
else:
result.update({
'copyvalue': self.build_expression(field_value.name, False, null_value or False),
'null_value': null_value or False
})
return {'value': result}
def generate_recipients_batch(self, cr, uid, results, template_id, res_ids, context=None):
"""Generates the recipients of the template. Default values can ben generated
instead of the template values if requested by template or context.
Emails (email_to, email_cc) can be transformed into partners if requested
in the context. """
if context is None:
context = {}
template = self.browse(cr, uid, template_id, context=context)
if template.use_default_to or context.get('tpl_force_default_to'):
ctx = dict(context, thread_model=template.model)
default_recipients = self.pool['mail.thread'].message_get_default_recipients(cr, uid, res_ids, context=ctx)
for res_id, recipients in default_recipients.iteritems():
results[res_id].pop('partner_to', None)
results[res_id].update(recipients)
for res_id, values in results.iteritems():
partner_ids = values.get('partner_ids', list())
if context and context.get('tpl_partners_only'):
mails = tools.email_split(values.pop('email_to', '')) + tools.email_split(values.pop('email_cc', ''))
for mail in mails:
partner_id = self.pool.get('res.partner').find_or_create(cr, uid, mail, context=context)
partner_ids.append(partner_id)
partner_to = values.pop('partner_to', '')
if partner_to:
# placeholders could generate '', 3, 2 due to some empty field values
tpl_partner_ids = [int(pid) for pid in partner_to.split(',') if pid]
partner_ids += self.pool['res.partner'].exists(cr, SUPERUSER_ID, tpl_partner_ids, context=context)
results[res_id]['partner_ids'] = partner_ids
return results
def generate_email_batch(self, cr, uid, template_id, res_ids, context=None, fields=None):
"""Generates an email from the template for given the given model based on
records given by res_ids.
:param template_id: id of the template to render.
:param res_ids: ids of the records to use for rendering the template
    (model is taken from the template definition)
:returns: a dict containing all relevant fields for creating a new
mail.mail entry, with one extra key ``attachments``, in the
format [(report_name, data)] where data is base64 encoded.
"""
if context is None:
context = {}
if fields is None:
fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to']
report_xml_pool = self.pool.get('ir.actions.report.xml')
res_ids_to_templates = self.get_email_template_batch(cr, uid, template_id, res_ids, context)
# templates: res_id -> template; template -> res_ids
templates_to_res_ids = {}
for res_id, template in res_ids_to_templates.iteritems():
templates_to_res_ids.setdefault(template, []).append(res_id)
results = dict()
for template, template_res_ids in templates_to_res_ids.iteritems():
# generate fields value for all res_ids linked to the current template
for field in fields:
generated_field_values = self.render_template_batch(
cr, uid, getattr(template, field), template.model, template_res_ids,
post_process=(field == 'body_html'),
context=context)
for res_id, field_value in generated_field_values.iteritems():
results.setdefault(res_id, dict())[field] = field_value
# compute recipients
results = self.generate_recipients_batch(cr, uid, results, template.id, template_res_ids, context=context)
# update values for all res_ids
for res_id in template_res_ids:
values = results[res_id]
# body: add user signature, sanitize
if 'body_html' in fields and template.user_signature:
signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
if signature:
values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)
if values.get('body_html'):
values['body'] = tools.html_sanitize(values['body_html'])
# technical settings
values.update(
mail_server_id=template.mail_server_id.id or False,
auto_delete=template.auto_delete,
model=template.model,
res_id=res_id or False,
attachment_ids=[attach.id for attach in template.attachment_ids],
)
# Add report in attachments: generate once for all template_res_ids
if template.report_template:
for res_id in template_res_ids:
attachments = []
report_name = self.render_template(cr, uid, template.report_name, template.model, res_id, context=context)
report = report_xml_pool.browse(cr, uid, template.report_template.id, context)
report_service = report.report_name
# Ensure report is rendered using template's language
ctx = context.copy()
if template.lang:
ctx['lang'] = self.render_template_batch(cr, uid, template.lang, template.model, [res_id], context)[res_id] # take 0 ?
if report.report_type in ['qweb-html', 'qweb-pdf']:
result, format = self.pool['report'].get_pdf(cr, uid, [res_id], report_service, context=ctx), 'pdf'
else:
result, format = openerp.report.render_report(cr, uid, [res_id], report_service, {'model': template.model}, ctx)
# TODO in trunk, change return format to binary to match message_post expected format
result = base64.b64encode(result)
if not report_name:
report_name = 'report.' + report_service
ext = "." + format
if not report_name.endswith(ext):
report_name += ext
attachments.append((report_name, result))
results[res_id]['attachments'] = attachments
return results
@api.cr_uid_id_context
def send_mail(self, cr, uid, template_id, res_id, force_send=False, raise_exception=False, context=None):
"""Generates a new mail message for the given template and record,
and schedules it for delivery through the ``mail`` module's scheduler.
:param int template_id: id of the template to render
:param int res_id: id of the record to render the template with
(model is taken from the template)
:param bool force_send: if True, the generated mail.message is
immediately sent after being created, as if the scheduler
was executed for this message only.
:returns: id of the mail.message that was created
"""
if context is None:
context = {}
mail_mail = self.pool.get('mail.mail')
ir_attachment = self.pool.get('ir.attachment')
# create a mail_mail based on values, without attachments
values = self.generate_email(cr, uid, template_id, res_id, context=context)
if not values.get('email_from'):
raise osv.except_osv(_('Warning!'), _("Sender email is missing or empty after template rendering. Specify one to deliver your message"))
values['recipient_ids'] = [(4, pid) for pid in values.get('partner_ids', list())]
attachment_ids = values.pop('attachment_ids', [])
attachments = values.pop('attachments', [])
msg_id = mail_mail.create(cr, uid, values, context=context)
mail = mail_mail.browse(cr, uid, msg_id, context=context)
# manage attachments
for attachment in attachments:
attachment_data = {
'name': attachment[0],
'datas_fname': attachment[0],
'datas': attachment[1],
'res_model': 'mail.message',
'res_id': mail.mail_message_id.id,
}
context = dict(context)
context.pop('default_type', None)
attachment_ids.append(ir_attachment.create(cr, uid, attachment_data, context=context))
if attachment_ids:
values['attachment_ids'] = [(6, 0, attachment_ids)]
mail_mail.write(cr, uid, msg_id, {'attachment_ids': [(6, 0, attachment_ids)]}, context=context)
if force_send:
mail_mail.send(cr, uid, [msg_id], raise_exception=raise_exception, context=context)
return msg_id
# Compatibility method
def render_template(self, cr, uid, template, model, res_id, context=None):
return self.render_template_batch(cr, uid, template, model, [res_id], context)[res_id]
def get_email_template(self, cr, uid, template_id=False, record_id=None, context=None):
return self.get_email_template_batch(cr, uid, template_id, [record_id], context)[record_id]
def generate_email(self, cr, uid, template_id, res_id, context=None):
return self.generate_email_batch(cr, uid, template_id, [res_id], context)[res_id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
tequa/ammisoft
|
refs/heads/master
|
ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/backends/backend_mixed.py
|
10
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import six
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.tight_bbox import process_figure_for_rasterizing
class MixedModeRenderer(object):
"""
A helper class to implement a renderer that switches between
vector and raster drawing. An example may be a PDF writer, where
most things are drawn with PDF vector commands, but some very
complex objects, such as quad meshes, are rasterised and then
output as images.
"""
def __init__(self, figure, width, height, dpi, vector_renderer,
raster_renderer_class=None,
bbox_inches_restore=None):
"""
figure: The figure instance.
width: The width of the canvas in logical units
height: The height of the canvas in logical units
dpi: The dpi of the canvas
vector_renderer: An instance of a subclass of RendererBase
that will be used for the vector drawing.
raster_renderer_class: The renderer class to use for the
raster drawing. If not provided, this will use the Agg
backend (which is currently the only viable option anyway.)
"""
if raster_renderer_class is None:
raster_renderer_class = RendererAgg
self._raster_renderer_class = raster_renderer_class
self._width = width
self._height = height
self.dpi = dpi
self._vector_renderer = vector_renderer
self._raster_renderer = None
self._rasterizing = 0
# A reference to the figure is needed as we need to change
# the figure dpi before and after the rasterization. Although
# this looks ugly, I couldn't find a better solution. -JJL
self.figure = figure
self._figdpi = figure.get_dpi()
self._bbox_inches_restore = bbox_inches_restore
self._set_current_renderer(vector_renderer)
_methods = """
close_group draw_image draw_markers draw_path
draw_path_collection draw_quad_mesh draw_tex draw_text
finalize flipy get_canvas_width_height get_image_magnification
get_texmanager get_text_width_height_descent new_gc open_group
option_image_nocomposite points_to_pixels strip_math
start_filter stop_filter draw_gouraud_triangle
draw_gouraud_triangles option_scale_image
_text2path _get_text_path_transform height width
""".split()
def _set_current_renderer(self, renderer):
self._renderer = renderer
for method in self._methods:
if hasattr(renderer, method):
setattr(self, method, getattr(renderer, method))
renderer.start_rasterizing = self.start_rasterizing
renderer.stop_rasterizing = self.stop_rasterizing
def start_rasterizing(self):
"""
Enter "raster" mode. All subsequent drawing commands (until
stop_rasterizing is called) will be drawn with the raster
backend.
If start_rasterizing is called multiple times before
stop_rasterizing is called, this method has no effect.
"""
# change the dpi of the figure temporarily.
self.figure.set_dpi(self.dpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore)
self._bbox_inches_restore = r
if self._rasterizing == 0:
self._raster_renderer = self._raster_renderer_class(
self._width*self.dpi, self._height*self.dpi, self.dpi)
self._set_current_renderer(self._raster_renderer)
self._rasterizing += 1
def stop_rasterizing(self):
"""
Exit "raster" mode. All of the drawing that was done since
the last start_rasterizing command will be copied to the
vector backend by calling draw_image.
If stop_rasterizing is called multiple times before
start_rasterizing is called, this method has no effect.
"""
self._rasterizing -= 1
if self._rasterizing == 0:
self._set_current_renderer(self._vector_renderer)
height = self._height * self.dpi
buffer, bounds = self._raster_renderer.tostring_rgba_minimized()
l, b, w, h = bounds
if w > 0 and h > 0:
image = np.frombuffer(buffer, dtype=np.uint8)
image = image.reshape((h, w, 4))
image = image[::-1]
gc = self._renderer.new_gc()
# TODO: If the mixedmode resolution differs from the figure's
# dpi, the image must be scaled (dpi->_figdpi). Not all
# backends support this.
self._renderer.draw_image(
gc,
float(l) / self.dpi * self._figdpi,
(float(height)-b-h) / self.dpi * self._figdpi,
image)
self._raster_renderer = None
self._rasterizing = False
# restore the figure dpi.
self.figure.set_dpi(self._figdpi)
if self._bbox_inches_restore: # when tight bbox is used
r = process_figure_for_rasterizing(self.figure,
self._bbox_inches_restore,
self._figdpi)
self._bbox_inches_restore = r
|
kenshay/ImageScripter
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/urllib3/contrib/_securetransport/bindings.py
|
162
|
"""
This module uses ctypes to bind a whole bunch of functions and constants from
SecureTransport. The goal here is to provide the low-level API to
SecureTransport. These are essentially the C-level functions and constants, and
they're pretty gross to work with.
This code is a bastardised version of the code found in Will Bond's oscrypto
library. An enormous debt is owed to him for blazing this trail for us. For
that reason, this code should be considered to be covered both by urllib3's
license and by oscrypto's:
Copyright (c) 2015-2016 Will Bond <will@wbond.net>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import platform
from ctypes.util import find_library
from ctypes import (
c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long,
c_bool
)
from ctypes import CDLL, POINTER, CFUNCTYPE
security_path = find_library('Security')
if not security_path:
raise ImportError('The library Security could not be found')
core_foundation_path = find_library('CoreFoundation')
if not core_foundation_path:
raise ImportError('The library CoreFoundation could not be found')
version = platform.mac_ver()[0]
version_info = tuple(map(int, version.split('.')))
if version_info < (10, 8):
raise OSError(
'Only OS X 10.8 and newer are supported, not %s.%s' % (
version_info[0], version_info[1]
)
)
Security = CDLL(security_path, use_errno=True)
CoreFoundation = CDLL(core_foundation_path, use_errno=True)
Boolean = c_bool
CFIndex = c_long
CFStringEncoding = c_uint32
CFData = c_void_p
CFString = c_void_p
CFArray = c_void_p
CFMutableArray = c_void_p
CFDictionary = c_void_p
CFError = c_void_p
CFType = c_void_p
CFTypeID = c_ulong
CFTypeRef = POINTER(CFType)
CFAllocatorRef = c_void_p
OSStatus = c_int32
CFDataRef = POINTER(CFData)
CFStringRef = POINTER(CFString)
CFArrayRef = POINTER(CFArray)
CFMutableArrayRef = POINTER(CFMutableArray)
CFDictionaryRef = POINTER(CFDictionary)
CFArrayCallBacks = c_void_p
CFDictionaryKeyCallBacks = c_void_p
CFDictionaryValueCallBacks = c_void_p
SecCertificateRef = POINTER(c_void_p)
SecExternalFormat = c_uint32
SecExternalItemType = c_uint32
SecIdentityRef = POINTER(c_void_p)
SecItemImportExportFlags = c_uint32
SecItemImportExportKeyParameters = c_void_p
SecKeychainRef = POINTER(c_void_p)
SSLProtocol = c_uint32
SSLCipherSuite = c_uint32
SSLContextRef = POINTER(c_void_p)
SecTrustRef = POINTER(c_void_p)
SSLConnectionRef = c_uint32
SecTrustResultType = c_uint32
SecTrustOptionFlags = c_uint32
SSLProtocolSide = c_uint32
SSLConnectionType = c_uint32
SSLSessionOption = c_uint32
try:
Security.SecItemImport.argtypes = [
CFDataRef,
CFStringRef,
POINTER(SecExternalFormat),
POINTER(SecExternalItemType),
SecItemImportExportFlags,
POINTER(SecItemImportExportKeyParameters),
SecKeychainRef,
POINTER(CFArrayRef),
]
Security.SecItemImport.restype = OSStatus
Security.SecCertificateGetTypeID.argtypes = []
Security.SecCertificateGetTypeID.restype = CFTypeID
Security.SecIdentityGetTypeID.argtypes = []
Security.SecIdentityGetTypeID.restype = CFTypeID
Security.SecKeyGetTypeID.argtypes = []
Security.SecKeyGetTypeID.restype = CFTypeID
Security.SecCertificateCreateWithData.argtypes = [
CFAllocatorRef,
CFDataRef
]
Security.SecCertificateCreateWithData.restype = SecCertificateRef
Security.SecCertificateCopyData.argtypes = [
SecCertificateRef
]
Security.SecCertificateCopyData.restype = CFDataRef
Security.SecCopyErrorMessageString.argtypes = [
OSStatus,
c_void_p
]
Security.SecCopyErrorMessageString.restype = CFStringRef
Security.SecIdentityCreateWithCertificate.argtypes = [
CFTypeRef,
SecCertificateRef,
POINTER(SecIdentityRef)
]
Security.SecIdentityCreateWithCertificate.restype = OSStatus
Security.SecKeychainCreate.argtypes = [
c_char_p,
c_uint32,
c_void_p,
Boolean,
c_void_p,
POINTER(SecKeychainRef)
]
Security.SecKeychainCreate.restype = OSStatus
Security.SecKeychainDelete.argtypes = [
SecKeychainRef
]
Security.SecKeychainDelete.restype = OSStatus
Security.SecPKCS12Import.argtypes = [
CFDataRef,
CFDictionaryRef,
POINTER(CFArrayRef)
]
Security.SecPKCS12Import.restype = OSStatus
SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))
Security.SSLSetIOFuncs.argtypes = [
SSLContextRef,
SSLReadFunc,
SSLWriteFunc
]
Security.SSLSetIOFuncs.restype = OSStatus
Security.SSLSetPeerID.argtypes = [
SSLContextRef,
c_char_p,
c_size_t
]
Security.SSLSetPeerID.restype = OSStatus
Security.SSLSetCertificate.argtypes = [
SSLContextRef,
CFArrayRef
]
Security.SSLSetCertificate.restype = OSStatus
Security.SSLSetCertificateAuthorities.argtypes = [
SSLContextRef,
CFTypeRef,
Boolean
]
Security.SSLSetCertificateAuthorities.restype = OSStatus
Security.SSLSetConnection.argtypes = [
SSLContextRef,
SSLConnectionRef
]
Security.SSLSetConnection.restype = OSStatus
Security.SSLSetPeerDomainName.argtypes = [
SSLContextRef,
c_char_p,
c_size_t
]
Security.SSLSetPeerDomainName.restype = OSStatus
Security.SSLHandshake.argtypes = [
SSLContextRef
]
Security.SSLHandshake.restype = OSStatus
Security.SSLRead.argtypes = [
SSLContextRef,
c_char_p,
c_size_t,
POINTER(c_size_t)
]
Security.SSLRead.restype = OSStatus
Security.SSLWrite.argtypes = [
SSLContextRef,
c_char_p,
c_size_t,
POINTER(c_size_t)
]
Security.SSLWrite.restype = OSStatus
Security.SSLClose.argtypes = [
SSLContextRef
]
Security.SSLClose.restype = OSStatus
Security.SSLGetNumberSupportedCiphers.argtypes = [
SSLContextRef,
POINTER(c_size_t)
]
Security.SSLGetNumberSupportedCiphers.restype = OSStatus
Security.SSLGetSupportedCiphers.argtypes = [
SSLContextRef,
POINTER(SSLCipherSuite),
POINTER(c_size_t)
]
Security.SSLGetSupportedCiphers.restype = OSStatus
Security.SSLSetEnabledCiphers.argtypes = [
SSLContextRef,
POINTER(SSLCipherSuite),
c_size_t
]
Security.SSLSetEnabledCiphers.restype = OSStatus
Security.SSLGetNumberEnabledCiphers.argtypes = [
SSLContextRef,
POINTER(c_size_t)
]
Security.SSLGetNumberEnabledCiphers.restype = OSStatus
Security.SSLGetEnabledCiphers.argtypes = [
SSLContextRef,
POINTER(SSLCipherSuite),
POINTER(c_size_t)
]
Security.SSLGetEnabledCiphers.restype = OSStatus
Security.SSLGetNegotiatedCipher.argtypes = [
SSLContextRef,
POINTER(SSLCipherSuite)
]
Security.SSLGetNegotiatedCipher.restype = OSStatus
Security.SSLGetNegotiatedProtocolVersion.argtypes = [
SSLContextRef,
POINTER(SSLProtocol)
]
Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
Security.SSLCopyPeerTrust.argtypes = [
SSLContextRef,
POINTER(SecTrustRef)
]
Security.SSLCopyPeerTrust.restype = OSStatus
Security.SecTrustSetAnchorCertificates.argtypes = [
SecTrustRef,
CFArrayRef
]
Security.SecTrustSetAnchorCertificates.restype = OSStatus
Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
SecTrustRef,
Boolean
]
Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
Security.SecTrustEvaluate.argtypes = [
SecTrustRef,
POINTER(SecTrustResultType)
]
Security.SecTrustEvaluate.restype = OSStatus
Security.SecTrustGetCertificateCount.argtypes = [
SecTrustRef
]
Security.SecTrustGetCertificateCount.restype = CFIndex
Security.SecTrustGetCertificateAtIndex.argtypes = [
SecTrustRef,
CFIndex
]
Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
Security.SSLCreateContext.argtypes = [
CFAllocatorRef,
SSLProtocolSide,
SSLConnectionType
]
Security.SSLCreateContext.restype = SSLContextRef
Security.SSLSetSessionOption.argtypes = [
SSLContextRef,
SSLSessionOption,
Boolean
]
Security.SSLSetSessionOption.restype = OSStatus
Security.SSLSetProtocolVersionMin.argtypes = [
SSLContextRef,
SSLProtocol
]
Security.SSLSetProtocolVersionMin.restype = OSStatus
Security.SSLSetProtocolVersionMax.argtypes = [
SSLContextRef,
SSLProtocol
]
Security.SSLSetProtocolVersionMax.restype = OSStatus
Security.SecCopyErrorMessageString.argtypes = [
OSStatus,
c_void_p
]
Security.SecCopyErrorMessageString.restype = CFStringRef
Security.SSLReadFunc = SSLReadFunc
Security.SSLWriteFunc = SSLWriteFunc
Security.SSLContextRef = SSLContextRef
Security.SSLProtocol = SSLProtocol
Security.SSLCipherSuite = SSLCipherSuite
Security.SecIdentityRef = SecIdentityRef
Security.SecKeychainRef = SecKeychainRef
Security.SecTrustRef = SecTrustRef
Security.SecTrustResultType = SecTrustResultType
Security.SecExternalFormat = SecExternalFormat
Security.OSStatus = OSStatus
Security.kSecImportExportPassphrase = CFStringRef.in_dll(
Security, 'kSecImportExportPassphrase'
)
Security.kSecImportItemIdentity = CFStringRef.in_dll(
Security, 'kSecImportItemIdentity'
)
# CoreFoundation time!
CoreFoundation.CFRetain.argtypes = [
CFTypeRef
]
CoreFoundation.CFRetain.restype = CFTypeRef
CoreFoundation.CFRelease.argtypes = [
CFTypeRef
]
CoreFoundation.CFRelease.restype = None
CoreFoundation.CFGetTypeID.argtypes = [
CFTypeRef
]
CoreFoundation.CFGetTypeID.restype = CFTypeID
CoreFoundation.CFStringCreateWithCString.argtypes = [
CFAllocatorRef,
c_char_p,
CFStringEncoding
]
CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
CoreFoundation.CFStringGetCStringPtr.argtypes = [
CFStringRef,
CFStringEncoding
]
CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
CoreFoundation.CFStringGetCString.argtypes = [
CFStringRef,
c_char_p,
CFIndex,
CFStringEncoding
]
CoreFoundation.CFStringGetCString.restype = c_bool
CoreFoundation.CFDataCreate.argtypes = [
CFAllocatorRef,
c_char_p,
CFIndex
]
CoreFoundation.CFDataCreate.restype = CFDataRef
CoreFoundation.CFDataGetLength.argtypes = [
CFDataRef
]
CoreFoundation.CFDataGetLength.restype = CFIndex
CoreFoundation.CFDataGetBytePtr.argtypes = [
CFDataRef
]
CoreFoundation.CFDataGetBytePtr.restype = c_void_p
CoreFoundation.CFDictionaryCreate.argtypes = [
CFAllocatorRef,
POINTER(CFTypeRef),
POINTER(CFTypeRef),
CFIndex,
CFDictionaryKeyCallBacks,
CFDictionaryValueCallBacks
]
CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
CoreFoundation.CFDictionaryGetValue.argtypes = [
CFDictionaryRef,
CFTypeRef
]
CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
CoreFoundation.CFArrayCreate.argtypes = [
CFAllocatorRef,
POINTER(CFTypeRef),
CFIndex,
CFArrayCallBacks,
]
CoreFoundation.CFArrayCreate.restype = CFArrayRef
CoreFoundation.CFArrayCreateMutable.argtypes = [
CFAllocatorRef,
CFIndex,
CFArrayCallBacks
]
CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
CoreFoundation.CFArrayAppendValue.argtypes = [
CFMutableArrayRef,
c_void_p
]
CoreFoundation.CFArrayAppendValue.restype = None
CoreFoundation.CFArrayGetCount.argtypes = [
CFArrayRef
]
CoreFoundation.CFArrayGetCount.restype = CFIndex
CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
CFArrayRef,
CFIndex
]
CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
CoreFoundation, 'kCFAllocatorDefault'
)
CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
)
CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
)
CoreFoundation.CFTypeRef = CFTypeRef
CoreFoundation.CFArrayRef = CFArrayRef
CoreFoundation.CFStringRef = CFStringRef
CoreFoundation.CFDictionaryRef = CFDictionaryRef
except AttributeError:
raise ImportError('Error initializing ctypes')
class CFConst(object):
"""
A class object that acts as essentially a namespace for CoreFoundation
constants.
"""
kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
class SecurityConst(object):
"""
A class object that acts as essentially a namespace for Security constants.
"""
kSSLSessionOptionBreakOnServerAuth = 0
kSSLProtocol2 = 1
kSSLProtocol3 = 2
kTLSProtocol1 = 4
kTLSProtocol11 = 7
kTLSProtocol12 = 8
kSSLClientSide = 1
kSSLStreamType = 0
kSecFormatPEMSequence = 10
kSecTrustResultInvalid = 0
kSecTrustResultProceed = 1
# This gap is present on purpose: this was kSecTrustResultConfirm, which
# is deprecated.
kSecTrustResultDeny = 3
kSecTrustResultUnspecified = 4
kSecTrustResultRecoverableTrustFailure = 5
kSecTrustResultFatalTrustFailure = 6
kSecTrustResultOtherError = 7
errSSLProtocol = -9800
errSSLWouldBlock = -9803
errSSLClosedGraceful = -9805
errSSLClosedNoNotify = -9816
errSSLClosedAbort = -9806
errSSLXCertChainInvalid = -9807
errSSLCrypto = -9809
errSSLInternal = -9810
errSSLCertExpired = -9814
errSSLCertNotYetValid = -9815
errSSLUnknownRootCert = -9812
errSSLNoRootCert = -9813
errSSLHostNameMismatch = -9843
errSSLPeerHandshakeFail = -9824
errSSLPeerUserCancelled = -9839
errSSLWeakPeerEphemeralDHKey = -9850
errSSLServerAuthCompleted = -9841
errSSLRecordOverflow = -9847
errSecVerifyFailed = -67808
errSecNoTrustSettings = -25263
errSecItemNotFound = -25300
errSecInvalidTrustSettings = -25262
# Cipher suites. We only pick the ones our default cipher string allows.
TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
TLS_AES_128_GCM_SHA256 = 0x1301
TLS_AES_256_GCM_SHA384 = 0x1302
TLS_CHACHA20_POLY1305_SHA256 = 0x1303
|
priyankajain18/stock_package
|
refs/heads/develop
|
stock.py
|
1
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from trytond.model import ModelSQL, ModelView, Workflow, fields
from trytond.pool import Pool, PoolMeta
from trytond.pyson import Eval
from trytond.report import Report
__all__ = ['Configuration', 'Package', 'Type', 'Move',
'ShipmentOut', 'ShipmentInReturn', 'PackageLabel']
class Configuration:
__metaclass__ = PoolMeta
__name__ = 'stock.configuration'
package_sequence = fields.Property(fields.Many2One('ir.sequence',
'Package Sequence', domain=[
('company', 'in', [Eval('context', {}).get('company'), None]),
('code', '=', 'stock.package'),
], required=True))
class Package(ModelSQL, ModelView):
'Stock Package'
__name__ = 'stock.package'
_rec_name = 'code'
code = fields.Char('Code', select=True, readonly=True, required=True)
type = fields.Many2One('stock.package.type', 'Type', required=True)
shipment = fields.Reference('Shipment', selection='get_shipment',
select=True)
moves = fields.One2Many('stock.move', 'package', 'Moves',
domain=[
('shipment', '=', Eval('shipment')),
('to_location.type', 'in', ['customer', 'supplier']),
],
add_remove=[
('package', '=', None),
],
depends=['shipment'])
parent = fields.Many2One('stock.package', 'Parent', select=True,
ondelete='CASCADE', domain=[('shipment', '=', Eval('shipment'))],
depends=['shipment'])
children = fields.One2Many('stock.package', 'parent', 'Children',
domain=[('shipment', '=', Eval('shipment'))],
depends=['shipment'])
@staticmethod
def _get_shipment():
'Return list of Model names for shipment Reference'
return [
'stock.shipment.out',
'stock.shipment.in.return',
]
@classmethod
def get_shipment(cls):
pool = Pool()
Model = pool.get('ir.model')
models = cls._get_shipment()
models = Model.search([
('model', 'in', models),
])
return [(None, '')] + [(m.model, m.name) for m in models]
@classmethod
def create(cls, vlist):
pool = Pool()
Sequence = pool.get('ir.sequence')
Config = pool.get('stock.configuration')
vlist = [v.copy() for v in vlist]
config = Config(1)
for values in vlist:
values['code'] = Sequence.get_id(config.package_sequence)
return super(Package, cls).create(vlist)
@classmethod
def validate(cls, packages):
super(Package, cls).validate(packages)
cls.check_recursion(packages)
class Type(ModelSQL, ModelView):
'Stock Package Type'
__name__ = 'stock.package.type'
name = fields.Char('Name', required=True)
class Move:
__metaclass__ = PoolMeta
__name__ = 'stock.move'
package = fields.Many2One('stock.package', 'Package', select=True)
class PackageMixin:
packages = fields.One2Many('stock.package', 'shipment', 'Packages',
states={
'readonly': Eval('state').in_(['done', 'cancel']),
})
root_packages = fields.Function(fields.One2Many('stock.package',
'shipment', 'Packages',
domain=[('parent', '=', None)],
states={
'readonly': Eval('state').in_(['done', 'cancel']),
}), 'get_root_packages', setter='set_root_packages')
def get_root_packages(self, name):
return [p.id for p in self.packages if not p.parent]
@classmethod
def set_root_packages(cls, shipments, name, value):
if not value:
return
cls.write(shipments, {
'packages': value,
})
class ShipmentOut(object, PackageMixin):
__metaclass__ = PoolMeta
__name__ = 'stock.shipment.out'
@classmethod
def __setup__(cls):
super(ShipmentOut, cls).__setup__()
cls._error_messages.update({
'package_mismatch': ('Not all Outgoing Moves of '
'Customer Shipment "%s" are packaged.'),
})
@classmethod
@ModelView.button
@Workflow.transition('done')
def done(cls, shipments):
super(ShipmentOut, cls).done(shipments)
for shipment in shipments:
if not shipment.packages:
continue
if (len(shipment.outgoing_moves)
!= sum(len(p.moves) for p in shipment.packages)):
cls.raise_user_error('package_mismatch', shipment.rec_name)
class ShipmentInReturn(object, PackageMixin):
__metaclass__ = PoolMeta
__name__ = 'stock.shipment.in.return'
@classmethod
def __setup__(cls):
super(ShipmentInReturn, cls).__setup__()
cls._error_messages.update({
'package_mismatch': ('Not all Outgoing Moves of '
'Supplier Return Shipment "%s" are packaged.'),
})
@classmethod
@ModelView.button
@Workflow.transition('done')
def done(cls, shipments):
super(ShipmentInReturn, cls).done(shipments)
for shipment in shipments:
if not shipment.packages:
continue
if (len(shipment.outgoing_moves)
!= sum(len(p.moves) for p in shipment.packages)):
cls.raise_user_error('package_mismatch', shipment.rec_name)
class PackageLabel(Report):
'Package Label'
__name__ = 'stock.package.label'
|
Containers-Testing-Framework/ctf-cli
|
refs/heads/master
|
features/steps/command_util.py
|
3
|
# -*- coding: utf-8 -*-
"""
Provides some command utility functions.
TODO:
matcher that ignores empty lines and whitespace and has contains comparison
"""
from __future__ import absolute_import
import pathutil
from __setup import TOP, TOPA
import os.path
import shutil
from fnmatch import fnmatch
# -----------------------------------------------------------------------------
# CONSTANTS:
# -----------------------------------------------------------------------------
# HERE = os.path.dirname(__file__)
# TOP = os.path.join(HERE, "..")
# TOPA = os.path.abspath(TOP)
WORKDIR = os.path.join(TOP, "__WORKDIR__")
# -----------------------------------------------------------------------------
# UTILITY FUNCTIONS:
# -----------------------------------------------------------------------------
def workdir_save_coverage_files(workdir, destdir=None):
assert os.path.isdir(workdir)
if not destdir:
destdir = TOPA
if os.path.abspath(workdir) == os.path.abspath(destdir):
return # -- SKIP: Source directory is destination directory (SAME).
for fname in os.listdir(workdir):
if fnmatch(fname, ".coverage.*"):
# -- MOVE COVERAGE FILES:
sourcename = os.path.join(workdir, fname)
shutil.move(sourcename, destdir)
# def ensure_directory_exists(dirname):
# """
# Ensures that a directory exists.
# If it does not exist, it is automatically created.
# """
# if not os.path.exists(dirname):
# os.makedirs(dirname)
# assert os.path.exists(dirname)
# assert os.path.isdir(dirname)
def ensure_context_attribute_exists(context, name, default_value=None):
"""
Ensure a behave resource exists as an attribute on the behave context.
If it does not, the attribute is created with the given default_value.
"""
if not hasattr(context, name):
setattr(context, name, default_value)
def ensure_workdir_exists(context):
"""
Ensures that the work directory exists.
In addition, the location of the workdir is stored as an attribute on
the context object.
"""
ensure_context_attribute_exists(context, "workdir", None)
if not context.workdir:
context.workdir = os.path.abspath(WORKDIR)
pathutil.ensure_directory_exists(context.workdir)
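# A minimal usage sketch for the two helpers above, assuming a behave-style
# ``context`` object (the stand-in class below is hypothetical):
#
#   >>> class FakeContext(object): pass
#   >>> ctx = FakeContext()
#   >>> ensure_context_attribute_exists(ctx, "tries", default_value=0)
#   >>> ctx.tries
#   0
#   >>> ensure_workdir_exists(ctx)   # creates __WORKDIR__ if missing
#   >>> os.path.isdir(ctx.workdir)
#   True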
# def create_textfile_with_contents(filename, contents):
# """
# Creates a textual file with the provided contents in the workdir.
# Overwrites an existing file.
# """
# ensure_directory_exists(os.path.dirname(filename))
# if os.path.exists(filename):
# os.remove(filename)
# outstream = open(filename, "w")
# outstream.write(contents)
# if not contents.endswith("\n"):
# outstream.write("\n")
# outstream.flush()
# outstream.close()
# assert os.path.exists(filename)
# def text_remove_empty_lines(text):
# """
# Whitespace normalization:
# - Strip empty lines
# - Strip trailing whitespace
# """
# lines = [ line.rstrip() for line in text.splitlines() if line.strip() ]
# return "\n".join(lines)
#
# def text_normalize(text):
# """
# Whitespace normalization:
# - Strip empty lines
# - Strip leading whitespace in a line
# - Strip trailing whitespace in a line
# - Normalize line endings
# """
# lines = [ line.strip() for line in text.splitlines() if line.strip() ]
# return "\n".join(lines)
# def posixpath_normpath(pathname):
# """
# Convert path into POSIX path:
# - Normalize path
# - Replace backslash with slash
# """
# backslash = '\\'
# pathname = os.path.normpath(pathname)
# if backslash in pathname:
# pathname = pathname.replace(backslash, '/')
# return pathname
|
ormnv/os_final_project
|
refs/heads/master
|
django/contrib/messages/constants.py
|
630
|
DEBUG = 10
INFO = 20
SUCCESS = 25
WARNING = 30
ERROR = 40
DEFAULT_TAGS = {
DEBUG: 'debug',
INFO: 'info',
SUCCESS: 'success',
WARNING: 'warning',
ERROR: 'error',
}
|
NewpTone/stacklab-nova
|
refs/heads/master
|
debian/tmp/usr/lib/python2.7/dist-packages/nova/api/openstack/compute/consoles.py
|
19
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from webob import exc
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.console import api as console_api
from nova import exception
def _translate_keys(cons):
"""Coerces a console instance into proper dictionary format """
pool = cons['pool']
info = {'id': cons['id'],
'console_type': pool['console_type']}
return dict(console=info)
def _translate_detail_keys(cons):
"""Coerces a console instance into proper dictionary format with
correctly mapped attributes """
pool = cons['pool']
info = {'id': cons['id'],
'console_type': pool['console_type'],
'password': cons['password'],
'instance_name': cons['instance_name'],
'port': cons['port'],
'host': pool['public_hostname']}
return dict(console=info)
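# A shape sketch for the two translators above, assuming a minimal console
# record as handed over by the console API (field values are hypothetical;
# dict key order may vary):
#
#   >>> cons = {'id': 1, 'password': 's3cret', 'instance_name': 'vm-1',
#   ...         'port': 5900, 'pool': {'console_type': 'vnc',
#   ...                                'public_hostname': 'host-a'}}
#   >>> _translate_keys(cons)
#   {'console': {'id': 1, 'console_type': 'vnc'}}
#   >>> _translate_detail_keys(cons)['console']['host']
#   'host-a'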
class ConsoleTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('console', selector='console')
id_elem = xmlutil.SubTemplateElement(root, 'id', selector='id')
id_elem.text = xmlutil.Selector()
port_elem = xmlutil.SubTemplateElement(root, 'port', selector='port')
port_elem.text = xmlutil.Selector()
host_elem = xmlutil.SubTemplateElement(root, 'host', selector='host')
host_elem.text = xmlutil.Selector()
passwd_elem = xmlutil.SubTemplateElement(root, 'password',
selector='password')
passwd_elem.text = xmlutil.Selector()
constype_elem = xmlutil.SubTemplateElement(root, 'console_type',
selector='console_type')
constype_elem.text = xmlutil.Selector()
return xmlutil.MasterTemplate(root, 1)
class ConsolesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('consoles')
console = xmlutil.SubTemplateElement(root, 'console',
selector='consoles')
console.append(ConsoleTemplate())
return xmlutil.MasterTemplate(root, 1)
class Controller(object):
"""The Consoles controller for the OpenStack API"""
def __init__(self):
self.console_api = console_api.API()
@wsgi.serializers(xml=ConsolesTemplate)
def index(self, req, server_id):
"""Returns a list of consoles for this instance"""
consoles = self.console_api.get_consoles(
req.environ['nova.context'],
server_id)
return dict(consoles=[_translate_keys(console)
for console in consoles])
def create(self, req, server_id):
"""Creates a new console"""
self.console_api.create_console(
req.environ['nova.context'],
server_id)
@wsgi.serializers(xml=ConsoleTemplate)
def show(self, req, server_id, id):
"""Shows in-depth information on a specific console"""
try:
console = self.console_api.get_console(
req.environ['nova.context'],
server_id,
int(id))
except exception.NotFound:
raise exc.HTTPNotFound()
return _translate_detail_keys(console)
def update(self, req, server_id, id):
"""You can't update a console"""
raise exc.HTTPNotImplemented()
def delete(self, req, server_id, id):
"""Deletes a console"""
try:
self.console_api.delete_console(req.environ['nova.context'],
server_id,
int(id))
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
def create_resource():
return wsgi.Resource(Controller())
|
habeanf/Open-Knesset
|
refs/heads/upmaster
|
mks/migrations/0016_bill_stats.py
|
15
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for mk in orm.Member.objects.all():
mk.bills_stats_proposed = mk.bills.count()
mk.bills_stats_pre = mk.bills.filter(stage__in=['2','3','4','5','6']).count()
mk.bills_stats_first = mk.bills.filter(stage__in=['4','5','6']).count()
mk.bills_stats_approved = mk.bills.filter(stage='6').count()
mk.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'committees.committee': {
'Meta': {'object_name': 'Committee'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'committees.committeemeeting': {
'Meta': {'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': "orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'laws.bill': {
'Meta': {'object_name': 'Bill'},
'approval_vote': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'bill_approved'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Vote']"}),
'first_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'first_vote': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'to': "orm['laws.Vote']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['laws.Law']"}),
'pre_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_pre_votes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'second_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_second'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'stage': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'stage_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.govlegislationcommitteedecision': {
'Meta': {'object_name': 'GovLegislationCommitteeDecision'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'gov_decisions'", 'null': 'True', 'to': "orm['laws.Bill']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'stand': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.govproposal': {
'Meta': {'object_name': 'GovProposal'},
'bill': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'gov_proposal'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Bill']"}),
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_govproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_govproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_govproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.knessetproposal': {
'Meta': {'object_name': 'KnessetProposal'},
'bill': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'knesset_proposal'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Bill']"}),
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['committees.Committee']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'originals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'knesset_proposals'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.PrivateProposal']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.law': {
'Meta': {'object_name': 'Law'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_into': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'duplicates'", 'null': 'True', 'to': "orm['laws.Law']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.membervotingstatistics': {
'Meta': {'object_name': 'MemberVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Member']"})
},
'laws.partyvotingstatistics': {
'Meta': {'object_name': 'PartyVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Party']"})
},
'laws.privateproposal': {
'Meta': {'object_name': 'PrivateProposal'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proposals'", 'null': 'True', 'to': "orm['laws.Bill']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'content_html': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'proposal_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_proposed'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.vote': {
'Meta': {'object_name': 'Vote'},
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
},
'mks.correlation': {
'Meta': {'object_name': 'Correlation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'm1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'m1'", 'to': "orm['mks.Member']"}),
'm2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'m2'", 'to': "orm['mks.Member']"}),
'normalized_score': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'not_same_party': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'mks.member': {
'Meta': {'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.weeklypresence': {
'Meta': {'object_name': 'WeeklyPresence'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'hours': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"})
},
'planet.blog': {
'Meta': {'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
},
'tagging.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['laws', 'mks']
|
AlphaCluster/NewsBlur
|
refs/heads/master
|
utils/munin/newsblur_app_times.py
|
3
|
#!/srv/newsblur/venv/newsblur/bin/python
from utils.munin.base import MuninGraph
class NBMuninGraph(MuninGraph):
@property
def graph_config(self):
graph = {
'graph_category' : 'NewsBlur',
'graph_title' : 'NewsBlur App Server Times',
'graph_vlabel' : 'Page load time / server',
'graph_args' : '-l 0',
}
stats = self.stats
graph['graph_order'] = ' '.join(sorted(s['_id'] for s in stats))
graph.update(dict((("%s.label" % s['_id'], s['_id']) for s in stats)))
graph.update(dict((("%s.draw" % s['_id'], 'LINE1') for s in stats)))
return graph
def calculate_metrics(self):
servers = dict((("%s" % s['_id'], s['page_load']) for s in self.stats))
return servers
@property
def stats(self):
import datetime
from django.conf import settings
stats = settings.MONGOANALYTICSDB.nbanalytics.page_loads.aggregate([{
"$match": {
"date": {
"$gt": datetime.datetime.now() - datetime.timedelta(minutes=5),
},
},
}, {
"$group": {
"_id" : "$server",
"page_load" : {"$avg": "$page_load"},
},
}])
return list(stats)
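# A shape sketch of what the aggregation above returns, assuming a few recent
# page_load documents (server names and timings are hypothetical):
#
#   [{'_id': 'app01', 'page_load': 0.42},
#    {'_id': 'app02', 'page_load': 0.57}]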
if __name__ == '__main__':
NBMuninGraph().run()
|
crdoconnor/pexpect
|
refs/heads/master
|
tests/depricated_test_filedescriptor.py
|
20
|
#!/usr/bin/env python
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import pexpect
import unittest
import PexpectTestCase
import os
class ExpectTestCase(PexpectTestCase.PexpectTestCase):
def setUp(self):
print(self.id())
PexpectTestCase.PexpectTestCase.setUp(self)
def test_fd(self):
fd = os.open('TESTDATA.txt', os.O_RDONLY)
s = pexpect.spawn(fd)
s.expect('This is the end of test data:')
s.expect(pexpect.EOF)
assert s.before == ' END\n'
def test_maxread(self):
fd = os.open('TESTDATA.txt', os.O_RDONLY)
s = pexpect.spawn(fd)
s.maxread = 100
s.expect('2')
s.expect('This is the end of test data:')
s.expect(pexpect.EOF)
assert s.before == ' END\n'
def test_fd_isalive(self):
fd = os.open('TESTDATA.txt', os.O_RDONLY)
s = pexpect.spawn(fd)
assert s.isalive()
os.close(fd)
assert not s.isalive()
def test_fd_isatty(self):
fd = os.open('TESTDATA.txt', os.O_RDONLY)
s = pexpect.spawn(fd)
assert not s.isatty()
os.close(fd)
### def test_close_does_not_close_fd (self):
### '''Calling close() on a pexpect.spawn object should not
### close the underlying file descriptor.
### '''
### fd = os.open ('TESTDATA.txt', os.O_RDONLY)
### s = pexpect.spawn (fd)
### try:
### s.close()
### self.fail('Expected an Exception.')
### except pexpect.ExceptionPexpect, e:
### pass
if __name__ == '__main__':
unittest.main()
suite = unittest.makeSuite(ExpectTestCase, 'test')
#fout = open('delete_me_1','wb')
#fout.write(the_old_way)
#fout.close
#fout = open('delete_me_2', 'wb')
#fout.write(the_new_way)
#fout.close
|
jaggu303619/asylum
|
refs/heads/master
|
openerp/tests/common.py
|
58
|
# -*- coding: utf-8 -*-
"""
The module :mod:`openerp.tests.common` provides a few helpers and classes to write
tests.
"""
import threading
import time
import unittest2
import xmlrpclib
import openerp
# The openerp library is supposed already configured.
ADDONS_PATH = openerp.tools.config['addons_path']
PORT = openerp.tools.config['xmlrpc_port']
DB = openerp.tools.config['db_name']
# If the database name is not provided on the command-line,
# use the one on the thread (which means if it is provided on
# the command-line, this will break when installing another
# database from XML-RPC).
if not DB and hasattr(threading.current_thread(), 'dbname'):
DB = threading.current_thread().dbname
HOST = '127.0.0.1'
ADMIN_USER = 'admin'
ADMIN_USER_ID = openerp.SUPERUSER_ID
ADMIN_PASSWORD = 'admin'
def start_openerp():
"""
Start the OpenERP server similarly to the openerp-server script.
"""
openerp.service.start_services()
# Ugly way to ensure the server is listening.
time.sleep(2)
def stop_openerp():
"""
Shut down the OpenERP server similarly to a single ctrl-c.
"""
openerp.service.stop_services()
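# A hedged sequencing sketch using the two helpers above (nothing beyond this
# module is assumed):
#
#   start_openerp()      # boots services; sleeps briefly so the port is live
#   try:
#       pass             # run XML-RPC tests against HOST:PORT here
#   finally:
#       stop_openerp()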
class BaseCase(unittest2.TestCase):
"""
Subclass of TestCase for common OpenERP-specific code.
This class is abstract and expects self.cr and self.uid to be initialized by subclasses.
"""
@classmethod
def cursor(self):
return openerp.modules.registry.RegistryManager.get(DB).db.cursor()
@classmethod
def registry(self, model):
return openerp.modules.registry.RegistryManager.get(DB)[model]
@classmethod
def ref(self, xid):
""" Returns database ID corresponding to a given identifier.
:param xid: fully-qualified record identifier, in the form ``module.identifier``
:raise: ValueError if not found
"""
assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
module, xid = xid.split('.')
_, id = self.registry('ir.model.data').get_object_reference(self.cr, self.uid, module, xid)
return id
@classmethod
def browse_ref(self, xid):
""" Returns a browsable record for the given identifier.
:param xid: fully-qualified record identifier, in the form ``module.identifier``
:raise: ValueError if not found
"""
assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
module, xid = xid.split('.')
return self.registry('ir.model.data').get_object(self.cr, self.uid, module, xid)
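# A minimal usage sketch inside a test method, assuming an external id of the
# form ``module.identifier`` exists in the test database (the id below is
# hypothetical):
#
#   >>> partner_id = self.ref('base.main_partner')
#   >>> partner = self.browse_ref('base.main_partner')
#   >>> partner.id == partner_id
#   True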
class TransactionCase(BaseCase):
"""
Subclass of BaseCase with a single transaction, rolled-back at the end of
each test (method).
"""
def setUp(self):
# Store cr and uid in class variables, to allow ref() and browse_ref to be BaseCase @classmethods
# and still access them
TransactionCase.cr = self.cursor()
TransactionCase.uid = openerp.SUPERUSER_ID
def tearDown(self):
self.cr.rollback()
self.cr.close()
class SingleTransactionCase(BaseCase):
"""
Subclass of BaseCase with a single transaction for the whole class,
rolled-back after all the tests.
"""
@classmethod
def setUpClass(cls):
cls.cr = cls.cursor()
cls.uid = openerp.SUPERUSER_ID
@classmethod
def tearDownClass(cls):
cls.cr.rollback()
cls.cr.close()
class RpcCase(unittest2.TestCase):
"""
Subclass of TestCase with a few XML-RPC proxies.
"""
def __init__(self, methodName='runTest'):
super(RpcCase, self).__init__(methodName)
class A(object):
pass
self.proxy = A()
# Use the old (pre 6.1) API.
self.proxy.url_60 = url_60 = 'http://%s:%d/xmlrpc/' % (HOST, PORT)
self.proxy.common_60 = xmlrpclib.ServerProxy(url_60 + 'common')
self.proxy.db_60 = xmlrpclib.ServerProxy(url_60 + 'db')
self.proxy.object_60 = xmlrpclib.ServerProxy(url_60 + 'object')
# Use the new (6.1) API.
self.proxy.url_61 = url_61 = 'http://%s:%d/openerp/xmlrpc/1/' % (HOST, PORT)
self.proxy.common_61 = xmlrpclib.ServerProxy(url_61 + 'common')
self.proxy.db_61 = xmlrpclib.ServerProxy(url_61 + 'db')
self.proxy.model_61 = xmlrpclib.ServerProxy(url_61 + 'model/' + DB)
@classmethod
def generate_database_name(cls):
if hasattr(cls, '_database_id'):
cls._database_id += 1
else:
cls._database_id = 0
return '_fresh_name_' + str(cls._database_id) + '_'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
naturalatlas/mapnik
|
refs/heads/master
|
scons/scons-local-3.0.1/SCons/PathList.py
|
6
|
#
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/PathList.py 74b2c53bc42290e911b334a6b44f187da698a668 2017/11/14 13:16:53 bdbaddog"
__doc__ = """SCons.PathList
A module for handling lists of directory paths (the sort of things
that get set as CPPPATH, LIBPATH, etc.) with as much caching of data and
efficiency as we can, while still keeping the evaluation delayed so that we
Do the Right Thing (almost) regardless of how the variable is specified.
"""
import os
import SCons.Memoize
import SCons.Node
import SCons.Util
#
# Variables to specify the different types of entries in a PathList object:
#
TYPE_STRING_NO_SUBST = 0 # string with no '$'
TYPE_STRING_SUBST = 1 # string containing '$'
TYPE_OBJECT = 2 # other object
def node_conv(obj):
"""
This is the "string conversion" routine that we have our substitutions
use to return Nodes, not strings. This relies on the fact that an
EntryProxy object has a get() method that returns the underlying
Node that it wraps, which is a bit of architectural dependence
that we might need to break or modify in the future in response to
additional requirements.
"""
try:
get = obj.get
except AttributeError:
if isinstance(obj, SCons.Node.Node) or SCons.Util.is_Sequence( obj ):
result = obj
else:
result = str(obj)
else:
result = get()
return result
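# A behavior sketch for node_conv, assuming a toy wrapper that exposes the
# get() method the docstring describes (the class is hypothetical):
#
#   >>> node_conv('plain')            # no .get, not a Node/sequence -> str()
#   'plain'
#   >>> class FakeProxy(object):
#   ...     def get(self): return 'wrapped-node'
#   >>> node_conv(FakeProxy())
#   'wrapped-node'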
class _PathList(object):
"""
An actual PathList object.
"""
def __init__(self, pathlist):
"""
Initializes a PathList object, canonicalizing the input and
pre-processing it for quicker substitution later.
The stored representation of the PathList is a list of tuples
containing (type, value), where the "type" is one of the TYPE_*
variables defined above. We distinguish between:
strings that contain no '$' and therefore need no
delayed-evaluation string substitution (we expect that there
will be many of these and that we therefore get a pretty
big win from avoiding string substitution)
strings that contain '$' and therefore need substitution
(the hard case is things like '${TARGET.dir}/include',
which require re-evaluation for every target + source)
other objects (which may be something like an EntryProxy
that needs a method called to return a Node)
Pre-identifying the type of each element in the PathList up-front
and storing the type in the list of tuples is intended to reduce
the amount of calculation when we actually do the substitution
over and over for each target.
"""
if SCons.Util.is_String(pathlist):
pathlist = pathlist.split(os.pathsep)
elif not SCons.Util.is_Sequence(pathlist):
pathlist = [pathlist]
pl = []
for p in pathlist:
try:
found = '$' in p
except (AttributeError, TypeError):
type = TYPE_OBJECT
else:
if not found:
type = TYPE_STRING_NO_SUBST
else:
type = TYPE_STRING_SUBST
pl.append((type, p))
self.pathlist = tuple(pl)
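# An illustrative sketch of the stored (type, value) representation the
# docstring above describes (paths are hypothetical):
#
#   >>> pl = _PathList(['include', '$ROOT/include'])
#   >>> pl.pathlist
#   ((0, 'include'), (1, '$ROOT/include'))  # TYPE_STRING_NO_SUBST, TYPE_STRING_SUBST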
def __len__(self): return len(self.pathlist)
def __getitem__(self, i): return self.pathlist[i]
def subst_path(self, env, target, source):
"""
Performs construction variable substitution on a pre-digested
PathList for a specific target and source.
"""
result = []
for type, value in self.pathlist:
if type == TYPE_STRING_SUBST:
value = env.subst(value, target=target, source=source,
conv=node_conv)
if SCons.Util.is_Sequence(value):
result.extend(SCons.Util.flatten(value))
elif value:
result.append(value)
elif type == TYPE_OBJECT:
value = node_conv(value)
if value:
result.append(value)
elif value:
result.append(value)
return tuple(result)
class PathListCache(object):
"""
A class to handle caching of PathList lookups.
This class gets instantiated once and then deleted from the namespace,
so it's used as a Singleton (although we don't enforce that in the
usual Pythonic ways). We could have just made the cache a dictionary
in the module namespace, but putting it in this class allows us to
use the same Memoizer pattern that we use elsewhere to count cache
hits and misses, which is very valuable.
Lookup keys in the cache are computed by the _PathList_key() method.
Cache lookup should be quick, so we don't spend cycles canonicalizing
all forms of the same lookup key. For example, 'x:y' and ['x',
'y'] logically represent the same list, but we don't bother to
split string representations and treat those two equivalently.
(Note, however, that we do treat lists and tuples the same.)
The main type of duplication we're trying to catch will come from
looking up the same path list from two different clones of the
same construction environment. That is, given
env2 = env1.Clone()
both env1 and env2 will have the same CPPPATH value, and we can
cheaply avoid re-parsing both values of CPPPATH by using the
common value from this cache.
"""
def __init__(self):
self._memo = {}
def _PathList_key(self, pathlist):
"""
Returns the key for memoization of PathLists.
Note that we want this to be pretty quick, so we don't completely
canonicalize all forms of the same list. For example,
'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir'] may logically
represent the same list if you're executing from $ROOT, but
we're not going to bother splitting strings into path elements,
or massaging strings into Nodes, to identify that equivalence.
We just want to eliminate obvious redundancy from the normal
case of re-using exactly the same cloned value for a path.
"""
if SCons.Util.is_Sequence(pathlist):
pathlist = tuple(SCons.Util.flatten(pathlist))
return pathlist
@SCons.Memoize.CountDictCall(_PathList_key)
def PathList(self, pathlist):
"""
Returns the cached _PathList object for the specified pathlist,
creating and caching a new object as necessary.
"""
pathlist = self._PathList_key(pathlist)
try:
memo_dict = self._memo['PathList']
except KeyError:
memo_dict = {}
self._memo['PathList'] = memo_dict
else:
try:
return memo_dict[pathlist]
except KeyError:
pass
result = _PathList(pathlist)
memo_dict[pathlist] = result
return result
PathList = PathListCache().PathList
del PathListCache
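# A minimal caching sketch using the module-level PathList() bound above:
# equal path sequences share one _PathList object, which is the memoization
# the class docstring motivates with env.Clone().
#
#   >>> pl1 = PathList(('a', 'b'))
#   >>> pl2 = PathList(['a', 'b'])    # lists and tuples are keyed the same
#   >>> pl1 is pl2
#   True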
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
djkonro/client-python
|
refs/heads/master
|
kubernetes/client/models/v1_git_repo_volume_source.py
|
2
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1GitRepoVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, directory=None, repository=None, revision=None):
"""
V1GitRepoVolumeSource - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'directory': 'str',
'repository': 'str',
'revision': 'str'
}
self.attribute_map = {
'directory': 'directory',
'repository': 'repository',
'revision': 'revision'
}
self._directory = directory
self._repository = repository
self._revision = revision
@property
def directory(self):
"""
Gets the directory of this V1GitRepoVolumeSource.
Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.
:return: The directory of this V1GitRepoVolumeSource.
:rtype: str
"""
return self._directory
@directory.setter
def directory(self, directory):
"""
Sets the directory of this V1GitRepoVolumeSource.
Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.
:param directory: The directory of this V1GitRepoVolumeSource.
:type: str
"""
self._directory = directory
@property
def repository(self):
"""
Gets the repository of this V1GitRepoVolumeSource.
Repository URL
:return: The repository of this V1GitRepoVolumeSource.
:rtype: str
"""
return self._repository
@repository.setter
def repository(self, repository):
"""
Sets the repository of this V1GitRepoVolumeSource.
Repository URL
:param repository: The repository of this V1GitRepoVolumeSource.
:type: str
"""
if repository is None:
raise ValueError("Invalid value for `repository`, must not be `None`")
self._repository = repository
@property
def revision(self):
"""
Gets the revision of this V1GitRepoVolumeSource.
Commit hash for the specified revision.
:return: The revision of this V1GitRepoVolumeSource.
:rtype: str
"""
return self._revision
@revision.setter
def revision(self, revision):
"""
Sets the revision of this V1GitRepoVolumeSource.
Commit hash for the specified revision.
:param revision: The revision of this V1GitRepoVolumeSource.
:type: str
"""
self._revision = revision
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1GitRepoVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
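# A minimal construction sketch for this model (values are hypothetical;
# dict key order may vary):
#
#   >>> src = V1GitRepoVolumeSource(repository='https://example.com/repo.git',
#   ...                             revision='0123abc', directory='.')
#   >>> src.to_dict()
#   {'directory': '.', 'repository': 'https://example.com/repo.git', 'revision': '0123abc'}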
|
Leila20/django
|
refs/heads/master
|
django/template/loader_tags.py
|
15
|
import logging
import posixpath
import warnings
from collections import defaultdict
from django.utils import six
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.safestring import mark_safe
from .base import (
Node, Template, TemplateSyntaxError, TextNode, Variable, token_kwargs,
)
from .library import Library
register = Library()
BLOCK_CONTEXT_KEY = 'block_context'
logger = logging.getLogger('django.template')
class ExtendsError(Exception):
pass
class BlockContext(object):
def __init__(self):
# Dictionary of FIFO queues.
self.blocks = defaultdict(list)
def add_blocks(self, blocks):
for name, block in six.iteritems(blocks):
self.blocks[name].insert(0, block)
def pop(self, name):
try:
return self.blocks[name].pop()
except IndexError:
return None
def push(self, name, block):
self.blocks[name].append(block)
def get_block(self, name):
try:
return self.blocks[name][-1]
except IndexError:
return None
class BlockNode(Node):
def __init__(self, name, nodelist, parent=None):
self.name, self.nodelist, self.parent = name, nodelist, parent
def __repr__(self):
return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
def render(self, context):
block_context = context.render_context.get(BLOCK_CONTEXT_KEY)
with context.push():
if block_context is None:
context['block'] = self
result = self.nodelist.render(context)
else:
push = block = block_context.pop(self.name)
if block is None:
block = self
# Create new block so we can store context without thread-safety issues.
block = type(self)(block.name, block.nodelist)
block.context = context
context['block'] = block
result = block.nodelist.render(context)
if push is not None:
block_context.push(self.name, push)
return result
def super(self):
if not hasattr(self, 'context'):
raise TemplateSyntaxError(
"'%s' object has no attribute 'context'. Did you use "
"{{ block.super }} in a base template?" % self.__class__.__name__
)
render_context = self.context.render_context
if (BLOCK_CONTEXT_KEY in render_context and
render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None):
return mark_safe(self.render(self.context))
return ''
class ExtendsNode(Node):
must_be_first = True
context_key = 'extends_context'
def __init__(self, nodelist, parent_name, template_dirs=None):
self.nodelist = nodelist
self.parent_name = parent_name
self.template_dirs = template_dirs
self.blocks = {n.name: n for n in nodelist.get_nodes_by_type(BlockNode)}
def __repr__(self):
return '<ExtendsNode: extends %s>' % self.parent_name.token
def find_template(self, template_name, context):
"""
This is a wrapper around engine.find_template(). A history is kept in
the render_context attribute between successive extends calls and
passed as the skip argument. This enables extends to work recursively
without extending the same template twice.
"""
# RemovedInDjango20Warning: If any non-recursive loaders are installed
# do a direct template lookup. If the same template name appears twice,
# raise an exception to avoid system recursion.
for loader in context.template.engine.template_loaders:
if not loader.supports_recursion:
history = context.render_context.setdefault(
self.context_key, [context.template.origin.template_name],
)
if template_name in history:
raise ExtendsError(
"Cannot extend templates recursively when using "
"non-recursive template loaders",
)
template = context.template.engine.get_template(template_name)
history.append(template_name)
return template
history = context.render_context.setdefault(
self.context_key, [context.template.origin],
)
template, origin = context.template.engine.find_template(
template_name, skip=history,
)
history.append(origin)
return template
def get_parent(self, context):
parent = self.parent_name.resolve(context)
if not parent:
error_msg = "Invalid template name in 'extends' tag: %r." % parent
if self.parent_name.filters or\
isinstance(self.parent_name.var, Variable):
error_msg += " Got this from the '%s' variable." %\
self.parent_name.token
raise TemplateSyntaxError(error_msg)
if isinstance(parent, Template):
# parent is a django.template.Template
return parent
if isinstance(getattr(parent, 'template', None), Template):
# parent is a django.template.backends.django.Template
return parent.template
return self.find_template(parent, context)
def render(self, context):
compiled_parent = self.get_parent(context)
if BLOCK_CONTEXT_KEY not in context.render_context:
context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
block_context = context.render_context[BLOCK_CONTEXT_KEY]
# Add the block nodes from this node to the block context
block_context.add_blocks(self.blocks)
# If this block's parent doesn't have an extends node it is the root,
# and its block nodes also need to be added to the block context.
for node in compiled_parent.nodelist:
# The ExtendsNode has to be the first non-text node.
if not isinstance(node, TextNode):
if not isinstance(node, ExtendsNode):
blocks = {n.name: n for n in
compiled_parent.nodelist.get_nodes_by_type(BlockNode)}
block_context.add_blocks(blocks)
break
# Call Template._render explicitly so the parser context stays
# the same.
return compiled_parent._render(context)
class IncludeNode(Node):
context_key = '__include_context'
def __init__(self, template, *args, **kwargs):
self.template = template
self.extra_context = kwargs.pop('extra_context', {})
self.isolated_context = kwargs.pop('isolated_context', False)
super(IncludeNode, self).__init__(*args, **kwargs)
def render(self, context):
"""
Render the specified template and context. Cache the template object
in render_context to avoid reparsing and loading when used in a for
loop.
"""
try:
template = self.template.resolve(context)
# Does this quack like a Template?
if not callable(getattr(template, 'render', None)):
# If not, we'll try our cache, and get_template()
template_name = template
cache = context.render_context.setdefault(self.context_key, {})
template = cache.get(template_name)
if template is None:
template = context.template.engine.get_template(template_name)
cache[template_name] = template
values = {
name: var.resolve(context)
for name, var in six.iteritems(self.extra_context)
}
if self.isolated_context:
return template.render(context.new(values))
with context.push(**values):
return template.render(context)
except Exception as e:
if context.template.engine.debug:
raise
template_name = getattr(context, 'template_name', None) or 'unknown'
warnings.warn(
"Rendering {%% include '%s' %%} raised %s. In Django 2.1, "
"this exception will be raised rather than silenced and "
"rendered as an empty string." %
(template_name, e.__class__.__name__),
RemovedInDjango21Warning,
)
logger.warning(
"Exception raised while rendering {%% include %%} for "
"template '%s'. Empty string rendered instead.",
template_name,
exc_info=True,
)
return ''
@register.tag('block')
def do_block(parser, token):
"""
Define a block that can be overridden by child templates.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0])
block_name = bits[1]
# Keep track of the names of BlockNodes found in this template, so we can
# check for duplication.
try:
if block_name in parser.__loaded_blocks:
raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (bits[0], block_name))
parser.__loaded_blocks.append(block_name)
except AttributeError: # parser.__loaded_blocks isn't a list yet
parser.__loaded_blocks = [block_name]
nodelist = parser.parse(('endblock',))
# This check is kept for backwards-compatibility. See #3100.
endblock = parser.next_token()
acceptable_endblocks = ('endblock', 'endblock %s' % block_name)
if endblock.contents not in acceptable_endblocks:
parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks)
return BlockNode(block_name, nodelist)
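# A template-side usage sketch for the tag above (names are illustrative):
#
#   {% block content %}
#     default content, overridable in child templates
#   {% endblock content %}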
def construct_relative_path(current_template_name, relative_name):
"""
Convert a relative path (starting with './' or '../') to the full template
name based on the current_template_name.
"""
if not any(relative_name.startswith(x) for x in ["'./", "'../", '"./', '"../']):
# relative_name is a variable or a literal that doesn't contain a
# relative path.
return relative_name
new_name = posixpath.normpath(
posixpath.join(
posixpath.dirname(current_template_name.lstrip('/')),
relative_name.strip('\'"')
)
)
if new_name.startswith('../'):
raise TemplateSyntaxError(
"The relative path '%s' points outside the file hierarchy that "
"template '%s' is in." % (relative_name, current_template_name)
)
if current_template_name.lstrip('/') == new_name:
raise TemplateSyntaxError(
"The relative path '%s' was translated to template name '%s', the "
"same template in which the tag appears."
% (relative_name, current_template_name)
)
return '"%s"' % new_name
@register.tag('extends')
def do_extends(parser, token):
"""
Signal that this template extends a parent template.
This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)
uses the literal value "base" as the name of the parent template to extend,
or ``{% extends variable %}`` uses the value of ``variable`` as either the
name of the parent template to extend (if it evaluates to a string) or as
the parent template itself (if it evaluates to a Template object).
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
bits[1] = construct_relative_path(parser.origin.template_name, bits[1])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % bits[0])
return ExtendsNode(nodelist, parent_name)
@register.tag('include')
def do_include(parser, token):
"""
Loads a template and renders it with the current context. You can pass
additional context using keyword arguments.
Example::
{% include "foo/some_include" %}
{% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}
Use the ``only`` argument to exclude the current context when rendering
the included template::
{% include "foo/some_include" only %}
{% include "foo/some_include" with bar="1" only %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError(
"%r tag takes at least one argument: the name of the template to "
"be included." % bits[0]
)
options = {}
remaining_bits = bits[2:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=False)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'only':
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
isolated_context = options.get('only', False)
namemap = options.get('with', {})
bits[1] = construct_relative_path(parser.origin.template_name, bits[1])
return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap,
isolated_context=isolated_context)
|
amir-qayyum-khan/edx-platform
|
refs/heads/master
|
common/djangoapps/terrain/stubs/tests/test_http.py
|
172
|
"""
Unit tests for stub HTTP server base class.
"""
import unittest
import requests
import json
from terrain.stubs.http import StubHttpService, StubHttpRequestHandler, require_params
class StubHttpServiceTest(unittest.TestCase):
def setUp(self):
super(StubHttpServiceTest, self).setUp()
self.server = StubHttpService()
self.addCleanup(self.server.shutdown)
self.url = "http://127.0.0.1:{0}/set_config".format(self.server.port)
def test_configure(self):
"""
All HTTP stub servers have an end-point that allows
clients to configure how the server responds.
"""
params = {
'test_str': 'This is only a test',
'test_empty': '',
'test_int': 12345,
'test_float': 123.45,
'test_dict': {
'test_key': 'test_val',
},
'test_empty_dict': {},
'test_unicode': u'\u2603 the snowman',
'test_none': None,
'test_boolean': False
}
for key, val in params.iteritems():
# JSON-encode each parameter
post_params = {key: json.dumps(val)}
response = requests.put(self.url, data=post_params)
self.assertEqual(response.status_code, 200)
# Check that the expected values were set in the configuration
for key, val in params.iteritems():
self.assertEqual(self.server.config.get(key), val)
def test_bad_json(self):
response = requests.put(self.url, data="{,}")
self.assertEqual(response.status_code, 400)
def test_no_post_data(self):
response = requests.put(self.url, data={})
self.assertEqual(response.status_code, 200)
def test_unicode_non_json(self):
# Send unicode without json-encoding it
response = requests.put(self.url, data={'test_unicode': u'\u2603 the snowman'})
self.assertEqual(response.status_code, 400)
def test_unknown_path(self):
response = requests.put(
"http://127.0.0.1:{0}/invalid_url".format(self.server.port),
data="{}"
)
self.assertEqual(response.status_code, 404)
class RequireRequestHandler(StubHttpRequestHandler):
@require_params('GET', 'test_param')
def do_GET(self):
self.send_response(200)
@require_params('POST', 'test_param')
def do_POST(self):
self.send_response(200)
class RequireHttpService(StubHttpService):
HANDLER_CLASS = RequireRequestHandler
class RequireParamTest(unittest.TestCase):
"""
Test the decorator for requiring parameters.
"""
def setUp(self):
super(RequireParamTest, self).setUp()
self.server = RequireHttpService()
self.addCleanup(self.server.shutdown)
self.url = "http://127.0.0.1:{port}".format(port=self.server.port)
def test_require_get_param(self):
# Expect success when we provide the required param
response = requests.get(self.url, params={"test_param": 2})
self.assertEqual(response.status_code, 200)
# Expect failure when we do not provide the param
response = requests.get(self.url)
self.assertEqual(response.status_code, 400)
# Expect failure when we provide an empty param
response = requests.get(self.url + "?test_param=")
self.assertEqual(response.status_code, 400)
def test_require_post_param(self):
# Expect success when we provide the required param
response = requests.post(self.url, data={"test_param": 2})
self.assertEqual(response.status_code, 200)
# Expect failure when we do not provide the param
response = requests.post(self.url)
self.assertEqual(response.status_code, 400)
# Expect failure when we provide an empty param
response = requests.post(self.url, data={"test_param": None})
self.assertEqual(response.status_code, 400)
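# Hedged usage sketch, mirroring the tests above ('delay' is an
# illustrative config key, not one the stub server is known to define):
#
#   server = StubHttpService()
#   requests.put(
#       "http://127.0.0.1:{0}/set_config".format(server.port),
#       data={"delay": json.dumps(2.5)},
#   )
#   assert server.config.get("delay") == 2.5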
|
zhuyue1314/Empire
|
refs/heads/master
|
lib/modules/situational_awareness/network/stealth_userhunter.py
|
10
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-StealthUserHunter',
'Author': ['@harmj0y'],
'Description': ('Finds which machines users of a specified group are logged into by '
'querying AD for servers likely to have high traffic (file servers, DCs, etc.) '
'and enumerating sessions against each. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/PowerShellEmpire/PowerTools/tree/master/PowerView'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Hosts' : {
'Description' : 'Hosts to enumerate.',
'Required' : False,
'Value' : ''
},
'HostList' : {
'Description' : 'Hostlist to enumerate.',
'Required' : False,
'Value' : ''
},
'UserName' : {
'Description' : 'Specific username to search for.',
'Required' : False,
'Value' : ''
},
'GroupName' : {
'Description' : 'Group to query for user names.',
'Required' : False,
'Value' : ''
},
'UserList' : {
'Description' : 'List of usernames to search for.',
'Required' : False,
'Value' : ''
},
'StopOnSuccess' : {
'Description' : 'Switch. Stop when a target user is found.',
'Required' : False,
'Value' : ''
},
'NoPing' : {
'Description' : 'Don\'t ping each host to ensure it\'s up before enumerating.',
'Required' : False,
'Value' : ''
},
'CheckAccess' : {
'Description' : 'Switch. Check if the current user has local admin access to found machines.',
'Required' : False,
'Value' : ''
},
'Delay' : {
'Description' : 'Delay between enumerating hosts, defaults to 0.',
'Required' : False,
'Value' : ''
},
'ShowAll' : {
'Description' : 'Switch. Show all result output.',
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'Domain to enumerate for hosts.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/Invoke-UserHunter.ps1"
try:
f = open(moduleSource, 'r')
except IOError:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
script += "Invoke-StealthUserHunter "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += "| Select-Object TargetUser, Computer, IP, SessionFrom, LocalAdmin | ft -autosize | Out-String | %{$_ + \"`n\"}"
script += ';"`nInvoke-StealthUserHunter completed"'
return script
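# Hedged illustration of the generated script tail: with, for example,
# GroupName set to 'TargetGroup' and StopOnSuccess set to 'True', the
# options loop above appends roughly
#   Invoke-StealthUserHunter -GroupName TargetGroup -StopOnSuccess
# followed by the Select-Object/ft formatting pipeline. Option order
# follows dict iteration order, so it is not guaranteed.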
|
GbalsaC/bitnamiP
|
refs/heads/master
|
pyfs/fs/expose/http.py
|
14
|
__all__ = ["serve_fs"]
import SimpleHTTPServer
import SocketServer
from fs.path import pathjoin, dirname
from fs.errors import FSError
from time import mktime
from cStringIO import StringIO
import cgi
import urllib
import posixpath
import time
import threading
import socket
def _datetime_to_epoch(d):
return mktime(d.timetuple())
class FSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""A hacked together version of SimpleHTTPRequestHandler"""
def __init__(self, fs, request, client_address, server):
self._fs = fs
SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
"""Serve a GET request."""
f = None
try:
f = self.send_head()
if f:
try:
self.copyfile(f, self.wfile)
except socket.error:
pass
finally:
if f is not None:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if self._fs.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in ("index.html", "index.htm"):
index = pathjoin(path, index)
if self._fs.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
info = self._fs.getinfo(path)
f = self._fs.open(path, 'r')
except FSError, e:
self.send_error(404, str(e))
return None
self.send_response(200)
self.send_header("Content-type", ctype)
self.send_header("Content-Length", str(info['size']))
if 'modified_time' in info:
self.send_header("Last-Modified", self.date_time_string(_datetime_to_epoch(info['modified_time'])))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
dir_paths = self._fs.listdir(path, dirs_only=True)
file_paths = self._fs.listdir(path, files_only=True)
except FSError:
self.send_error(404, "No permission to list directory")
return None
paths = [p+'/' for p in sorted(dir_paths, key=lambda p:p.lower())] + sorted(file_paths, key=lambda p:p.lower())
#list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<hr>\n<ul>\n")
parent = dirname(path)
if path != parent:
f.write('<li><a href="%s">../</a></li>' % urllib.quote(parent.rstrip('/') + '/'))
for path in paths:
f.write('<li><a href="%s">%s</a>\n'
% (urllib.quote(path), cgi.escape(path)))
f.write("</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
path = posixpath.normpath(urllib.unquote(path))
return path
def serve_fs(fs, address='', port=8000):
"""Serve an FS instance over http
:param fs: an FS object
:param address: IP address to serve on
:param port: port number
"""
def Handler(request, client_address, server):
return FSHTTPRequestHandler(fs, request, client_address, server)
#class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
# pass
httpd = SocketServer.TCPServer((address, port), Handler, bind_and_activate=False)
#httpd = ThreadedTCPServer((address, port), Handler, bind_and_activate=False)
httpd.allow_reuse_address = True
httpd.server_bind()
httpd.server_activate()
server_thread = threading.Thread(target=httpd.serve_forever)
server_thread.start()
try:
while True:
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
httpd.shutdown()
if __name__ == "__main__":
from fs.osfs import OSFS
serve_fs(OSFS('~/'))
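# A minimal sketch of serving an in-memory filesystem instead (assumes
# fs.memoryfs.MemoryFS and its setcontents() exist in this pyfs version):
#
#   from fs.memoryfs import MemoryFS
#   mem = MemoryFS()
#   mem.setcontents("index.html", "<h1>hello</h1>")
#   serve_fs(mem, address="127.0.0.1", port=8080)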
|
dana-i2cat/felix
|
refs/heads/master
|
optin_manager/src/python/openflow/optin_manager/opts/tests.py
|
4
|
from django.conf import settings
from expedient.common.tests.manager import SettingsTestCase
from django.core.urlresolvers import reverse
import logging
from django.contrib.auth.models import User
from openflow.optin_manager.opts.models import UserFlowSpace,\
Experiment,ExperimentFLowSpace, UserOpts, OptsFlowSpace, MatchStruct
import random
from openflow.optin_manager.xmlrpc_server.ch_api import om_ch_translate
from expedient.common.tests.client import Browser, test_get_and_post_form
from openflow.optin_manager.users.models import UserProfile, Priority
from openflow.optin_manager.dummyfv.models import DummyFV, DummyFVRule
from openflow.optin_manager.xmlrpc_server.models import FVServerProxy
logger = logging.getLogger("OMOptsTest")
SCHEME = "test"
HOST = "testserver"
USE_RANDOM = False
class Tests(SettingsTestCase):
def setUp(self):
# Create a test user
self.test_user = User.objects.create_user(
"user", "user@user.com", "password")
profile = UserProfile.get_or_create_profile(self.test_user)
# Create a test admin
self.test_admin = User.objects.create_superuser(
"admin", "admin@user.com", "password")
# Assign a flowspace to user
self.user_ip_src_s = random.randint(0,0x80000000) & 0xFFFF0000
self.user_ip_src_e = random.randint(0x80000000,0xFFFFFFFF) & 0xFFFF0000
# Assign a flowspace to experiment
self.exp_ip_src_s = random.randint(0,0x80000000) & 0xFFFF0000
self.exp_ip_src_e = random.randint(0x80000000,0xFFFFFFFF) & 0xFFFF0000
# Choose a random field
fields=["dl_src","dl_dst","vlan_id","tp_src","tp_dst"]
random.shuffle(fields)
(to_str,from_str,width,om_name,of_name) = om_ch_translate.attr_funcs[fields[0]]
self.user_field_name = om_name
self.user_field_s = random.randint(0,2**width-3)
self.user_field_e = self.user_field_s + 1
(to_str,from_str,width,om_name,of_name) = om_ch_translate.attr_funcs[fields[1]]
self.exp_field_name = om_name
self.exp_field_s = random.randint(0,2**width-3)
self.exp_field_e = self.exp_field_s + 1
#save flowspace for user
ufs = UserFlowSpace(user=self.test_user, ip_src_s=self.user_ip_src_s,
ip_src_e=self.user_ip_src_e,approver=self.test_admin)
setattr(ufs,"%s_s"%self.user_field_name,self.user_field_s)
setattr(ufs,"%s_e"%self.user_field_name,self.user_field_e)
ufs.save()
#create an experiment and assign a flowspace to it
exp = Experiment.objects.create(slice_id="slice_id_1", project_name="project name_1",
project_desc="project description", slice_name="slice name_1",
slice_desc="slice description", controller_url="controller url",
owner_email="owner email", owner_password="owner password")
expfs = ExperimentFLowSpace.objects.create(exp=exp, dpid="00:00:00:00:00:00:01",
ip_src_s=self.exp_ip_src_s,
ip_src_e=self.exp_ip_src_e,
)
setattr(expfs,"%s_s"%self.exp_field_name,self.exp_field_s)
setattr(expfs,"%s_e"%self.exp_field_name,self.exp_field_e)
expfs.save()
# Create dummy FV
fv = DummyFV.objects.create()
# Load up a fake topology in the Dummy FV
fv.populateTopology(10, 20, use_random=USE_RANDOM)
# create fake users for the Dummy FV
username = "om"
password = "password"
u = User.objects.create(username=username)
u.set_password(password)
u.save()
# Create the FV proxy connection
FVServerProxy.objects.create(
name="Flowvisor",
username=username,
password=password,
url = SCHEME+"://%s:8443/dummyfv/%d/xmlrpc/" % (
HOST,fv.id,
),
)
#Login
logged = self.client.login(username="user",password="password")
self.assertEqual(logged,True)
def test_user_optin(self):
'''
Test if a single opt-in is happening correctly
'''
all_exps = Experiment.objects.all()
self.assertEqual(all_exps.count(),1)
response = test_get_and_post_form(
self.client,
reverse("opt_in_experiment"),
{"experiment":all_exps[0].id},
)
self.assertContains(response, "successfully")
uopt = UserOpts.objects.filter(user__username__exact="user")
self.assertEqual(len(uopt),1)
self.assertEqual(uopt[0].experiment.slice_name,"slice name_1")
optfs = OptsFlowSpace.objects.filter(opt=uopt[0])
self.assertEqual(len(optfs),1)
self.num_fv_rules = MatchStruct.objects.filter(optfs=optfs[0]).count()
actual_fv_rules_count = DummyFVRule.objects.all().count()
self.assertEqual(actual_fv_rules_count,self.num_fv_rules)
self.assertEqual(optfs[0].ip_src_s,max(self.user_ip_src_s,self.exp_ip_src_s))
self.assertEqual(optfs[0].ip_src_e,min(self.user_ip_src_e,self.exp_ip_src_e))
self.assertEqual(getattr(optfs[0],"%s_s"%self.user_field_name), self.user_field_s)
self.assertEqual(getattr(optfs[0],"%s_e"%self.user_field_name), self.user_field_e)
self.assertEqual(getattr(optfs[0],"%s_s"%self.exp_field_name), self.exp_field_s)
self.assertEqual(getattr(optfs[0],"%s_e"%self.exp_field_name), self.exp_field_e)
def test_user_optin_invalid(self):
'''
Test that opting into a non-existent experiment fails cleanly
'''
# opt into an experiment that doesn't exist
response = test_get_and_post_form(
self.client,
reverse("opt_in_experiment"),
{"experiment":234},
)
self.assertNotContains(response, "successfully")
uopt = UserOpts.objects.filter(user__username__exact="user")
self.assertEqual(len(uopt),0)
actual_fv_rules_count = DummyFVRule.objects.all().count()
self.assertEqual(actual_fv_rules_count,0)
def test_user_optin_invalid_fv(self):
fv_server_proxy = FVServerProxy.objects.all()[0]
fv_server_proxy.username = "wrong_username"
fv_server_proxy.save()
all_exps = Experiment.objects.all()
self.assertEqual(all_exps.count(),1)
response = test_get_and_post_form(
self.client,
reverse("opt_in_experiment"),
{"experiment":all_exps[0].id},
)
print response
self.assertNotContains(response, "successfully")
uopt = UserOpts.objects.filter(user__username__exact="user")
self.assertEqual(len(uopt),0)
def test_user_re_optin(self):
'''
Test if opting into the same experiment just updates the previous opt and
doesn't double opt
'''
self.test_user_optin()
all_exps = Experiment.objects.all()
self.assertEqual(all_exps.count(),1)
uopt = UserOpts.objects.filter(user__username__exact="user")
optfs = OptsFlowSpace.objects.filter(opt=uopt[0])
self.assertEqual(len(optfs),1)
response = test_get_and_post_form(
self.client,
reverse("opt_in_experiment"),
{"experiment":all_exps[0].id},
)
self.assertContains(response, "successfully")
uopt = UserOpts.objects.filter(user__username__exact="user")
self.assertEqual(len(uopt),1)
optfs = OptsFlowSpace.objects.filter(opt=uopt[0])
self.assertEqual(len(optfs),1)
actual_fv_rules_count = DummyFVRule.objects.all().count()
self.assertEqual(self.num_fv_rules,actual_fv_rules_count)
def test_optout(self):
self.test_user_optin()
uopt = UserOpts.objects.filter(user__username__exact="user")
optfs = OptsFlowSpace.objects.filter(opt=uopt[0])
self.assertEqual(len(optfs),1)
response = test_get_and_post_form(
self.client,
reverse("opt_out_of_experiment"),
{"1":"checked"},
)
uopt = UserOpts.objects.filter(user__username__exact="user")
self.assertEqual(len(uopt),0)
optfs = OptsFlowSpace.objects.filter(opt__user__username="user")
self.assertEqual(optfs.count(),0)
def test_user_multiple_opts(self):
'''
Opt into multiple experiments, change their priorities and then opt out of all.
At each of the three steps, check the internal database to make sure everything
is done correctly.
'''
max_opt = random.randint(5,9)
exp_ids = []
first_id = Experiment.objects.all()[0].id
exp_ids.append(first_id)
for index in range(2,max_opt):
#create a random number of experiments
exp = Experiment.objects.create(slice_id="slice_id_%d"%index, project_name="project name_%d"%index,
project_desc="project description", slice_name="slice name_%d"%index,
slice_desc="slice description", controller_url="controller url",
owner_email="owner email", owner_password="owner password")
expfs = ExperimentFLowSpace.objects.create(exp=exp, dpid="00:00:00:00:00:00:0%d"%index,
ip_src_s=random.randint(0,0x80000000) & 0xFFFF0000,
ip_src_e=random.randint(0x80000000,0xFFFFFFFF) & 0xFFFF0000,
)
expfs.save()
exp_ids.append(exp.id)
# opt into all of them
count = 0
for exp in Experiment.objects.all():
count = count + 1
response = test_get_and_post_form(
self.client,
reverse("opt_in_experiment"),
{"experiment":exp.id},
)
self.assertContains(response, "successfully")
uopt = UserOpts.objects.filter(user__username__exact="user")
self.assertEqual(len(uopt),count)
self.assertEqual(uopt[count-1].experiment.slice_name,exp.slice_name)
optfs = OptsFlowSpace.objects.filter(opt=uopt[count-1])
self.assertEqual(len(optfs),1)
# change priority
request_post = {}
for id in range(1,max_opt):
request_post["p_%d"%exp_ids[id-1]] = max_opt - id + 1
response = test_get_and_post_form(
self.client,
reverse("change_priority"),
request_post,
)
self.assertContains(response, "Successfully")
for id in range(1,max_opt):
uopt = UserOpts.objects.filter(user__username__exact="user",\
experiment__slice_name="slice name_%d"%id)
self.assertEqual(uopt.count(),1,"uopt.count()!=1 for id=%d"%id)
self.assertEqual(uopt[0].priority,max_opt - id + 1)
optfs = OptsFlowSpace.objects.filter(opt = uopt[0])
self.assertEqual(optfs.count(),1)
mstr = optfs[0].matchstruct_set.all()
self.assertNotEqual(mstr.count(),0)
fv_rule = DummyFVRule.objects.filter(match=mstr[0].match,\
dpid="00:00:00:00:00:00:0%d"%id)
self.assertEqual(fv_rule.count(),1)
self.assertEqual(fv_rule[0].priority,mstr[0].priority)
#opt out of all of them
request_post = {}
for id in range(1,max_opt):
request_post["%d"%exp_ids[id-1]] = "checked"
response = test_get_and_post_form(
self.client,
reverse("opt_out_of_experiment"),
request_post,
)
uopt = UserOpts.objects.filter(user__username__exact="user")
self.assertEqual(len(uopt),0)
optfs = OptsFlowSpace.objects.filter(opt__user__username="user")
self.assertEqual(optfs.count(),0)
actual_fv_rules_count = DummyFVRule.objects.all().count()
self.assertEqual(actual_fv_rules_count,0)
|
atanasAV/forex-trading-api
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='tradingModule',
version='1.0',
description='Trading API',
author='atanasAV',
author_email='atanas.vasilev.av@gmail.com',
packages=['tradingModule'],
)
|
leafclick/intellij-community
|
refs/heads/master
|
python/testData/editing/enterInIncompleteDictLiteral.after.py
|
18
|
d = {'foo': 'bar',
'baz'
|
xzYue/odoo
|
refs/heads/8.0
|
addons/l10n_ch/account_wizard.py
|
424
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# Translation contributors: brain-tec AG, Agile Business Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import TransientModel
class WizardMultiChartsAccounts(TransientModel):
_inherit = 'wizard.multi.charts.accounts'
def onchange_chart_template_id(self, cursor, uid, ids, chart_template_id=False, context=None):
if context is None: context = {}
res = super(WizardMultiChartsAccounts, self).onchange_chart_template_id(cursor, uid, ids,
chart_template_id=chart_template_id,
context=context)
# 0 is evaluated as False in Python, so we have to do this because the
# original wizard tests the code_digits value on a float widget
if chart_template_id:
sterchi_template = self.pool.get('ir.model.data').get_object(cursor, uid, 'l10n_ch', 'l10nch_chart_template')
if sterchi_template.id == chart_template_id:
res['value']['code_digits'] = 0
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Boris-Barboris/PySubs
|
refs/heads/master
|
engine/IOBroker.py
|
1
|
# Copyright Alexander Baranin 2016
import sfml
from engine.Reloadable import reloadable
_import_modules = (
('EngineCore', 'engine.EngineCore'),
('Logging', 'engine.Logging'),
('WindowModule', 'engine.WindowModule'))
SCHED_ORDER = 20
def onLoad(core):
EngineCore.schedule_FIFO(run, SCHED_ORDER)
# load event map
global event_map
event_map = EventHandlerMap._persistent('IOBroker.event_map')
# standard window event handlers
register_handler(close_std_event, sfml.window.CloseEvent)
register_handler(fullscreen_std_event, sfml.window.KeyEvent)
register_handler(resize_std_event, sfml.window.ResizeEvent)
def onUnload():
# standard window event handlers
unregister_handler(close_std_event, sfml.window.CloseEvent)
unregister_handler(fullscreen_std_event, sfml.window.KeyEvent)
unregister_handler(resize_std_event, sfml.window.ResizeEvent)
EngineCore.unschedule_FIFO(SCHED_ORDER)
event_map = None # static reloadable event map
@reloadable
class EventHandlerMap:
def __init__(self, proxy):
self._handlers = {}
def _reload(self, other, proxy):
self._handlers = other._handlers
def register_handler(self, f, event_type):
if event_type in self._handlers:
self._handlers[event_type].append(f)
else:
self._handlers[event_type] = [f]
def unregister_handler(self, f, event_type):
if event_type in self._handlers:
if f in self._handlers[event_type]:
self._handlers[event_type].remove(f)
if len(self._handlers[event_type]) == 0:
del self._handlers[event_type]
def run():
for event in WindowModule.app_window.wnd_handle.events:
t = type(event)
d = event_map._handlers
if t in d:
handlers_list = d[t]
for hdlr in handlers_list:
hdlr(event, WindowModule.app_window)
def register_handler(f, event_type):
event_map.register_handler(f, event_type)
def unregister_handler(f, event_type):
event_map.unregister_handler(f, event_type)
# Standard handlers themselves
def close_std_event(event, wnd):
# close window
wnd.close_window()
EngineCore.request_shutdown()
def fullscreen_std_event(event, wnd):
# switch fullscreen on\off
if event.code == sfml.window.Keyboard.F and event.pressed:
wnd.set_fullscreen(not wnd.is_fullscreen())
def resize_std_event(event, wnd):
# window was resized
if not wnd.is_fullscreen():
wnd.wnd_size = event.size
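# Hedged usage sketch: registering a custom handler (assumes this pysfml
# build exposes sfml.window.MouseButtonEvent):
#
#   def on_mouse_button(event, wnd):
#       print('mouse button event: %s' % event)
#   register_handler(on_mouse_button, sfml.window.MouseButtonEvent)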
|
onezens/python
|
refs/heads/master
|
basic/15_regex_getimage.py
|
1
|
#encoding=utf8
import re
import urllib
def getUrl(url):
page = urllib.urlopen(url)
return page.read()
def getImages(html):
images = re.findall(r'src="(.*?\.(jpg|png))"', html)
x = 1
for imageurl in images :
print('downloading %s'%imageurl[0])
urllib.urlretrieve(imageurl[0], './images/%d.jpg'%x)
x += 1
html = getUrl('http://www.tooopen.com/img/87.aspx')
getImages(html)
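# Note on the regex above: with two capture groups, re.findall returns
# tuples such as ('http://.../pic.jpg', 'jpg'), which is why the loop
# downloads imageurl[0]. A non-capturing group would yield plain strings:
#   re.findall(r'src="(.*?\.(?:jpg|png))"', html)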
|
peterstace/project-euler
|
refs/heads/master
|
OLD_PY_CODE/project_euler_old_old/155/155.py
|
1
|
from fractions import Fraction
from itertools import product
N = 18
capacities = [None] + [set() for _ in range(N)]  # None at index 0 so capacities[n] covers n capacitors
capacities[1].add(Fraction(1))
for n in range(2, N+1):
print(n)
for i in range(1, n // 2 + 1):
for c1, c2 in product(capacities[i], capacities[n-i]):
capacities[n].add(c1 + c2)
capacities[n].add(c1 * c2 / (c1 + c2))
union_cap = set()
for cap in capacities[1:]: #don't include capacities[0] (which is None)
union_cap |= cap
print(len(union_cap))
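# Worked check of the recurrence above (series: c1 + c2, parallel:
# c1*c2/(c1 + c2)), starting from capacities[1] = {1}:
#   n = 2 -> {2, 1/2}
#   n = 3 -> {3, 3/2, 2/3, 1/3}
# so the union over n <= 3 contains 1 + 2 + 4 = 7 distinct values.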
|
stripe/stripe-python
|
refs/heads/master
|
stripe/api_resources/abstract/nested_resource_class_methods.py
|
1
|
from __future__ import absolute_import, division, print_function
from stripe import api_requestor, util
from stripe.six.moves.urllib.parse import quote_plus
def nested_resource_class_methods(
resource, path=None, operations=None, resource_plural=None
):
if resource_plural is None:
resource_plural = "%ss" % resource
if path is None:
path = resource_plural
if operations is None:
raise ValueError("operations list required")
def wrapper(cls):
def nested_resource_url(cls, id, nested_id=None):
url = "%s/%s/%s" % (
cls.class_url(),
quote_plus(id),
quote_plus(path),
)
if nested_id is not None:
url += "/%s" % quote_plus(nested_id)
return url
resource_url_method = "%ss_url" % resource
setattr(cls, resource_url_method, classmethod(nested_resource_url))
def nested_resource_request(
cls,
method,
url,
api_key=None,
idempotency_key=None,
stripe_version=None,
stripe_account=None,
**params
):
requestor = api_requestor.APIRequestor(
api_key, api_version=stripe_version, account=stripe_account
)
headers = util.populate_headers(idempotency_key)
response, api_key = requestor.request(method, url, params, headers)
return util.convert_to_stripe_object(
response, api_key, stripe_version, stripe_account
)
resource_request_method = "%ss_request" % resource
setattr(
cls, resource_request_method, classmethod(nested_resource_request)
)
for operation in operations:
if operation == "create":
def create_nested_resource(cls, id, **params):
url = getattr(cls, resource_url_method)(id)
return getattr(cls, resource_request_method)(
"post", url, **params
)
create_method = "create_%s" % resource
setattr(
cls, create_method, classmethod(create_nested_resource)
)
elif operation == "retrieve":
def retrieve_nested_resource(cls, id, nested_id, **params):
url = getattr(cls, resource_url_method)(id, nested_id)
return getattr(cls, resource_request_method)(
"get", url, **params
)
retrieve_method = "retrieve_%s" % resource
setattr(
cls, retrieve_method, classmethod(retrieve_nested_resource)
)
elif operation == "update":
def modify_nested_resource(cls, id, nested_id, **params):
url = getattr(cls, resource_url_method)(id, nested_id)
return getattr(cls, resource_request_method)(
"post", url, **params
)
modify_method = "modify_%s" % resource
setattr(
cls, modify_method, classmethod(modify_nested_resource)
)
elif operation == "delete":
def delete_nested_resource(cls, id, nested_id, **params):
url = getattr(cls, resource_url_method)(id, nested_id)
return getattr(cls, resource_request_method)(
"delete", url, **params
)
delete_method = "delete_%s" % resource
setattr(
cls, delete_method, classmethod(delete_nested_resource)
)
elif operation == "list":
def list_nested_resources(cls, id, **params):
url = getattr(cls, resource_url_method)(id)
return getattr(cls, resource_request_method)(
"get", url, **params
)
list_method = "list_%s" % resource_plural
setattr(cls, list_method, classmethod(list_nested_resources))
else:
raise ValueError("Unknown operation: %s" % operation)
return cls
return wrapper
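# Hedged usage sketch (the class and ids below are illustrative, not
# taken from this file):
#
#   @nested_resource_class_methods(
#       "reversal", operations=["create", "retrieve", "list"]
#   )
#   class Transfer(APIResource):
#       OBJECT_NAME = "transfer"
#
#   Transfer.create_reversal("tr_123", amount=100)
#   # -> POST <class_url>/tr_123/reversals
#   Transfer.retrieve_reversal("tr_123", "trr_456")
#   # -> GET <class_url>/tr_123/reversals/trr_456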
|
ddboline/pylearn2
|
refs/heads/master
|
pylearn2/devtools/__init__.py
|
147
|
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
|
nilbody/h2o-3
|
refs/heads/master
|
h2o-py/tests/testdir_jira/pyunit_pubdev_1839_citi_bike_small_repro.py
|
3
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def pubdev_1839():
train = h2o.import_file(pyunit_utils.locate("smalldata/jira/pubdev_1839_repro_train.csv"))
test = h2o.import_file(pyunit_utils.locate("smalldata/jira/pubdev_1839_repro_test.csv"))
glm0 = h2o.glm(x=train.drop("bikes"),
               y=train["bikes"],
               validation_x=test.drop("bikes"),
               validation_y=test["bikes"],
               family="poisson")
if __name__ == "__main__":
pyunit_utils.standalone_test(pubdev_1839)
else:
pubdev_1839()
|
fergalmoran/dss
|
refs/heads/master
|
spa/migrations/0006_auto__chg_field_mix_title.py
|
1
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Mix.title'
db.alter_column(u'spa_mix', 'title', self.gf('django.db.models.fields.CharField')(max_length=150))
def backwards(self, orm):
# Changing field 'Mix.title'
db.alter_column(u'spa_mix', 'title', self.gf('django.db.models.fields.CharField')(max_length=50))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa._activity': {
'Meta': {'object_name': '_Activity'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'spa._lookup': {
'Meta': {'object_name': '_Lookup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.chatmessage': {
'Meta': {'object_name': 'ChatMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'chat_messages'", 'null': 'True', 'to': "orm['spa.UserProfile']"})
},
'spa.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['spa.Mix']"}),
'time_index': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'spa.event': {
'Meta': {'object_name': 'Event'},
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'attendees'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
'event_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
'event_description': ('tinymce.views.HTMLField', [], {}),
'event_recurrence': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Recurrence']"}),
'event_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
'event_title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'event_venue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Venue']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.genre': {
'Meta': {'object_name': 'Genre'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'spa.label': {
'Meta': {'object_name': 'Label'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa.mix': {
'Meta': {'object_name': 'Mix'},
'description': ('django.db.models.fields.TextField', [], {}),
'download_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['spa.Genre']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'mix_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'stream_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '38', 'blank': 'True'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"}),
'waveform_generated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'spa.mixdownload': {
'Meta': {'object_name': 'MixDownload', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'downloads'", 'to': "orm['spa.Mix']"})
},
'spa.mixfavourite': {
'Meta': {'object_name': 'MixFavourite', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favourites'", 'to': "orm['spa.Mix']"})
},
'spa.mixlike': {
'Meta': {'object_name': 'MixLike', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'likes'", 'to': "orm['spa.Mix']"})
},
'spa.mixplay': {
'Meta': {'object_name': 'MixPlay', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'plays'", 'to': "orm['spa.Mix']"})
},
'spa.purchaselink': {
'Meta': {'object_name': 'PurchaseLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_link'", 'to': "orm['spa.Tracklist']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'spa.recurrence': {
'Meta': {'object_name': 'Recurrence', '_ormbases': ['spa._Lookup']},
u'_lookup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Lookup']", 'unique': 'True', 'primary_key': 'True'})
},
'spa.release': {
'Meta': {'object_name': 'Release'},
'embed_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'release_artist': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'release_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 24, 0, 0)'}),
'release_description': ('django.db.models.fields.TextField', [], {}),
'release_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'release_label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Label']"}),
'release_title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"})
},
'spa.releaseaudio': {
'Meta': {'object_name': 'ReleaseAudio'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_audio'", 'null': 'True', 'to': "orm['spa.Release']"})
},
'spa.tracklist': {
'Meta': {'object_name': 'Tracklist'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.SmallIntegerField', [], {}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tracklist'", 'to': "orm['spa.Mix']"}),
'remixer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timeindex': ('django.db.models.fields.TimeField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'spa.userfollows': {
'Meta': {'object_name': 'UserFollows'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_from': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'followers'", 'unique': 'True', 'to': "orm['spa.UserProfile']"}),
'user_to': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'following'", 'unique': 'True', 'to': "orm['spa.UserProfile']"})
},
'spa.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activity_sharing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'activity_sharing_networks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'avatar_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'social'", 'max_length': '15'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
'spa.venue': {
'Meta': {'object_name': 'Venue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'venue_address': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'venue_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'venue_name': ('django.db.models.fields.CharField', [], {'max_length': '250'})
}
}
complete_apps = ['spa']
|
cgaspoz/l10n-switzerland
|
refs/heads/8.0
|
l10n_ch_payment_slip/bank.py
|
9
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class ResPartnerBank(models.Model):
"""
Inherit the res.partner.bank class in order to add Swiss specific fields
such as:
- BVR data
- BVR print options for company accounts
"""
_inherit = "res.partner.bank"
print_bank = fields.Boolean('Print Bank on BVR')
print_account = fields.Boolean('Print Account Number on BVR')
print_partner = fields.Boolean('Print Partner Address on BVR')
|
systers/vms
|
refs/heads/develop
|
vms/organization/__init__.py
|
12133432
| |
P0cL4bs/3vilTwinAttacker
|
refs/heads/master
|
plugins/external/BDFProxy-ng/bdf/aPLib/contrib/python/__init__.py
|
12133432
| |
TQRG/physalia
|
refs/heads/master
|
physalia/fixtures/__init__.py
|
12133432
| |
MazeFX/cookiecutter_website_project
|
refs/heads/master
|
config/__init__.py
|
12133432
| |
lucidmotifs/auto-aoc
|
refs/heads/master
|
.venv/lib/python3.5/site-packages/flake8/plugins/manager.py
|
6
|
"""Plugin loading and management logic and classes."""
import collections
import logging
import pkg_resources
from flake8 import exceptions
from flake8 import utils
from flake8.plugins import notifier
LOG = logging.getLogger(__name__)
__all__ = (
'Checkers',
'Listeners',
'Plugin',
'PluginManager',
'ReportFormatters',
)
NO_GROUP_FOUND = object()
class Plugin(object):
"""Wrap an EntryPoint from setuptools and other logic."""
def __init__(self, name, entry_point, local=False):
"""Initialize our Plugin.
:param str name:
Name of the entry-point as it was registered with setuptools.
:param entry_point:
EntryPoint returned by setuptools.
:type entry_point:
setuptools.EntryPoint
:param bool local:
Is this a repo-local plugin?
"""
self.name = name
self.entry_point = entry_point
self.local = local
self._plugin = None
self._parameters = None
self._parameter_names = None
self._group = None
self._plugin_name = None
self._version = None
def __repr__(self):
"""Provide an easy to read description of the current plugin."""
return 'Plugin(name="{0}", entry_point="{1}")'.format(
self.name, self.entry_point
)
def to_dictionary(self):
"""Convert this plugin to a dictionary."""
return {
'name': self.name,
'parameters': self.parameters,
'parameter_names': self.parameter_names,
'plugin': self.plugin,
'plugin_name': self.plugin_name,
}
def is_in_a_group(self):
"""Determine if this plugin is in a group.
:returns:
True if the plugin is in a group, otherwise False.
:rtype:
bool
"""
return self.group() is not None
def group(self):
"""Find and parse the group the plugin is in."""
if self._group is None:
name = self.name.split('.', 1)
if len(name) > 1:
self._group = name[0]
else:
self._group = NO_GROUP_FOUND
if self._group is NO_GROUP_FOUND:
return None
return self._group
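# Example: an entry-point named "pycodestyle.E1" parses to group
# "pycodestyle", while a bare name such as "mccabe" has no group
# and group() returns None.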
@property
def parameters(self):
"""List of arguments that need to be passed to the plugin."""
if self._parameters is None:
self._parameters = utils.parameters_for(self)
return self._parameters
@property
def parameter_names(self):
"""List of argument names that need to be passed to the plugin."""
if self._parameter_names is None:
self._parameter_names = list(self.parameters)
return self._parameter_names
@property
def plugin(self):
"""Load and return the plugin associated with the entry-point.
This property implicitly loads the plugin and then caches it.
"""
self.load_plugin()
return self._plugin
@property
def version(self):
"""Return the version of the plugin."""
if self._version is None:
if self.is_in_a_group():
self._version = version_for(self)
else:
self._version = self.plugin.version
return self._version
@property
def plugin_name(self):
"""Return the name of the plugin."""
if self._plugin_name is None:
if self.is_in_a_group():
self._plugin_name = self.group()
else:
self._plugin_name = self.plugin.name
return self._plugin_name
@property
def off_by_default(self):
"""Return whether the plugin is ignored by default."""
return getattr(self.plugin, 'off_by_default', False)
def execute(self, *args, **kwargs):
r"""Call the plugin with \*args and \*\*kwargs."""
return self.plugin(*args, **kwargs) # pylint: disable=not-callable
def _load(self, verify_requirements):
# Avoid relying on hasattr() here.
resolve = getattr(self.entry_point, 'resolve', None)
require = getattr(self.entry_point, 'require', None)
if resolve and require:
if verify_requirements:
LOG.debug('Verifying plugin "%s"\'s requirements.',
self.name)
require()
self._plugin = resolve()
else:
self._plugin = self.entry_point.load(
require=verify_requirements
)
if not callable(self._plugin):
msg = ('Plugin %r is not a callable. It might be written for an'
' older version of flake8 and might not work with this'
' version' % self._plugin)
LOG.critical(msg)
raise TypeError(msg)
def load_plugin(self, verify_requirements=False):
"""Retrieve the plugin for this entry-point.
This loads the plugin, stores it on the instance and then returns it.
It does not reload it after the first time, it merely returns the
cached plugin.
:param bool verify_requirements:
Whether or not to make setuptools verify that the requirements for
the plugin are satisfied.
:returns:
Nothing
"""
if self._plugin is None:
LOG.info('Loading plugin "%s" from entry-point.', self.name)
try:
self._load(verify_requirements)
except Exception as load_exception:
LOG.exception(load_exception)
failed_to_load = exceptions.FailedToLoadPlugin(
plugin=self,
exception=load_exception,
)
LOG.critical(str(failed_to_load))
raise failed_to_load
def enable(self, optmanager, options=None):
"""Remove plugin name from the default ignore list."""
optmanager.remove_from_default_ignore([self.name])
optmanager.extend_default_select([self.name])
if not options:
return
try:
options.ignore.remove(self.name)
except (ValueError, KeyError):
LOG.debug('Attempted to remove %s from the ignore list but it was '
'not a member of the list.', self.name)
def disable(self, optmanager):
"""Add the plugin name to the default ignore list."""
optmanager.extend_default_ignore([self.name])
def provide_options(self, optmanager, options, extra_args):
"""Pass the parsed options and extra arguments to the plugin."""
parse_options = getattr(self.plugin, 'parse_options', None)
if parse_options is not None:
LOG.debug('Providing options to plugin "%s".', self.name)
try:
parse_options(optmanager, options, extra_args)
except TypeError:
parse_options(options)
if self.name in options.enable_extensions:
self.enable(optmanager, options)
def register_options(self, optmanager):
"""Register the plugin's command-line options on the OptionManager.
:param optmanager:
Instantiated OptionManager to register options on.
:type optmanager:
flake8.options.manager.OptionManager
:returns:
Nothing
"""
add_options = getattr(self.plugin, 'add_options', None)
if add_options is not None:
LOG.debug(
'Registering options from plugin "%s" on OptionManager %r',
self.name, optmanager
)
add_options(optmanager)
if self.off_by_default:
self.disable(optmanager)
class PluginManager(object): # pylint: disable=too-few-public-methods
"""Find and manage plugins consistently."""
def __init__(self, namespace,
verify_requirements=False, local_plugins=None):
"""Initialize the manager.
:param str namespace:
Namespace of the plugins to manage, e.g., 'flake8.extension'.
:param list local_plugins:
Plugins from config (as "X = path.to:Plugin" strings).
:param bool verify_requirements:
Whether or not to make setuptools verify that the requirements for
the plugin are satisfied.
"""
self.namespace = namespace
self.verify_requirements = verify_requirements
self.plugins = {}
self.names = []
self._load_local_plugins(local_plugins or [])
self._load_entrypoint_plugins()
def _load_local_plugins(self, local_plugins):
"""Load local plugins from config.
:param list local_plugins:
Plugins from config (as "X = path.to:Plugin" strings).
"""
for plugin_str in local_plugins:
entry_point = pkg_resources.EntryPoint.parse(plugin_str)
self._load_plugin_from_entrypoint(entry_point, local=True)
def _load_entrypoint_plugins(self):
LOG.info('Loading entry-points for "%s".', self.namespace)
for entry_point in pkg_resources.iter_entry_points(self.namespace):
self._load_plugin_from_entrypoint(entry_point)
def _load_plugin_from_entrypoint(self, entry_point, local=False):
"""Load a plugin from a setuptools EntryPoint.
:param EntryPoint entry_point:
EntryPoint to load plugin from.
:param bool local:
Is this a repo-local plugin?
"""
name = entry_point.name
self.plugins[name] = Plugin(name, entry_point, local=local)
self.names.append(name)
LOG.debug('Loaded %r for plugin "%s".', self.plugins[name], name)
def map(self, func, *args, **kwargs):
r"""Call ``func`` with the plugin and \*args and \**kwargs after.
This yields the return value from ``func`` for each plugin.
:param collections.Callable func:
Function to call with each plugin. Signature should at least be:
.. code-block:: python
def myfunc(plugin):
pass
Any extra positional or keyword arguments specified with map will
be passed along to this function after the plugin. The plugin
passed is a :class:`~flake8.plugins.manager.Plugin`.
:param args:
Positional arguments to pass to ``func`` after each plugin.
:param kwargs:
Keyword arguments to pass to ``func`` after each plugin.
"""
for name in self.names:
yield func(self.plugins[name], *args, **kwargs)
def versions(self):
# () -> (str, str)
"""Generate the versions of plugins.
:returns:
Tuples of the plugin_name and version
:rtype:
tuple
"""
plugins_seen = set()
for entry_point_name in self.names:
plugin = self.plugins[entry_point_name]
plugin_name = plugin.plugin_name
if plugin.plugin_name in plugins_seen:
continue
plugins_seen.add(plugin_name)
yield (plugin_name, plugin.version)
def version_for(plugin):
# (Plugin) -> Union[str, NoneType]
"""Determine the version of a plugin by it's module.
:param plugin:
The loaded plugin
:type plugin:
Plugin
:returns:
version string for the module
:rtype:
str
"""
module_name = plugin.plugin.__module__
try:
module = __import__(module_name)
except ImportError:
return None
return getattr(module, '__version__', None)
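# A minimal usage sketch of PluginManager and version_for (illustrative
# only; it assumes flake8 entry-point plugins are installed in the
# current environment):
def _report_plugin_versions():
    """Load each plugin and print its name and reported version."""
    manager = PluginManager('flake8.extension')
    for plugin in manager.plugins.values():
        plugin.load_plugin()
        print(plugin.name, version_for(plugin))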
class PluginTypeManager(object):
"""Parent class for most of the specific plugin types."""
namespace = None
def __init__(self, local_plugins=None):
"""Initialize the plugin type's manager.
:param list local_plugins:
Plugins from config file instead of entry-points
"""
self.manager = PluginManager(
self.namespace, local_plugins=local_plugins)
self.plugins_loaded = False
def __contains__(self, name):
"""Check if the entry-point name is in this plugin type manager."""
LOG.debug('Checking for "%s" in plugin type manager.', name)
return name in self.plugins
def __getitem__(self, name):
"""Retrieve a plugin by its name."""
LOG.debug('Retrieving plugin for "%s".', name)
return self.plugins[name]
def get(self, name, default=None):
"""Retrieve the plugin referred to by ``name`` or return the default.
:param str name:
Name of the plugin to retrieve.
:param default:
Default value to return.
:returns:
Plugin object referred to by name, if it exists.
:rtype:
:class:`Plugin`
"""
if name in self:
return self[name]
return default
@property
def names(self):
"""Proxy attribute to underlying manager."""
return self.manager.names
@property
def plugins(self):
"""Proxy attribute to underlying manager."""
return self.manager.plugins
@staticmethod
def _generate_call_function(method_name, optmanager, *args, **kwargs):
def generated_function(plugin): # noqa: D105
method = getattr(plugin, method_name, None)
if (method is not None and
isinstance(method, collections.Callable)):
return method(optmanager, *args, **kwargs)
return generated_function
def load_plugins(self):
"""Load all plugins of this type that are managed by this manager."""
if self.plugins_loaded:
return
def load_plugin(plugin):
"""Call each plugin's load_plugin method."""
return plugin.load_plugin()
plugins = list(self.manager.map(load_plugin))
# Do not set plugins_loaded if we run into an exception
self.plugins_loaded = True
return plugins
def register_plugin_versions(self, optmanager):
"""Register the plugins and their versions with the OptionManager."""
self.load_plugins()
for (plugin_name, version) in self.manager.versions():
optmanager.register_plugin(name=plugin_name, version=version)
def register_options(self, optmanager):
"""Register all of the checkers' options to the OptionManager."""
self.load_plugins()
call_register_options = self._generate_call_function(
'register_options', optmanager,
)
list(self.manager.map(call_register_options))
def provide_options(self, optmanager, options, extra_args):
"""Provide parsed options and extra arguments to the plugins."""
call_provide_options = self._generate_call_function(
'provide_options', optmanager, options, extra_args,
)
list(self.manager.map(call_provide_options))
class NotifierBuilderMixin(object): # pylint: disable=too-few-public-methods
"""Mixin class that builds a Notifier from a PluginManager."""
def build_notifier(self):
"""Build a Notifier for our Listeners.
:returns:
Object to notify our listeners of certain error codes and
warnings.
:rtype:
:class:`~flake8.notifier.Notifier`
"""
notifier_trie = notifier.Notifier()
for name in self.names:
notifier_trie.register_listener(name, self.manager[name])
return notifier_trie
class Checkers(PluginTypeManager):
"""All of the checkers registered through entry-points or config."""
namespace = 'flake8.extension'
def checks_expecting(self, argument_name):
"""Retrieve checks that expect an argument with the specified name.
Find all checker plugins that are expecting a specific argument.
"""
for plugin in self.plugins.values():
if argument_name == plugin.parameter_names[0]:
yield plugin
def to_dictionary(self):
"""Return a dictionary of AST and line-based plugins."""
return {
'ast_plugins': [
plugin.to_dictionary() for plugin in self.ast_plugins
],
'logical_line_plugins': [
plugin.to_dictionary() for plugin in self.logical_line_plugins
],
'physical_line_plugins': [
plugin.to_dictionary() for plugin in self.physical_line_plugins
],
}
def register_options(self, optmanager):
"""Register all of the checkers' options to the OptionManager.
This also ensures that plugins that are not part of a group and are
enabled by default are enabled on the option manager.
"""
        # NOTE(sigmavirus24) We reproduce a little of
        # PluginTypeManager.register_options to reduce the number of times
        # that we loop over the list of plugins. Instead of looping twice
        # (once for option registration and once for enabling the plugin),
        # we loop once with one function to map over the plugins.
self.load_plugins()
call_register_options = self._generate_call_function(
'register_options', optmanager,
)
def register_and_enable(plugin):
call_register_options(plugin)
if plugin.group() is None and not plugin.off_by_default:
plugin.enable(optmanager)
list(self.manager.map(register_and_enable))
@property
def ast_plugins(self):
"""List of plugins that expect the AST tree."""
plugins = getattr(self, '_ast_plugins', [])
if not plugins:
plugins = list(self.checks_expecting('tree'))
self._ast_plugins = plugins
return plugins
@property
def logical_line_plugins(self):
"""List of plugins that expect the logical lines."""
plugins = getattr(self, '_logical_line_plugins', [])
if not plugins:
plugins = list(self.checks_expecting('logical_line'))
self._logical_line_plugins = plugins
return plugins
@property
def physical_line_plugins(self):
"""List of plugins that expect the physical lines."""
plugins = getattr(self, '_physical_line_plugins', [])
if not plugins:
plugins = list(self.checks_expecting('physical_line'))
self._physical_line_plugins = plugins
return plugins
class Listeners(PluginTypeManager, NotifierBuilderMixin):
"""All of the listeners registered through entry-points or config."""
namespace = 'flake8.listen'
class ReportFormatters(PluginTypeManager):
"""All of the report formatters registered through entry-points/config."""
namespace = 'flake8.report'
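# A minimal sketch tying the plugin-type managers above together
# (illustrative only; a real run would also wire in an OptionManager
# from flake8.options.manager before running any checks):
def _load_all_plugin_types(local_plugins=None):
    checkers = Checkers(local_plugins=local_plugins)
    formatters = ReportFormatters(local_plugins=local_plugins)
    checkers.load_plugins()
    formatters.load_plugins()
    return checkers, formatters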
|
limbocode/anime-list
|
refs/heads/master
|
exceptions.py
|
1
|
class ImproperlyConfigured(Exception): # Django feelings
pass
class SettingsFileMissing(ImproperlyConfigured):
pass
class LanguageNotAvailable(ImproperlyConfigured):
pass
class WrongTimezone(ImproperlyConfigured):
pass
class AnimeListNotFound(ImproperlyConfigured):
pass
|
olt/mapproxy
|
refs/heads/master
|
mapproxy/util/ext/odict.py
|
8
|
# -*- coding: utf-8 -*-
"""
odict
~~~~~
This module is an example implementation of an ordered dict for the
collections module. It's not written for performance (it actually
    performs pretty badly) but to show how the API works.
Questions and Answers
=====================
Why would anyone need ordered dicts?
Dicts in python are unordered which means that the order of items when
iterating over dicts is undefined. As a matter of fact it is most of
the time useless and differs from implementation to implementation.
Many developers stumble upon that problem sooner or later when
comparing the output of doctests which often does not match the order
the developer thought it would.
Also XML systems such as Genshi have their problems with unordered
dicts as the input and output ordering of tag attributes is often
mixed up because the ordering is lost when converting the data into
a dict. Switching to lists is often not possible because the
complexity of a lookup is too high.
Another very common case is metaprogramming. The default namespace
of a class in python is a dict. With Python 3 it becomes possible
to replace it with a different object which could be an ordered dict.
Django is already doing something similar with a hack that assigns
numbers to some descriptors initialized in the class body of a
specific subclass to restore the ordering after class creation.
When porting code from programming languages such as PHP and Ruby
where the item-order in a dict is guaranteed it's also a great help
to have an equivalent data structure in Python to ease the transition.
Where are new keys added?
At the end. This behavior is consistent with Ruby 1.9 Hashmaps
and PHP Arrays. It also matches what common ordered dict
implementations do currently.
What happens if an existing key is reassigned?
        The key is *not* moved. This is consistent with existing
implementations and can be changed by a subclass very easily::
class movingodict(odict):
def __setitem__(self, key, value):
self.pop(key, None)
odict.__setitem__(self, key, value)
Moving keys to the end of a ordered dict on reassignment is not
very useful for most applications.
Does it mean the dict keys are sorted by a sort expression?
That's not the case. The odict only guarantees that there is an order
and that newly inserted keys are inserted at the end of the dict. If
you want to sort it you can do so, but newly added keys are again added
at the end of the dict.
    I initialized the odict with a dict literal but the keys are not
    ordered like they should be!
Dict literals in Python generate dict objects and as such the order of
their items is not guaranteed. Before they are passed to the odict
constructor they are already unordered.
What happens if keys appear multiple times in the list passed to the
constructor?
The same as for the dict. The latter item overrides the former. This
has the side-effect that the position of the first key is used because
the key is actually overwritten:
>>> odict([('a', 1), ('b', 2), ('a', 3)])
odict.odict([('a', 3), ('b', 2)])
        This behavior is consistent with the existing implementation in
        Python, the PHP array, and the hashmap in Ruby 1.9.
This odict doesn't scale!
        True, it doesn't. The delitem operation is O(n). This file is a
        mockup of a real odict that could be implemented for collections
        based on a linked list.
Why is there no .insert()?
There are few situations where you really want to insert a key at
        a specified index. To avoid making the API too complex, the proposed
solution for this situation is creating a list of items, manipulating
that and converting it back into an odict:
>>> d = odict([('a', 42), ('b', 23), ('c', 19)])
>>> l = d.items()
>>> l.insert(1, ('x', 0))
>>> odict(l)
odict.odict([('a', 42), ('x', 0), ('b', 23), ('c', 19)])
:copyright: (c) 2008 by Armin Ronacher and PEP 273 authors.
:license: modified BSD license.
"""
from __future__ import absolute_import
from mapproxy.compat import iteritems
from mapproxy.compat.itertools import izip, imap
from copy import deepcopy
missing = object()
class odict(dict):
"""
Ordered dict example implementation.
    This is the proposed interface for an ordered dict as discussed on the
Python mailinglist (proposal_).
It's a dict subclass and provides some list functions. The implementation
of this class is inspired by the implementation of Babel but incorporates
some ideas from the `ordereddict`_ and Django's ordered dict.
The constructor and `update()` both accept iterables of tuples as well as
mappings:
>>> d = odict([('a', 'b'), ('c', 'd')])
>>> d.update({'foo': 'bar'})
>>> d
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar')])
Keep in mind that when updating from dict-literals the order is not
preserved as these dicts are unsorted!
You can copy an odict like a dict by using the constructor, `copy.copy`
or the `copy` method and make deep copies with `copy.deepcopy`:
>>> from copy import copy, deepcopy
>>> copy(d)
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar')])
>>> d.copy()
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar')])
>>> odict(d)
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar')])
>>> d['spam'] = []
>>> d2 = deepcopy(d)
>>> d2['spam'].append('eggs')
>>> d
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar'), ('spam', [])])
>>> d2
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar'), ('spam', ['eggs'])])
All iteration methods as well as `keys`, `values` and `items` return
    the values ordered by the time the key-value pair was inserted:
>>> d.keys()
['a', 'c', 'foo', 'spam']
>>> list(d.values())
['b', 'd', 'bar', []]
>>> list(d.items())
[('a', 'b'), ('c', 'd'), ('foo', 'bar'), ('spam', [])]
>>> list(d.iterkeys())
['a', 'c', 'foo', 'spam']
>>> list(d.itervalues())
['b', 'd', 'bar', []]
>>> list(d.iteritems())
[('a', 'b'), ('c', 'd'), ('foo', 'bar'), ('spam', [])]
Index based lookup is supported too by `byindex` which returns the
key/value pair for an index:
>>> d.byindex(2)
('foo', 'bar')
You can reverse the odict as well:
>>> d.reverse()
>>> d
odict.odict([('spam', []), ('foo', 'bar'), ('c', 'd'), ('a', 'b')])
And sort it like a list:
>>> d.sort(key=lambda x: x[0].lower())
>>> d
odict.odict([('a', 'b'), ('c', 'd'), ('foo', 'bar'), ('spam', [])])
.. _proposal: http://thread.gmane.org/gmane.comp.python.devel/95316
.. _ordereddict: http://www.xs4all.nl/~anthon/Python/ordereddict/
"""
def __init__(self, *args, **kwargs):
dict.__init__(self)
self._keys = []
self.update(*args, **kwargs)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
if key not in self:
self._keys.append(key)
dict.__setitem__(self, key, item)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
d = memo.get(id(self), missing)
if d is not missing:
return d
memo[id(self)] = d = self.__class__()
dict.__init__(d, deepcopy(self.items(), memo))
d._keys = self._keys[:]
return d
def __getstate__(self):
return {'items': dict(self), 'keys': self._keys}
def __setstate__(self, d):
self._keys = d['keys']
        dict.update(self, d['items'])
def __reversed__(self):
return reversed(self._keys)
def __eq__(self, other):
if isinstance(other, odict):
if not dict.__eq__(self, other):
return False
return self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __cmp__(self, other):
if isinstance(other, odict):
return cmp(self.items(), other.items())
elif isinstance(other, dict):
return dict.__cmp__(self, other)
return NotImplemented
@classmethod
def fromkeys(cls, iterable, default=None):
return cls((key, default) for key in iterable)
def clear(self):
del self._keys[:]
dict.clear(self)
def copy(self):
return self.__class__(self)
def items(self):
return list(zip(self._keys, self.values()))
def iteritems(self):
return izip(self._keys, self.itervalues())
def keys(self):
return self._keys[:]
def iterkeys(self):
return iter(self._keys)
def pop(self, key, default=missing):
if default is missing:
return dict.pop(self, key)
elif key not in self:
return default
self._keys.remove(key)
return dict.pop(self, key, default)
    def popitem(self, key):
        # Remove the key from the ordering list, then return the pair.
        self._keys.remove(key)
        return (key, dict.pop(self, key))
def setdefault(self, key, default=None):
if key not in self:
self._keys.append(key)
return dict.setdefault(self, key, default)
def update(self, *args, **kwargs):
sources = []
if len(args) == 1:
if hasattr(args[0], 'iteritems') or hasattr(args[0], 'items'):
sources.append(iteritems(args[0]))
else:
sources.append(iter(args[0]))
elif args:
raise TypeError('expected at most one positional argument')
if kwargs:
            sources.append(iteritems(kwargs))
for iterable in sources:
for key, val in iterable:
self[key] = val
def values(self):
return map(self.get, self._keys)
def itervalues(self):
return imap(self.get, self._keys)
def index(self, item):
return self._keys.index(item)
def byindex(self, item):
key = self._keys[item]
return (key, dict.__getitem__(self, key))
def reverse(self):
self._keys.reverse()
def sort(self, *args, **kwargs):
self._keys.sort(*args, **kwargs)
def __repr__(self):
return 'odict.odict(%r)' % self.items()
__copy__ = copy
__iter__ = iterkeys
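# A minimal usage sketch of the odict above (illustrative only; the
# doctests in the class docstring remain the authoritative examples):
def _odict_demo():
    d = odict()
    d['first'] = 1
    d['second'] = 2
    d['first'] = 99  # reassigning an existing key does not move it
    assert d.keys() == ['first', 'second']
    assert d.byindex(1) == ('second', 2)
    return d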
if __name__ == '__main__':
import doctest
doctest.testmod()
|
geopython/pywps
|
refs/heads/main
|
tests/test_capabilities.py
|
1
|
##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import unittest
import lxml
import lxml.etree
from pywps import configuration
from pywps.app import Process, Service
from pywps.app.Common import Metadata
from pywps import get_ElementMakerForVersion
from pywps.tests import assert_pywps_version, client_for, assert_wps_version
WPS, OWS = get_ElementMakerForVersion("1.0.0")
class BadRequestTest(unittest.TestCase):
def test_bad_http_verb(self):
client = client_for(Service())
resp = client.put('')
assert resp.status_code == 405 # method not allowed
def test_bad_request_type_with_get(self):
client = client_for(Service())
resp = client.get('?Request=foo')
assert resp.status_code == 400
def test_bad_service_type_with_get(self):
client = client_for(Service())
resp = client.get('?service=foo')
exception = resp.xpath('/ows:ExceptionReport'
'/ows:Exception')
assert resp.status_code == 400
assert exception[0].attrib['exceptionCode'] == 'InvalidParameterValue'
def test_bad_request_type_with_post(self):
client = client_for(Service())
request_doc = WPS.Foo()
resp = client.post_xml('', doc=request_doc)
assert resp.status_code == 400
class CapabilitiesTest(unittest.TestCase):
def setUp(self):
def pr1(): pass
def pr2(): pass
self.client = client_for(
Service(
processes=[
Process(
pr1,
"pr1",
"Process 1",
abstract="Process 1",
keywords=["kw1a", "kw1b"],
metadata=[Metadata("pr1 metadata")],
),
Process(
pr2,
"pr2",
"Process 2",
keywords=["kw2a"],
metadata=[Metadata("pr2 metadata")],
),
]
)
)
def check_capabilities_response(self, resp):
assert resp.status_code == 200
assert resp.headers['Content-Type'] == 'text/xml'
title = resp.xpath_text('/wps:Capabilities'
'/ows:ServiceIdentification'
'/ows:Title')
assert title != ''
names = resp.xpath_text('/wps:Capabilities'
'/wps:ProcessOfferings'
'/wps:Process'
'/ows:Identifier')
assert sorted(names.split()) == ['pr1', 'pr2']
keywords = resp.xpath('/wps:Capabilities'
'/wps:ProcessOfferings'
'/wps:Process'
'/ows:Keywords'
'/ows:Keyword')
assert len(keywords) == 3
metadatas = resp.xpath('/wps:Capabilities'
'/wps:ProcessOfferings'
'/wps:Process'
'/ows:Metadata')
assert len(metadatas) == 2
def test_get_request(self):
resp = self.client.get('?Request=GetCapabilities&service=WpS')
self.check_capabilities_response(resp)
        # case insensitive check
resp = self.client.get('?request=getcapabilities&service=wps')
self.check_capabilities_response(resp)
def test_post_request(self):
request_doc = WPS.GetCapabilities()
resp = self.client.post_xml(doc=request_doc)
self.check_capabilities_response(resp)
def test_get_bad_version(self):
resp = self.client.get('?request=getcapabilities&service=wps&acceptversions=2001-123')
exception = resp.xpath('/ows:ExceptionReport'
'/ows:Exception')
assert resp.status_code == 400
assert exception[0].attrib['exceptionCode'] == 'VersionNegotiationFailed'
def test_post_bad_version(self):
acceptedVersions_doc = OWS.AcceptVersions(OWS.Version('2001-123'))
request_doc = WPS.GetCapabilities(acceptedVersions_doc)
resp = self.client.post_xml(doc=request_doc)
exception = resp.xpath('/ows:ExceptionReport'
'/ows:Exception')
assert resp.status_code == 400
assert exception[0].attrib['exceptionCode'] == 'VersionNegotiationFailed'
def test_version(self):
resp = self.client.get('?service=WPS&request=GetCapabilities&version=1.0.0')
assert_wps_version(resp)
def test_version2(self):
resp = self.client.get('?service=WPS&request=GetCapabilities&acceptversions=2.0.0')
assert_wps_version(resp, version="2.0.0")
class CapabilitiesTranslationsTest(unittest.TestCase):
def setUp(self):
configuration.load_configuration()
configuration.CONFIG.set('server', 'language', 'en-US,fr-CA')
self.client = client_for(
Service(
processes=[
Process(
lambda: None,
"pr1",
"Process 1",
abstract="Process 1",
translations={"fr-CA": {"title": "Processus 1", "abstract": "Processus 1"}},
),
Process(
lambda: None,
"pr2",
"Process 2",
abstract="Process 2",
translations={"fr-CA": {"title": "Processus 2"}},
),
]
)
)
def tearDown(self):
configuration.CONFIG.set('server', 'language', 'en-US')
def test_get_translated(self):
resp = self.client.get('?Request=GetCapabilities&service=wps&language=fr-CA')
assert resp.xpath('/wps:Capabilities/@xml:lang')[0] == "fr-CA"
default = resp.xpath_text('/wps:Capabilities/wps:Languages/wps:Default/ows:Language')
assert default == 'en-US'
supported = resp.xpath('/wps:Capabilities/wps:Languages/wps:Supported/ows:Language/text()')
assert supported == ["en-US", "fr-CA"]
processes = list(resp.xpath('//wps:ProcessOfferings')[0])
assert [e.text for e in processes[0]] == ['pr1', 'Processus 1', 'Processus 1']
assert [e.text for e in processes[1]] == ['pr2', 'Processus 2', 'Process 2']
def load_tests(loader=None, tests=None, pattern=None):
if not loader:
loader = unittest.TestLoader()
suite_list = [
loader.loadTestsFromTestCase(BadRequestTest),
loader.loadTestsFromTestCase(CapabilitiesTest),
loader.loadTestsFromTestCase(CapabilitiesTranslationsTest),
]
return unittest.TestSuite(suite_list)
|
untitaker/icalendar
|
refs/heads/master
|
bootstrap.py
|
95
|
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
options, args = parser.parse_args()
######################################################################
# load/install setuptools
try:
if options.allow_site_packages:
import setuptools
import pkg_resources
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
sys.path[:] = [x for x in sys.path if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
|
jobiols/odoo-addons
|
refs/heads/11.0
|
product_price_update/wizard/__init__.py
|
1
|
# For copyright and license notices, see __manifest__.py file in module root
from . import price_update
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/341_test_crashers.py
|
116
|
# Tests that the crashers in the Lib/test/crashers directory actually
# do crash the interpreter as expected
#
# If a crasher is fixed, it should be moved elsewhere in the test suite to
# ensure it continues to work correctly.
import unittest
import glob
import os.path
import test.support
from test.script_helper import assert_python_failure
CRASHER_DIR = os.path.join(os.path.dirname(__file__), "crashers")
CRASHER_FILES = os.path.join(CRASHER_DIR, "*.py")
infinite_loops = ["infinite_loop_re.py", "nasty_eq_vs_dict.py"]
class CrasherTest(unittest.TestCase):
@unittest.skip("these tests are too fragile")
@test.support.cpython_only
def test_crashers_crash(self):
for fname in glob.glob(CRASHER_FILES):
if os.path.basename(fname) in infinite_loops:
continue
# Some "crashers" only trigger an exception rather than a
# segfault. Consider that an acceptable outcome.
if test.support.verbose:
print("Checking crasher:", fname)
assert_python_failure(fname)
def test_main():
test.support.run_unittest(CrasherTest)
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
elysium001/zamboni
|
refs/heads/master
|
lib/video/utils.py
|
44
|
import subprocess
def check_output(*popenargs, **kwargs):
# Tell thee, check_output was from Python 2.7 untimely ripp'd.
# check_output shall never vanquish'd be until
# Marketplace moves to Python 2.7.
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
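# A minimal usage sketch of the backport above (the command is
# illustrative and assumes a POSIX 'echo' binary is available on PATH):
def _check_output_demo():
    """Run a trivial command and return its captured stdout."""
    return check_output(['echo', 'hello'])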
class VideoBase(object):
def __init__(self, filename):
self.filename = filename
self.meta = None
self.errors = []
def _call(self):
raise NotImplementedError
def get_encoded(self, size):
raise NotImplementedError
def get_screenshot(self, size):
raise NotImplementedError
def get_meta(self):
pass
@classmethod
def library_available(cls):
pass
def is_valid(self):
return
|
heldtogether/peeping-tom
|
refs/heads/master
|
peeping-tom.py
|
1
|
#!/usr/bin/python
import logging
import sys
import threading
import time
from peepingtom import Arguments, tasks, io
def main(argv):
args = Arguments()
args.parse_arguments(argv)
logging.basicConfig(level=args.log_level)
reset_button = io.PushButton(4)
lcd = io.LCD()
lcd_lock = threading.Lock()
lcd_lock.acquire()
lcd.message("Starting\nPeeping Tom...")
time.sleep(2)
lcd_lock.release()
should_exit = threading.Event()
setup = tasks.Setup(should_exit, args.debug, lcd, lcd_lock, reset_button)
fetch = tasks.Fetch(should_exit, args.private_token, args.project_id, lcd, lcd_lock)
fetch.start()
setup.start()
try:
while 1:
time.sleep(1)
except KeyboardInterrupt:
should_exit.set()
if __name__ == "__main__":
main(sys.argv[1:])
|
charbeljc/OCB
|
refs/heads/8.0
|
openerp/tools/pdf_utils.py
|
456
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Copyright (c) 2003-2007 LOGILAB S.A. (Paris, FRANCE).
http://www.logilab.fr/ -- mailto:contact@logilab.fr
manipulate PDF and FDF files. pdftk is recommended.
Notes regarding pdftk, pdf forms and fdf files (form definition file)
field names can be extracted with:
pdftk orig.pdf generate_fdf output truc.fdf
to merge fdf and pdf:
pdftk orig.pdf fill_form test.fdf output result.pdf [flatten]
without flatten, one could further edit the resulting form.
with flatten, everything is turned into text.
"""
from __future__ import with_statement
import os
import tempfile
HEAD="""%FDF-1.2
%\xE2\xE3\xCF\xD3
1 0 obj
<<
/FDF
<<
/Fields [
"""
TAIL="""]
>>
>>
endobj
trailer
<<
/Root 1 0 R
>>
%%EOF
"""
def output_field(f):
return "\xfe\xff" + "".join( [ "\x00"+c for c in f ] )
def extract_keys(lines):
keys = []
for line in lines:
if line.startswith('/V'):
pass #print 'value',line
elif line.startswith('/T'):
key = line[7:-2]
key = ''.join(key.split('\x00'))
keys.append( key )
return keys
def write_field(out, key, value):
out.write("<<\n")
if value:
out.write("/V (%s)\n" %value)
else:
out.write("/V /\n")
out.write("/T (%s)\n" % output_field(key) )
out.write(">> \n")
def write_fields(out, fields):
out.write(HEAD)
for key in fields:
value = fields[key]
write_field(out, key, value)
# write_field(out, key+"a", value) # pour copie-carbone sur autres pages
out.write(TAIL)
def extract_keys_from_pdf(filename):
    # what about using 'pdftk filename dump_data_fields' and parsing the output?
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
os.system('pdftk %s generate_fdf output \"%s\"' % (filename, tmp_file))
with open(tmp_file, "r") as ofile:
lines = ofile.readlines()
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
return extract_keys(lines)
def fill_pdf(infile, outfile, fields):
tmp_file = tempfile.mkstemp(".fdf")[1]
try:
with open(tmp_file, "w") as ofile:
write_fields(ofile, fields)
os.system('pdftk %s fill_form \"%s\" output %s flatten' % (infile, tmp_file, outfile))
finally:
try:
os.remove(tmp_file)
except Exception:
pass # nothing to do
def testfill_pdf(infile, outfile):
keys = extract_keys_from_pdf(infile)
    fields = {}
    for key in keys:
        fields[key] = key  # fill each field with its own name, for testing
fill_pdf(infile, outfile, fields)
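# A minimal usage sketch of fill_pdf (paths and field names below are
# illustrative only; the external pdftk binary must be installed):
def demo_fill_pdf():
    fields = {'customer_name': 'Alice', 'invoice_date': '2014-01-01'}
    fill_pdf('invoice_form.pdf', 'invoice_filled.pdf', fields)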
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
geodynamics/pylith
|
refs/heads/hackathon/static-greens-fns
|
pylith/utils/NullComponent.py
|
1
|
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
# @file pylith/utils/NullComponent.py
#
# @brief Python NullComponent object that is an empty component.
from .PetscComponent import PetscComponent
class NullComponent(PetscComponent):
"""Python NullComponent object that is an empty component.
"""
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self):
"""Constructor.
"""
PetscComponent.__init__(self, name="nullcomponent", facility="nullcomponent")
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _cleanup(self):
"""Deallocate locally managed data structures.
"""
return
# End of file
|
oliviersm199/mstsversion2
|
refs/heads/master
|
mstsv2/mstsv2app/resources/routes.py
|
1
|
from flask import Blueprint
from views import list_resources
# Blueprint aids in creating a modular application. This blueprint can be
# imported and added to our main app so that all of the resources and url
# rules listed in here can be used across the entire application (a
# registration sketch follows below).
resources_blueprint = Blueprint('resources',__name__)
# Resource Template Views
resources_blueprint.add_url_rule('',view_func=list_resources)
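# A minimal sketch of wiring this blueprint into a main application
# (the app and url prefix below are illustrative, not from this repo):
def create_demo_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(resources_blueprint, url_prefix='/resources')
    return app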
|
Curious72/sympy
|
refs/heads/master
|
sympy/vector/tests/test_coordsysrect.py
|
26
|
from sympy.vector.coordsysrect import CoordSysCartesian
from sympy.vector.scalar import BaseScalar
from sympy import sin, cos, pi, ImmutableMatrix as Matrix, \
symbols, simplify, zeros, expand
from sympy.vector.functions import express
from sympy.vector.point import Point
from sympy.vector.vector import Vector
from sympy.vector.orienters import (AxisOrienter, BodyOrienter,
SpaceOrienter, QuaternionOrienter)
a, b, c, q = symbols('a b c q')
q1, q2, q3, q4 = symbols('q1 q2 q3 q4')
def test_func_args():
A = CoordSysCartesian('A')
assert A.x.func(*A.x.args) == A.x
expr = 3*A.x + 4*A.y
assert expr.func(*expr.args) == expr
assert A.i.func(*A.i.args) == A.i
v = A.x*A.i + A.y*A.j + A.z*A.k
assert v.func(*v.args) == v
assert A.origin.func(*A.origin.args) == A.origin
def test_coordsyscartesian_equivalence():
A = CoordSysCartesian('A')
A1 = CoordSysCartesian('A')
assert A1 == A
B = CoordSysCartesian('B')
assert A != B
def test_orienters():
A = CoordSysCartesian('A')
axis_orienter = AxisOrienter(a, A.k)
body_orienter = BodyOrienter(a, b, c, '123')
space_orienter = SpaceOrienter(a, b, c, '123')
q_orienter = QuaternionOrienter(q1, q2, q3, q4)
assert axis_orienter.rotation_matrix(A) == Matrix([
[ cos(a), sin(a), 0],
[-sin(a), cos(a), 0],
[ 0, 0, 1]])
assert body_orienter.rotation_matrix() == Matrix([
[ cos(b)*cos(c), sin(a)*sin(b)*cos(c) + sin(c)*cos(a),
sin(a)*sin(c) - sin(b)*cos(a)*cos(c)],
[-sin(c)*cos(b), -sin(a)*sin(b)*sin(c) + cos(a)*cos(c),
sin(a)*cos(c) + sin(b)*sin(c)*cos(a)],
[ sin(b), -sin(a)*cos(b),
cos(a)*cos(b)]])
assert space_orienter.rotation_matrix() == Matrix([
[cos(b)*cos(c), sin(c)*cos(b), -sin(b)],
[sin(a)*sin(b)*cos(c) - sin(c)*cos(a),
sin(a)*sin(b)*sin(c) + cos(a)*cos(c), sin(a)*cos(b)],
[sin(a)*sin(c) + sin(b)*cos(a)*cos(c), -sin(a)*cos(c) +
sin(b)*sin(c)*cos(a), cos(a)*cos(b)]])
assert q_orienter.rotation_matrix() == Matrix([
[q1**2 + q2**2 - q3**2 - q4**2, 2*q1*q4 + 2*q2*q3,
-2*q1*q3 + 2*q2*q4],
[-2*q1*q4 + 2*q2*q3, q1**2 - q2**2 + q3**2 - q4**2,
2*q1*q2 + 2*q3*q4],
[2*q1*q3 + 2*q2*q4,
-2*q1*q2 + 2*q3*q4, q1**2 - q2**2 - q3**2 + q4**2]])
def test_coordinate_vars():
"""
Tests the coordinate variables functionality with respect to
reorientation of coordinate systems.
"""
A = CoordSysCartesian('A')
# Note that the name given on the lhs is different from A.x._name
assert BaseScalar('A.x', 0, A, 'A_x', r'\mathbf{{x}_{A}}') == A.x
assert BaseScalar('A.y', 1, A, 'A_y', r'\mathbf{{y}_{A}}') == A.y
assert BaseScalar('A.z', 2, A, 'A_z', r'\mathbf{{z}_{A}}') == A.z
assert BaseScalar('A.x', 0, A, 'A_x', r'\mathbf{{x}_{A}}').__hash__() == A.x.__hash__()
assert isinstance(A.x, BaseScalar) and \
isinstance(A.y, BaseScalar) and \
isinstance(A.z, BaseScalar)
assert A.x*A.y == A.y*A.x
assert A.scalar_map(A) == {A.x: A.x, A.y: A.y, A.z: A.z}
assert A.x.system == A
assert A.x.diff(A.x) == 1
B = A.orient_new_axis('B', q, A.k)
assert B.scalar_map(A) == {B.z: A.z, B.y: -A.x*sin(q) + A.y*cos(q),
B.x: A.x*cos(q) + A.y*sin(q)}
assert A.scalar_map(B) == {A.x: B.x*cos(q) - B.y*sin(q),
A.y: B.x*sin(q) + B.y*cos(q), A.z: B.z}
assert express(B.x, A, variables=True) == A.x*cos(q) + A.y*sin(q)
assert express(B.y, A, variables=True) == -A.x*sin(q) + A.y*cos(q)
assert express(B.z, A, variables=True) == A.z
assert expand(express(B.x*B.y*B.z, A, variables=True)) == \
expand(A.z*(-A.x*sin(q) + A.y*cos(q))*(A.x*cos(q) + A.y*sin(q)))
assert express(B.x*B.i + B.y*B.j + B.z*B.k, A) == \
(B.x*cos(q) - B.y*sin(q))*A.i + (B.x*sin(q) + \
B.y*cos(q))*A.j + B.z*A.k
assert simplify(express(B.x*B.i + B.y*B.j + B.z*B.k, A, \
variables=True)) == \
A.x*A.i + A.y*A.j + A.z*A.k
assert express(A.x*A.i + A.y*A.j + A.z*A.k, B) == \
(A.x*cos(q) + A.y*sin(q))*B.i + \
(-A.x*sin(q) + A.y*cos(q))*B.j + A.z*B.k
assert simplify(express(A.x*A.i + A.y*A.j + A.z*A.k, B, \
variables=True)) == \
B.x*B.i + B.y*B.j + B.z*B.k
N = B.orient_new_axis('N', -q, B.k)
assert N.scalar_map(A) == \
{N.x: A.x, N.z: A.z, N.y: A.y}
C = A.orient_new_axis('C', q, A.i + A.j + A.k)
mapping = A.scalar_map(C)
assert mapping[A.x] == (C.x*(2*cos(q) + 1)/3 +
C.y*(-2*sin(q + pi/6) + 1)/3 +
C.z*(-2*cos(q + pi/3) + 1)/3)
assert mapping[A.y] == (C.x*(-2*cos(q + pi/3) + 1)/3 +
C.y*(2*cos(q) + 1)/3 +
C.z*(-2*sin(q + pi/6) + 1)/3)
assert mapping[A.z] == (C.x*(-2*sin(q + pi/6) + 1)/3 +
C.y*(-2*cos(q + pi/3) + 1)/3 +
C.z*(2*cos(q) + 1)/3)
D = A.locate_new('D', a*A.i + b*A.j + c*A.k)
assert D.scalar_map(A) == {D.z: A.z - c, D.x: A.x - a, D.y: A.y - b}
E = A.orient_new_axis('E', a, A.k, a*A.i + b*A.j + c*A.k)
assert A.scalar_map(E) == {A.z: E.z + c,
A.x: E.x*cos(a) - E.y*sin(a) + a,
A.y: E.x*sin(a) + E.y*cos(a) + b}
assert E.scalar_map(A) == {E.x: (A.x - a)*cos(a) + (A.y - b)*sin(a),
E.y: (-A.x + a)*sin(a) + (A.y - b)*cos(a),
E.z: A.z - c}
F = A.locate_new('F', Vector.zero)
assert A.scalar_map(F) == {A.z: F.z, A.x: F.x, A.y: F.y}
def test_rotation_matrix():
N = CoordSysCartesian('N')
A = N.orient_new_axis('A', q1, N.k)
B = A.orient_new_axis('B', q2, A.i)
C = B.orient_new_axis('C', q3, B.j)
D = N.orient_new_axis('D', q4, N.j)
E = N.orient_new_space('E', q1, q2, q3, '123')
F = N.orient_new_quaternion('F', q1, q2, q3, q4)
G = N.orient_new_body('G', q1, q2, q3, '123')
assert N.rotation_matrix(C) == Matrix([
[- sin(q1) * sin(q2) * sin(q3) + cos(q1) * cos(q3), - sin(q1) *
cos(q2), sin(q1) * sin(q2) * cos(q3) + sin(q3) * cos(q1)], \
[sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1), \
cos(q1) * cos(q2), sin(q1) * sin(q3) - sin(q2) * cos(q1) * \
cos(q3)], [- sin(q3) * cos(q2), sin(q2), cos(q2) * cos(q3)]])
test_mat = D.rotation_matrix(C) - Matrix(
[[cos(q1) * cos(q3) * cos(q4) - sin(q3) * (- sin(q4) * cos(q2) +
sin(q1) * sin(q2) * cos(q4)), - sin(q2) * sin(q4) - sin(q1) *
cos(q2) * cos(q4), sin(q3) * cos(q1) * cos(q4) + cos(q3) * \
(- sin(q4) * cos(q2) + sin(q1) * sin(q2) * cos(q4))], \
[sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1), cos(q1) * \
cos(q2), sin(q1) * sin(q3) - sin(q2) * cos(q1) * cos(q3)], \
[sin(q4) * cos(q1) * cos(q3) - sin(q3) * (cos(q2) * cos(q4) + \
sin(q1) * sin(q2) * \
sin(q4)), sin(q2) *
cos(q4) - sin(q1) * sin(q4) * cos(q2), sin(q3) * \
sin(q4) * cos(q1) + cos(q3) * (cos(q2) * cos(q4) + \
sin(q1) * sin(q2) * sin(q4))]])
assert test_mat.expand() == zeros(3, 3)
assert E.rotation_matrix(N) == Matrix(
[[cos(q2)*cos(q3), sin(q3)*cos(q2), -sin(q2)],
[sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), \
sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2)], \
[sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), - \
sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2)]])
assert F.rotation_matrix(N) == Matrix([[
q1**2 + q2**2 - q3**2 - q4**2,
2*q1*q4 + 2*q2*q3, -2*q1*q3 + 2*q2*q4],[ -2*q1*q4 + 2*q2*q3,
q1**2 - q2**2 + q3**2 - q4**2, 2*q1*q2 + 2*q3*q4],
[2*q1*q3 + 2*q2*q4,
-2*q1*q2 + 2*q3*q4,
q1**2 - q2**2 - q3**2 + q4**2]])
assert G.rotation_matrix(N) == Matrix([[
cos(q2)*cos(q3), sin(q1)*sin(q2)*cos(q3) + sin(q3)*cos(q1),
sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3)], [
-sin(q3)*cos(q2), -sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3),
sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)],[
sin(q2), -sin(q1)*cos(q2), cos(q1)*cos(q2)]])
def test_vector():
"""
Tests the effects of orientation of coordinate systems on
basic vector operations.
"""
N = CoordSysCartesian('N')
A = N.orient_new_axis('A', q1, N.k)
B = A.orient_new_axis('B', q2, A.i)
C = B.orient_new_axis('C', q3, B.j)
#Test to_matrix
v1 = a*N.i + b*N.j + c*N.k
assert v1.to_matrix(A) == Matrix([[ a*cos(q1) + b*sin(q1)],
[-a*sin(q1) + b*cos(q1)],
[ c]])
#Test dot
assert N.i.dot(A.i) == cos(q1)
assert N.i.dot(A.j) == -sin(q1)
assert N.i.dot(A.k) == 0
assert N.j.dot(A.i) == sin(q1)
assert N.j.dot(A.j) == cos(q1)
assert N.j.dot(A.k) == 0
assert N.k.dot(A.i) == 0
assert N.k.dot(A.j) == 0
assert N.k.dot(A.k) == 1
assert N.i.dot(A.i + A.j) == -sin(q1) + cos(q1) == \
(A.i + A.j).dot(N.i)
assert A.i.dot(C.i) == cos(q3)
assert A.i.dot(C.j) == 0
assert A.i.dot(C.k) == sin(q3)
assert A.j.dot(C.i) == sin(q2)*sin(q3)
assert A.j.dot(C.j) == cos(q2)
assert A.j.dot(C.k) == -sin(q2)*cos(q3)
assert A.k.dot(C.i) == -cos(q2)*sin(q3)
assert A.k.dot(C.j) == sin(q2)
assert A.k.dot(C.k) == cos(q2)*cos(q3)
#Test cross
assert N.i.cross(A.i) == sin(q1)*A.k
assert N.i.cross(A.j) == cos(q1)*A.k
assert N.i.cross(A.k) == -sin(q1)*A.i - cos(q1)*A.j
assert N.j.cross(A.i) == -cos(q1)*A.k
assert N.j.cross(A.j) == sin(q1)*A.k
assert N.j.cross(A.k) == cos(q1)*A.i - sin(q1)*A.j
assert N.k.cross(A.i) == A.j
assert N.k.cross(A.j) == -A.i
assert N.k.cross(A.k) == Vector.zero
assert N.i.cross(A.i) == sin(q1)*A.k
assert N.i.cross(A.j) == cos(q1)*A.k
assert N.i.cross(A.i + A.j) == sin(q1)*A.k + cos(q1)*A.k
assert (A.i + A.j).cross(N.i) == (-sin(q1) - cos(q1))*N.k
assert A.i.cross(C.i) == sin(q3)*C.j
assert A.i.cross(C.j) == -sin(q3)*C.i + cos(q3)*C.k
assert A.i.cross(C.k) == -cos(q3)*C.j
assert C.i.cross(A.i) == (-sin(q3)*cos(q2))*A.j + \
(-sin(q2)*sin(q3))*A.k
assert C.j.cross(A.i) == (sin(q2))*A.j + (-cos(q2))*A.k
assert express(C.k.cross(A.i), C).trigsimp() == cos(q3)*C.j
def test_orient_new_methods():
N = CoordSysCartesian('N')
orienter1 = AxisOrienter(q4, N.j)
orienter2 = SpaceOrienter(q1, q2, q3, '123')
orienter3 = QuaternionOrienter(q1, q2, q3, q4)
orienter4 = BodyOrienter(q1, q2, q3, '123')
D = N.orient_new('D', (orienter1, ))
E = N.orient_new('E', (orienter2, ))
F = N.orient_new('F', (orienter3, ))
G = N.orient_new('G', (orienter4, ))
assert D == N.orient_new_axis('D', q4, N.j)
assert E == N.orient_new_space('E', q1, q2, q3, '123')
assert F == N.orient_new_quaternion('F', q1, q2, q3, q4)
assert G == N.orient_new_body('G', q1, q2, q3, '123')
def test_locatenew_point():
"""
Tests Point class, and locate_new method in CoordSysCartesian.
"""
A = CoordSysCartesian('A')
assert isinstance(A.origin, Point)
v = a*A.i + b*A.j + c*A.k
C = A.locate_new('C', v)
assert C.origin.position_wrt(A) == \
C.position_wrt(A) == \
C.origin.position_wrt(A.origin) == v
assert A.origin.position_wrt(C) == \
A.position_wrt(C) == \
A.origin.position_wrt(C.origin) == -v
assert A.origin.express_coordinates(C) == (-a, -b, -c)
p = A.origin.locate_new('p', -v)
assert p.express_coordinates(A) == (-a, -b, -c)
assert p.position_wrt(C.origin) == p.position_wrt(C) == \
-2 * v
p1 = p.locate_new('p1', 2*v)
assert p1.position_wrt(C.origin) == Vector.zero
assert p1.express_coordinates(C) == (0, 0, 0)
p2 = p.locate_new('p2', A.i)
assert p1.position_wrt(p2) == 2*v - A.i
assert p2.express_coordinates(C) == (-2*a + 1, -2*b, -2*c)
def test_evalf():
A = CoordSysCartesian('A')
v = 3*A.i + 4*A.j + a*A.k
assert v.n() == v.evalf()
assert v.evalf(subs={a:1}) == v.subs(a, 1).evalf()
|
narrowcast/pyipay
|
refs/heads/master
|
setup.py
|
1
|
from distutils.core import setup
setup (
name='pyipay',
version='0.1.1',
author='Chee-Hyung Yoon',
author_email='yoon@tikkon.com',
packages=['pyipay',],
url='http://pypi.python.org/pypi/pyipay/',
license='LICENSE.txt',
description='A Python library for accessing the Auction iPay API',
long_description=open('README.md').read(),
install_requires=[
"suds >= 0.4",
],
)
|
brandond/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/openstack/os_stack.py
|
15
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
# (c) 2016, Steve Baker <sbaker@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_stack
short_description: Add/Remove Heat Stack
extends_documentation_fragment: openstack
version_added: "2.2"
author:
- "Mathieu Bultel (@matbu)"
- "Steve Baker (@steveb)"
description:
    - Add or remove a stack in OpenStack Heat
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
        - Name of the stack that should be created; the name may contain letters and digits, but no spaces
required: true
tag:
description:
        - Tag for the stack that should be created; the tag may contain letters and digits, but no spaces
version_added: "2.5"
template:
description:
- Path of the template file to use for the stack creation
environment:
description:
- List of environment files that should be used for the stack creation
parameters:
description:
- Dictionary of parameters for the stack creation
rollback:
description:
- Rollback stack creation
type: bool
default: 'yes'
timeout:
description:
- Maximum number of seconds to wait for the stack creation
default: 3600
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
---
- name: create stack
ignore_errors: True
register: stack_create
os_stack:
name: "{{ stack_name }}"
tag: "{{ tag_name }}"
state: present
template: "/path/to/my_stack.yaml"
environment:
- /path/to/resource-registry.yaml
- /path/to/environment.yaml
parameters:
bmc_flavor: m1.medium
bmc_image: CentOS
key_name: default
private_net: "{{ private_net_param }}"
node_count: 2
name: undercloud
image: CentOS
my_flavor: m1.large
external_net: "{{ external_net_param }}"
'''
RETURN = '''
id:
description: Stack ID.
type: str
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
returned: always
stack:
description: stack info
type: complex
returned: always
contains:
action:
description: Action, could be Create or Update.
type: str
sample: "CREATE"
creation_time:
description: Time when the action has been made.
type: str
sample: "2016-07-05T17:38:12Z"
description:
description: Description of the Stack provided in the heat template.
type: str
sample: "HOT template to create a new instance and networks"
id:
description: Stack ID.
type: str
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
name:
description: Name of the Stack
type: str
sample: "test-stack"
identifier:
description: Identifier of the current Stack action.
type: str
sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
links:
description: Links to the current Stack.
type: list of dict
sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
outputs:
description: Output returned by the Stack.
type: list of dict
sample: "{'description': 'IP address of server1 in private network',
'output_key': 'server1_private_ip',
'output_value': '10.1.10.103'}"
parameters:
description: Parameters of the current Stack
type: dict
sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
'OS::stack_name': 'test-stack',
'stack_status': 'CREATE_COMPLETE',
'stack_status_reason': 'Stack CREATE completed successfully',
'status': 'COMPLETE',
'template_description': 'HOT template to create a new instance and networks',
'timeout_mins': 60,
'updated_time': null}"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
from ansible.module_utils._text import to_native
def _create_stack(module, stack, cloud, sdk):
try:
stack = cloud.create_stack(module.params['name'],
tags=module.params['tag'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
wait=True,
rollback=module.params['rollback'],
**module.params['parameters'])
stack = cloud.get_stack(stack.id, None)
if stack.stack_status == 'CREATE_COMPLETE':
return stack
else:
module.fail_json(msg="Failure in creating stack: {0}".format(stack))
except sdk.exceptions.OpenStackCloudException as e:
if hasattr(e, 'response'):
module.fail_json(msg=to_native(e), response=e.response.json())
else:
module.fail_json(msg=to_native(e))
def _update_stack(module, stack, cloud, sdk):
try:
stack = cloud.update_stack(
module.params['name'],
template_file=module.params['template'],
environment_files=module.params['environment'],
timeout=module.params['timeout'],
rollback=module.params['rollback'],
wait=module.params['wait'],
**module.params['parameters'])
if stack['stack_status'] == 'UPDATE_COMPLETE':
return stack
else:
module.fail_json(msg="Failure in updating stack: %s" %
stack['stack_status_reason'])
except sdk.exceptions.OpenStackCloudException as e:
if hasattr(e, 'response'):
module.fail_json(msg=to_native(e), response=e.response.json())
else:
module.fail_json(msg=to_native(e))
def _system_state_change(module, stack, cloud):
state = module.params['state']
if state == 'present':
if not stack:
return True
if state == 'absent' and stack:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
tag=dict(required=False, default=None),
template=dict(default=None),
environment=dict(default=None, type='list'),
parameters=dict(default={}, type='dict'),
rollback=dict(default=False, type='bool'),
timeout=dict(default=3600, type='int'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
state = module.params['state']
name = module.params['name']
# Check for required parameters when state == 'present'
if state == 'present':
for p in ['template']:
if not module.params[p]:
module.fail_json(msg='%s required with present state' % p)
sdk, cloud = openstack_cloud_from_module(module)
try:
stack = cloud.get_stack(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, stack,
cloud))
if state == 'present':
if not stack:
stack = _create_stack(module, stack, cloud, sdk)
else:
stack = _update_stack(module, stack, cloud, sdk)
changed = True
module.exit_json(changed=changed,
stack=stack,
id=stack.id)
elif state == 'absent':
if not stack:
changed = False
else:
changed = True
if not cloud.delete_stack(name, wait=module.params['wait']):
module.fail_json(msg='delete stack failed for stack: %s' % name)
module.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
|
sajeeshcs/nested_quota_latest
|
refs/heads/master
|
nova/api/openstack/compute/plugins/v3/limits.py
|
2
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.views import limits as limits_views
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import quota
QUOTAS = quota.QUOTAS
ALIAS = 'limits'
class LimitsController(wsgi.Controller):
"""Controller for accessing limits in the OpenStack API."""
@extensions.expected_errors(())
def index(self, req):
"""Return all global and rate limit information."""
context = req.environ['nova.context']
project_id = req.params.get('tenant_id', context.project_id)
quotas = QUOTAS.get_project_quotas(context, project_id,
usages=False)
abs_limits = dict((k, v['limit']) for k, v in quotas.items())
rate_limits = req.environ.get("nova.limits", [])
builder = self._get_view_builder(req)
return builder.build(rate_limits, abs_limits)
def _get_view_builder(self, req):
return limits_views.ViewBuilderV3()
class Limits(extensions.V3APIExtensionBase):
"""Limits support."""
name = "Limits"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
LimitsController())]
return resource
def get_controller_extensions(self):
return []
|
La0/mozilla-relengapi
|
refs/heads/master
|
src/pulselistener/pulselistener/hook.py
|
1
|
# -*- coding: utf-8 -*-
import json
import structlog
from pulselistener import taskcluster
from pulselistener.lib.bus import MessageBus
from pulselistener.lib.pulse import create_consumer
from pulselistener.monitoring import task_monitoring
logger = structlog.get_logger(__name__)
class Hook(object):
'''
A taskcluster hook, used to build a task
'''
def __init__(self, group_id, hook_id, bus):
self.group_id = group_id
self.hook_id = hook_id
self.hooks = taskcluster.get_service('hooks')
self.mercurial_queue = None
self.routes = []
assert isinstance(bus, MessageBus)
self.bus = bus
def connect_queues(self, mercurial_queue):
'''
Save queues to communicate across processes
'''
self.mercurial_queue = mercurial_queue
return True
def build_consumer(self, *args, **kwargs):
'''
Create a consumer runtime for a new thread
'''
raise NotImplementedError
async def create_task(self, extra_env={}):
'''
Create a new task on Taskcluster
'''
assert self.hooks is not None
task = self.hooks.triggerHook(self.group_id, self.hook_id, extra_env)
task_id = task['status']['taskId']
logger.info('Triggered a new task', id=task_id)
# Send task to monitoring
await task_monitoring.add_task(self.group_id, self.hook_id, task_id)
return task_id
class PulseHook(Hook):
'''
A hook triggered by a Pulse message
'''
def __init__(self, group_id, hook_id, pulse_queue, pulse_route, bus):
super().__init__(group_id, hook_id, bus)
self.pulse_queue = pulse_queue
self.pulse_route = pulse_route
def parse(self, payload):
'''
Analyse payload content to extract needed environment
variables to trigger a new task
'''
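        # Implementations are expected to return a list of env dicts (one
        # task is triggered per entry) or None to ignore the message; see
        # got_message below.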
raise NotImplementedError
def build_consumer(self, pulse_user, pulse_password):
'''
Create the pulse consumer triggering the hook
'''
# Use pulse consumer from bot_common
consumer = create_consumer(
pulse_user,
pulse_password,
self.pulse_queue,
self.pulse_route,
self.got_message
)
logger.info('Listening for new messages', queue=self.pulse_queue, route=self.pulse_route) # noqa
return consumer
async def got_message(self, channel, body, envelope, properties):
'''
Generic Pulse consumer callback
'''
assert isinstance(body, bytes), \
'Body is not in bytes'
body = json.loads(body.decode('utf-8'))
# Parse payload
env = self.parse(body)
if env is not None:
if isinstance(env, list):
for e in env:
await self.create_task(extra_env=e)
else:
raise Exception('Unsupported env type')
# Ack the message so it is removed from the broker's queue
await channel.basic_client_ack(delivery_tag=envelope.delivery_tag)
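# A minimal sketch (an illustration, not part of the original file) of a
# concrete hook; the 'payload'/'revision' message fields are assumptions:
#
#     class RevisionHook(PulseHook):
#         def parse(self, payload):
#             revision = payload.get('payload', {}).get('revision')
#             return [{'REVISION': revision}] if revision else None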
|
mavenlin/tensorflow
|
refs/heads/master
|
tensorflow/python/kernel_tests/neon_depthwise_conv_op_test.py
|
57
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for neon kernel for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def ConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 35, 35, 2],
[4, 147, 147, 2], [3, 299, 299, 3], [5, 183, 183, 1]]
filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [5, 5, 2, 1],
[3, 3, 2, 8], [2, 2, 3, 8], [5, 5, 1, 2]]
out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 35, 35, 2],
[4, 49, 49, 16], [3, 150, 150, 24], [5, 92, 92, 2]]
strides = [1, 1, 1, 1, 3, 2, 2]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
  paddings = [SAME, SAME, SAME, SAME, VALID, SAME, SAME]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def CheckGradConfigsToTest():
"""Iterator for different convolution shapes, strides and paddings.
compute_gradient_error() is very expensive. So the configs should be
relatively small.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
convolution parameters.
"""
input_sizes = [[2, 5, 8, 1], [4, 5, 5, 1], [2, 4, 4, 2], [1, 15, 15, 2],
[2, 15, 16, 1]]
filter_sizes = [[4, 4, 1, 2], [2, 2, 1, 2], [3, 1, 2, 2], [1, 3, 2, 1],
[3, 3, 1, 2]]
out_sizes = [[2, 5, 8, 2], [4, 2, 2, 2], [2, 4, 4, 4], [1, 15, 15, 2],
[2, 5, 5, 2]]
strides = [1, 2, 1, 1, 3]
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [SAME, VALID, SAME, SAME, VALID]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
class DepthwiseConv2DTest(test.TestCase):
  # This tests that depthwise_conv2d and depthwise_conv2d_native
  # produce the same results. It also tests that the NCHW and NHWC
  # data formats agree, by checking that depthwise_conv2d_native with
  # 'NCHW' format (with transposition) matches the 'NHWC' format using
  # the higher-level interface.
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
stride,
padding,
use_gpu,
data_format="NHWC"):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
use_gpu: Whether to use GPU.
data_format: The data_format of the input. "NHWC" or "NCHW".
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input and filter tensor with numbers incrementing from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
with sess.graph._kernel_label_map({"DepthwiseConv2dNative": "neon"}):
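        # _kernel_label_map forces ops created in this scope to use the
        # kernel registered under the "neon" label for DepthwiseConv2dNative.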
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
native_t1 = t1
strides = [1, stride, stride, 1]
if data_format == "NCHW":
          # Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
conv_native = nn_ops.depthwise_conv2d_native(
native_t1,
t2,
strides=strides,
data_format=data_format,
padding=padding)
if data_format == "NCHW":
# Transpose back from NCHW to NHWC
conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
conv_interface = nn_impl.depthwise_conv2d(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
native_result = sess.run(conv_native)
interface_result = sess.run(conv_interface)
print("depthwise conv_2d: ", tensor_in_sizes, "*", filter_in_sizes,
", stride:", stride, ", padding: ", padding, ", max diff: ",
np.amax(np.absolute(native_result - interface_result)))
self.assertArrayNear(
np.ravel(native_result), np.ravel(interface_result), 1e-5)
self.assertShapeEqual(native_result, conv_native)
self.assertShapeEqual(native_result, conv_interface)
def testDepthwiseConv2D(self):
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Processing ", index, "th config.")
if index == 2:
self._VerifyValues(
input_size, filter_size, stride, padding, use_gpu=True)
self._VerifyValues(
input_size, filter_size, stride, padding, use_gpu=False)
def testDepthwiseConv2DFormat(self):
if not test.is_gpu_available():
return
for index, (input_size, filter_size, _, stride,
padding) in enumerate(ConfigsToTest()):
print("Processing ", index, "th config.")
self._VerifyValues(
input_size,
filter_size,
stride,
padding,
use_gpu=True,
data_format="NCHW")
# This is testing against hand calculated results.
def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected, use_gpu):
"""Verifies the output values of the depthwise convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[filter_rows, filter_cols, input_depth, depth_multiplier].
stride: Stride.
padding: Padding type.
expected: An array containing the expected operation outputs.
use_gpu: Whether to use GPU.
"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
with self.test_session(use_gpu=use_gpu) as sess:
with sess.graph._kernel_label_map({"DepthwiseConv2dNative": "neon"}):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t1.set_shape(tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
conv = nn_ops.depthwise_conv2d_native(
t1, t2, strides=[1, stride, stride, 1], padding=padding)
value = sess.run(conv)
print("value = ", value)
self.assertArrayNear(expected, np.ravel(value), 1e-5)
self.assertShapeEqual(value, conv)
def testConv2D2x2Filter(self):
# The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
#
# [ (1.0, 2.0), (3.0, 4.0), ( 5.0, 6.0) ]
# [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
# We can view this as two inputs
#
# input depth 0:
#
# [ 1.0, 3.0, 5.0 ]
# [ 7.0, 9.0, 11.0 ]
#
# input depth 1:
#
# [ 2.0, 4.0, 6.0 ]
# [ 8.0, 10.0, 12.0 ]
#
# The filter looks like this (it has two 2 x 2 patches, each generating 2
# depths):
#
# filter #0:
#
# [ (1.0, 3.0), ( 5.0, 7.0)]
# [ (9.0, 11.0), (13.0, 15.0)]
#
# filter #1:
#
# [ ( 2.0, 4.0), ( 6.0, 8.0)]
# [ (10.0, 12.0), (14.0, 16.0)]
#
# So the outputs are:
#
# (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
# 1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
# (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
# 1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
# (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
# 2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
# (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
# 2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
#
# (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
# 3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
# (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
# 3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
# (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
# 4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
# (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
# 4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=False)
self._VerifyHandValues(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
stride=1,
padding="VALID",
expected=expected_output,
use_gpu=True)
if __name__ == "__main__":
test.main()
|
DarkArtek/FFXIVITAFC
|
refs/heads/master
|
allauth/socialaccount/providers/eveonline/views.py
|
10
|
import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import EveOnlineProvider
class EveOnlineOAuth2Adapter(OAuth2Adapter):
provider_id = EveOnlineProvider.id
access_token_url = 'https://login.eveonline.com/oauth/token'
authorize_url = 'https://login.eveonline.com/oauth/authorize'
profile_url = 'https://login.eveonline.com/oauth/verify'
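    # The verify endpoint describes the token's owner (fields such as
    # CharacterID and CharacterName); its JSON body is stored as extra_data.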
def complete_login(self, request, app, token, **kwargs):
resp = requests.get(self.profile_url,
headers={'Authorization': 'Bearer ' + token.token})
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(EveOnlineOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(EveOnlineOAuth2Adapter)
|
garbled1/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/apache2_module.py
|
23
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2013-2014, Christian Berendt <berendt@b1-systems.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apache2_module
version_added: 1.6
author:
- Christian Berendt (@berendt)
- Ralf Hertel (@n0trax)
- Robin Roth (@robinro)
short_description: enables/disables a module of the Apache2 webserver
description:
- Enables or disables a specified module of the Apache2 webserver.
options:
name:
description:
- name of the module to enable/disable
required: true
force:
description:
- force disabling of default modules and override Debian warnings
required: false
choices: ['True', 'False']
default: False
version_added: "2.1"
state:
description:
- indicate the desired state of the resource
choices: ['present', 'absent']
default: present
ignore_configcheck:
description:
- Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
choices: ['True', 'False']
default: False
version_added: "2.3"
requirements: ["a2enmod","a2dismod"]
'''
EXAMPLES = '''
# enables the Apache2 module "wsgi"
- apache2_module:
state: present
name: wsgi
# disables the Apache2 module "wsgi"
- apache2_module:
state: absent
name: wsgi
# disable default modules for Debian
- apache2_module:
state: absent
name: autoindex
force: True
# disable mpm_worker and ignore warnings about missing mpm module
- apache2_module:
state: absent
name: mpm_worker
ignore_configcheck: True
'''
RETURN = '''
result:
description: message about action taken
returned: always
type: string
warnings:
description: list of warning messages
returned: when needed
type: list
rc:
description: return code of underlying command
returned: failed
type: int
stdout:
description: stdout of underlying command
returned: failed
type: string
stderr:
description: stderr of underlying command
returned: failed
type: string
'''
import re
def _run_threaded(module):
control_binary = _get_ctl_binary(module)
result, stdout, stderr = module.run_command("%s -V" % control_binary)
return bool(re.search(r'threaded:[ ]*yes', stdout))
def _get_ctl_binary(module):
for command in ['apache2ctl', 'apachectl']:
ctl_binary = module.get_bin_path(command)
if ctl_binary is not None:
return ctl_binary
module.fail_json(
msg="Neither of apache2ctl nor apachctl found."
" At least one apache control binary is necessary."
)
def _module_is_enabled(module):
control_binary = _get_ctl_binary(module)
name = module.params['name']
ignore_configcheck = module.params['ignore_configcheck']
result, stdout, stderr = module.run_command("%s -M" % control_binary)
if result != 0:
error_msg = "Error executing %s: %s" % (control_binary, stderr)
if ignore_configcheck:
if 'AH00534' in stderr and 'mpm_' in name:
module.warnings.append(
"No MPM module loaded! apache2 reload AND other module actions"
" will fail if no MPM module is loaded immediately."
)
else:
module.warnings.append(error_msg)
return False
else:
module.fail_json(msg=error_msg)
searchstring = ' ' + create_apache_identifier(name)
return searchstring in stdout
def create_apache_identifier(name):
"""
    By convention, if a module is loaded via name, it appears in apache2ctl -M as
name_module.
Some modules don't follow this convention and we use replacements for those."""
# a2enmod name replacement to apache2ctl -M names
text_workarounds = [
('shib2', 'mod_shib'),
('evasive', 'evasive20_module'),
]
# re expressions to extract subparts of names
re_workarounds = [
('php', r'^(php\d)\.'),
]
for a2enmod_spelling, module_name in text_workarounds:
if a2enmod_spelling in name:
return module_name
for search, reexpr in re_workarounds:
if search in name:
try:
rematch = re.search(reexpr, name)
return rematch.group(1) + '_module'
except AttributeError:
pass
return name + '_module'
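# For example: create_apache_identifier('wsgi') -> 'wsgi_module',
# create_apache_identifier('shib2') -> 'mod_shib', and
# create_apache_identifier('php7.2') -> 'php7_module' (via the regex above).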
def _set_state(module, state):
name = module.params['name']
force = module.params['force']
want_enabled = state == 'present'
state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
success_msg = "Module %s %s" % (name, state_string)
if _module_is_enabled(module) != want_enabled:
if module.check_mode:
module.exit_json(changed=True,
result=success_msg,
warnings=module.warnings)
        a2mod_binary_name = a2mod_binary
        a2mod_binary = module.get_bin_path(a2mod_binary_name)
        if a2mod_binary is None:
            module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary_name, a2mod_binary_name))
if not want_enabled and force:
# force exists only for a2dismod on debian
a2mod_binary += ' -f'
result, stdout, stderr = module.run_command("%s %s" % (a2mod_binary, name))
if _module_is_enabled(module) == want_enabled:
module.exit_json(changed=True,
result=success_msg,
warnings=module.warnings)
else:
module.fail_json(msg="Failed to set module %s to %s: %s" % (name, state_string, stdout),
rc=result,
stdout=stdout,
stderr=stderr)
else:
module.exit_json(changed=False,
result=success_msg,
warnings=module.warnings)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
force=dict(required=False, type='bool', default=False),
state=dict(default='present', choices=['absent', 'present']),
ignore_configcheck=dict(required=False, type='bool', default=False),
),
supports_check_mode=True,
)
module.warnings = []
name = module.params['name']
if name == 'cgi' and _run_threaded(module):
module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module %s possible." % name)
if module.params['state'] in ['present', 'absent']:
_set_state(module, module.params['state'])
# import module snippets
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|