repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
JioCloud/nova | refs/heads/master | nova/scheduler/filters/extra_specs_ops.py | 78 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
# 1. The following operations are supported:
# =, s==, s!=, s>=, s>, s<=, s<, <in>, <all-in>, <or>, ==, !=, >=, <=
# 2. Note that <or> is handled in a different way below.
# 3. If the first word in the extra_specs is not one of the operators,
# it is ignored.
# Dispatch table mapping each extra_specs operator token to a two-argument
# predicate, called as method(value, operand).
#
# NOTE(review): '=' deliberately performs a numeric >= comparison rather than
# equality -- this preserves nova's historical "at least" semantics for
# extra_specs; do not "fix" it to ==.
op_methods = {'=': lambda x, y: float(x) >= float(y),
              '<in>': lambda x, y: y in x,
              '<all-in>': lambda x, y: all(val in x for val in y),
              '==': lambda x, y: float(x) == float(y),
              '!=': lambda x, y: float(x) != float(y),
              '>=': lambda x, y: float(x) >= float(y),
              '<=': lambda x, y: float(x) <= float(y),
              's==': operator.eq,
              's!=': operator.ne,
              's<': operator.lt,
              's<=': operator.le,
              's>': operator.gt,
              's>=': operator.ge}


def match(value, req):
    """Check whether 'value' satisfies the requirement expression 'req'.

    'req' is a whitespace-separated expression such as "= 120", "s== foo",
    "<in> text", "<all-in> a b" or "<or> v1 <or> v2". If the first word is
    not a recognised operator, the requirement only matches when the whole
    string compares equal to 'value'. A None value never matches an
    operator expression.
    """
    words = req.split()
    op = method = None
    if words:
        op = words.pop(0)
        method = op_methods.get(op)

    if op != '<or>' and not method:
        # Not an operator expression: fall back to literal equality.
        return value == req

    if value is None:
        return False

    if op == '<or>':  # Ex: <or> v1 <or> v2 <or> v3
        # 'words' alternates between candidate values and '<or>' keywords.
        # 'while words' (instead of 'while True') guards against a bare
        # "<or>" requirement with no candidates, which previously raised
        # IndexError from words.pop(0).
        while words:
            if words.pop(0) == value:
                return True
            if not words:
                break
            words.pop(0)  # remove the next '<or>' keyword
        return False

    if words:
        try:
            if op == '<all-in>':  # requires a list, not a single word
                return method(value, words)
            return method(value, words[0])
        except ValueError:
            # A numeric operator applied to a non-numeric operand cannot
            # match. Previously the ValueError from float() propagated to
            # the caller instead of being treated as a non-match.
            return False
    return False
|
seaotterman/tensorflow | refs/heads/master | tensorflow/contrib/session_bundle/session_bundle_test.py | 133 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for session_bundle.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import shutil
import numpy as np
from tensorflow.contrib.session_bundle import constants
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.contrib.session_bundle import session_bundle
from tensorflow.core.example.example_pb2 import Example
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.parsing_ops # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
SAVED_MODEL_PATH = (
"python/saved_model/example/saved_model_half_plus_two/00000123")
SESSION_BUNDLE_PATH = "contrib/session_bundle/testdata/half_plus_two/00000123"
def _make_serialized_example(x):
  """Return a serialized tf.Example holding a single float feature "x"."""
  ex = Example()
  ex.features.feature["x"].float_list.value.append(x)
  return ex.SerializeToString()
class SessionBundleLoadTest(test.TestCase):
  """Tests loading and running a legacy (pre-SavedModel) session bundle."""

  def _checkRegressionSignature(self, signatures, sess):
    """Runs the default regression signature and checks y = 0.5 * x + 2."""
    default_signature = signatures.default_signature
    input_name = default_signature.regression_signature.input.tensor_name
    output_name = default_signature.regression_signature.output.tensor_name
    tf_example = [_make_serialized_example(x) for x in [0, 1, 2, 3]]
    y = sess.run([output_name], {input_name: tf_example})
    # The operation is y = 0.5 * x + 2
    self.assertEqual(y[0][0], 2)
    self.assertEqual(y[0][1], 2.5)
    self.assertEqual(y[0][2], 3)
    self.assertEqual(y[0][3], 3.5)

  def _checkNamedSignatures(self, signatures, sess):
    """Runs the named "inputs"/"outputs" signature and checks y = 0.5 * x + 2."""
    named_signatures = signatures.named_signatures
    input_name = (named_signatures["inputs"].generic_signature.map["x"]
                  .tensor_name)
    output_name = (named_signatures["outputs"].generic_signature.map["y"]
                   .tensor_name)
    y = sess.run([output_name], {input_name: np.array([[0], [1], [2], [3]])})
    # The operation is y = 0.5 * x + 2
    self.assertEqual(y[0][0], 2)
    self.assertEqual(y[0][1], 2.5)
    self.assertEqual(y[0][2], 3)
    self.assertEqual(y[0][3], 3.5)

  def testMaybeSessionBundleDir(self):
    """maybe_session_bundle_dir is True only for a genuine session-bundle dir."""
    base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
    self.assertTrue(session_bundle.maybe_session_bundle_dir(base_path))
    base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
    self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))
    base_path = "complete_garbage"
    self.assertFalse(session_bundle.maybe_session_bundle_dir(base_path))

  def testBasic(self):
    """Loads the bundle and checks asset paths plus both exported signatures."""
    base_path = test.test_src_dir_path(SESSION_BUNDLE_PATH)
    ops.reset_default_graph()
    sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
        base_path,
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2}))
    self.assertTrue(sess)
    asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
    with sess.as_default():
      path1, path2 = sess.run(["filename1:0", "filename2:0"])
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)
      collection_def = meta_graph_def.collection_def
      signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
      # assertEquals is a deprecated alias of assertEqual; use the latter.
      self.assertEqual(len(signatures_any), 1)
      signatures = manifest_pb2.Signatures()
      signatures_any[0].Unpack(signatures)
      self._checkRegressionSignature(signatures, sess)
      self._checkNamedSignatures(signatures, sess)

  def testBadPath(self):
    """Loading from a nonexistent path reports the missing meta graph file."""
    base_path = test.test_src_dir_path("/no/such/a/dir")
    ops.reset_default_graph()
    with self.assertRaises(RuntimeError) as cm:
      _, _ = session_bundle.load_session_bundle_from_path(
          base_path,
          target="local",
          config=config_pb2.ConfigProto(device_count={"CPU": 2}))
    self.assertTrue("Expected meta graph file missing" in str(cm.exception))

  def testVarCheckpointV2(self):
    """Same checks as testBasic, against a V2-format variable checkpoint."""
    base_path = test.test_src_dir_path(
        "contrib/session_bundle/testdata/half_plus_two_ckpt_v2/00000123")
    ops.reset_default_graph()
    sess, meta_graph_def = session_bundle.load_session_bundle_from_path(
        base_path,
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2}))
    self.assertTrue(sess)
    asset_path = os.path.join(base_path, constants.ASSETS_DIRECTORY)
    with sess.as_default():
      path1, path2 = sess.run(["filename1:0", "filename2:0"])
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello1.txt")), path1)
      self.assertEqual(
          compat.as_bytes(os.path.join(asset_path, "hello2.txt")), path2)
      collection_def = meta_graph_def.collection_def
      signatures_any = collection_def[constants.SIGNATURES_KEY].any_list.value
      # assertEquals is a deprecated alias of assertEqual; use the latter.
      self.assertEqual(len(signatures_any), 1)
      signatures = manifest_pb2.Signatures()
      signatures_any[0].Unpack(signatures)
      self._checkRegressionSignature(signatures, sess)
      self._checkNamedSignatures(signatures, sess)
class SessionBundleLoadNoVarsTest(test.TestCase):
  """Test the case where there are no variables in the graph."""

  def setUp(self):
    self.base_path = os.path.join(test.get_temp_dir(), "no_vars")
    if not os.path.exists(self.base_path):
      os.mkdir(self.base_path)
    # Create a simple graph with a variable, then convert variables to
    # constants and export the graph.
    with ops.Graph().as_default() as g:
      x = array_ops.placeholder(dtypes.float32, name="x")
      w = variables.Variable(3.0)
      y = math_ops.subtract(w * x, 7.0, name="y")  # pylint: disable=unused-variable
      ops.add_to_collection("meta", "this is meta")
      with self.test_session(graph=g) as session:
        variables.global_variables_initializer().run()
        new_graph_def = graph_util.convert_variables_to_constants(
            session, g.as_graph_def(), ["y"])
      filename = os.path.join(self.base_path, constants.META_GRAPH_DEF_FILENAME)
      saver.export_meta_graph(
          filename, graph_def=new_graph_def, collection_list=["meta"])

  def tearDown(self):
    shutil.rmtree(self.base_path)

  def testGraphWithoutVarsLoadsCorrectly(self):
    """The frozen (variable-free) graph loads and computes y = 3x - 7."""
    session, _ = session_bundle.load_session_bundle_from_path(self.base_path)
    got = session.run(["y:0"], {"x:0": 5.0})[0]
    # assertEquals is a deprecated alias of assertEqual; use the latter.
    self.assertEqual(got, 5.0 * 3.0 - 7.0)
    self.assertEqual(ops.get_collection("meta"), [b"this is meta"])
# Standard TensorFlow test entry point: runs all test cases in this module.
if __name__ == "__main__":
  test.main()
|
j7nn7k/django-flashpacker | refs/heads/master | flashpacker/runtests/settings.py | 1 | # Django settings for testproject project.
from __future__ import unicode_literals
# NOTE: these settings exist only to run the flashpacker test suite; DEBUG and
# a committed SECRET_KEY are acceptable here but must never be reused in a
# production deployment.
DEBUG = True
# NOTE(review): TEMPLATE_DEBUG / TEMPLATE_LOADERS / TEMPLATE_DIRS were folded
# into the TEMPLATES setting in Django >= 1.8 -- confirm the Django version
# this test suite targets before modernising.
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = True
ALLOWED_HOSTS = ['*']
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# In-directory SQLite database; cheap and self-contained for test runs.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',  # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'sqlite.db',  # Or path to database file if using sqlite3.
        'USER': '',  # Not used with sqlite3.
        'PASSWORD': '',  # Not used with sqlite3.
        'HOST': '',  # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',  # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'de-DE'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# Make this unique, and don't share it with anybody.
# (Test-only key; regenerate for any real deployment.)
SECRET_KEY = 'u@x-aj9(hoh#rb-^ymf#g2jx_hp0vj7u5#b@ag1n^sku9e!%cy'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
# NOTE(review): MIDDLEWARE_CLASSES was renamed MIDDLEWARE in Django 1.10 --
# confirm the target Django version before renaming.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    # The app under test, plus its bundled test app.
    'flashpacker',
    'flashpacker.tests',
)
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'auth.User'
|
nilnvoid/wagtail | refs/heads/master | wagtail/wagtailcore/blocks/base.py | 1 | from __future__ import absolute_import, unicode_literals
import collections
from importlib import import_module
from django import forms
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.template.loader import render_to_string
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
# unicode_literals ensures that any render / __str__ methods returning HTML via calls to mark_safe / format_html
# return a SafeText, not SafeBytes; necessary so that it doesn't get re-encoded when the template engine
# calls force_text, which would cause it to lose its 'safe' flag
__all__ = ['BaseBlock', 'Block', 'BoundBlock', 'DeclarativeSubBlocksMetaclass', 'BlockWidget', 'BlockField']
# =========================================
# Top-level superclasses and helper objects
# =========================================
class BaseBlock(type):
    """
    Metaclass for Block. For each new class it merges the class's own nested
    Meta (if any) with the '_meta_class' of every base, producing a combined
    '<Name>Meta' class stored as '_meta_class' on the new class.
    """
    def __new__(mcs, name, bases, attrs):
        declared_meta = attrs.pop('Meta', None)
        cls = super(BaseBlock, mcs).__new__(mcs, name, bases, attrs)

        # Gather Meta candidates: the one declared on this class first, then
        # the merged Meta of each base, skipping any that are absent.
        candidates = [declared_meta]
        candidates.extend(getattr(base, '_meta_class', None) for base in bases)
        meta_bases = tuple(candidate for candidate in candidates if candidate)

        cls._meta_class = type(str(name + 'Meta'), meta_bases, {})
        return cls
class Block(six.with_metaclass(BaseBlock, object)):
    # Name assigned via set_name() when this block is used as a sub-block of a
    # container; empty string for anonymous/top-level use.
    name = ''
    # Global counter giving each block instance a stable creation order
    # (read in __init__ to build definition_prefix and to sort declared blocks).
    creation_counter = 0

    # Context variable name under which the block value is exposed to
    # templates, in addition to 'self' (see get_context()).
    TEMPLATE_VAR = 'value'

    class Meta(object):
        # Default options; overridable per-subclass via a nested Meta class
        # (merged by the BaseBlock metaclass) or per-instance via constructor
        # keyword arguments (see __init__).
        label = None
        icon = "placeholder"
        classname = None

    """
    Setting a 'dependencies' list serves as a shortcut for the common case where a complex block type
    (such as struct, list or stream) relies on one or more inner block objects, and needs to ensure that
    the responses from the 'media' and 'html_declarations' include the relevant declarations for those inner
    blocks, as well as its own. Specifying these inner block objects in a 'dependencies' list means that
    the base 'media' and 'html_declarations' methods will return those declarations; the outer block type can
    then add its own declarations to the list by overriding those methods and using super().
    """
    dependencies = []
    def __new__(cls, *args, **kwargs):
        """Create the instance, recording constructor args for deconstruct()."""
        # adapted from django.utils.deconstruct.deconstructible; capture the arguments
        # so that we can return them in the 'deconstruct' method (used when
        # serialising block definitions into Django migrations)
        obj = super(Block, cls).__new__(cls)
        obj._constructor_args = (args, kwargs)
        return obj
def all_blocks(self):
"""
Return a list consisting of self and all block objects that are direct or indirect dependencies
of this block
"""
result = [self]
for dep in self.dependencies:
result.extend(dep.all_blocks())
return result
def all_media(self):
media = forms.Media()
for block in self.all_blocks():
media += block.media
return media
def all_html_declarations(self):
declarations = filter(bool, [block.html_declarations() for block in self.all_blocks()])
return mark_safe('\n'.join(declarations))
    def __init__(self, **kwargs):
        """
        Instantiate the block. Keyword arguments are overlaid onto the merged
        Meta options (label, icon, classname, ...) as per-instance settings.
        """
        self.meta = self._meta_class()
        for attr, value in kwargs.items():
            setattr(self.meta, attr, value)
        # Increase the creation counter, and save our local copy.
        self.creation_counter = Block.creation_counter
        Block.creation_counter += 1
        # Unique per block definition; html_declarations() element IDs must
        # begin with this prefix.
        self.definition_prefix = 'blockdef-%d' % self.creation_counter
        # May be replaced later by a label derived from the name (set_name).
        self.label = self.meta.label or ''

    def set_name(self, name):
        """Assign this block's name, deriving a human-readable label if none was given."""
        self.name = name
        if not self.meta.label:
            # e.g. 'first_name' -> 'First name'
            self.label = capfirst(force_text(name).replace('_', ' '))
    @property
    def media(self):
        # No form media requirements by default; subclasses override this to
        # declare the JS/CSS needed on the editing form.
        return forms.Media()

    def html_declarations(self):
        """
        Return an HTML fragment to be rendered on the form page once per block definition -
        as opposed to once per occurrence of the block. For example, the block definition
            ListBlock(label="Shopping list", CharBlock(label="Product"))
        needs to output a <script type="text/template"></script> block containing the HTML for
        a 'product' text input, so that these can be dynamically added to the list. This
        template block must only occur once in the page, even if there are multiple 'shopping list'
        blocks on the page.

        Any element IDs used in this HTML fragment must begin with definition_prefix.
        (More precisely, they must either be definition_prefix itself, or begin with definition_prefix
        followed by a '-' character)
        """
        return ''
    def js_initializer(self):
        """
        Returns a Javascript expression string, or None if this block does not require any
        Javascript behaviour. This expression evaluates to an initializer function, a function that
        takes the ID prefix and applies JS behaviour to the block instance with that value and prefix.

        The parent block of this block (or the top-level page code) must ensure that this
        expression is not evaluated more than once. (The resulting initializer function can and will be
        called as many times as there are instances of this block, though.)
        """
        return None

    def render_form(self, value, prefix='', errors=None):
        """
        Render the form-editing HTML for this block with 'value' as its content.
        Subclasses must implement this.
        """
        raise NotImplementedError('%s.render_form' % self.__class__)

    def value_from_datadict(self, data, files, prefix):
        """Extract this block's value from a form submission. Subclasses must implement."""
        raise NotImplementedError('%s.value_from_datadict' % self.__class__)

    def value_omitted_from_data(self, data, files, name):
        """
        Used only for top-level blocks wrapped by BlockWidget (i.e.: typically only StreamBlock)
        to inform ModelForm logic on Django >=1.10.2 whether the field is absent from the form
        submission (and should therefore revert to the field default).
        """
        return name not in data
    def bind(self, value, prefix=None, errors=None):
        """
        Return a BoundBlock which represents the association of this block definition with a value
        and a prefix (and optionally, a ValidationError to be rendered).
        BoundBlock primarily exists as a convenience to allow rendering within templates:
        bound_block.render() rather than blockdef.render(value, prefix) which can't be called from
        within a template.
        """
        return BoundBlock(self, value, prefix=prefix, errors=errors)

    def get_default(self):
        """
        Return this block's default value (conventionally found in self.meta.default),
        converted to the value type expected by this block. This caters for the case
        where that value type is not something that can be expressed statically at
        model definition time (e.g. something like StructValue which incorporates a
        pointer back to the block definition object).
        """
        # NOTE: the base Meta class does not define 'default'; subclasses are
        # expected to provide it (or override this method).
        return self.meta.default

    def prototype_block(self):
        """
        Return a BoundBlock that can be used as a basis for new empty block instances to be added on the fly
        (new list items, for example). This will have a prefix of '__PREFIX__' (to be dynamically replaced with
        a real prefix when it's inserted into the page) and a value equal to the block's default value.
        """
        return self.bind(self.get_default(), '__PREFIX__')
    def clean(self, value):
        """
        Validate value and return a cleaned version of it, or throw a ValidationError if validation fails.
        The thrown ValidationError instance will subsequently be passed to render() to display the
        error message; the ValidationError must therefore include all detail necessary to perform that
        rendering, such as identifying the specific child block(s) with errors, in the case of nested
        blocks. (It is suggested that you use the 'params' attribute for this; using error_list /
        error_dict is unreliable because Django tends to hack around with these when nested.)

        Base implementation accepts any value unchanged.
        """
        return value

    def to_python(self, value):
        """
        Convert 'value' from a simple (JSON-serialisable) value to a (possibly complex) Python value to be
        used in the rest of the block API and within front-end templates. In simple cases this might be
        the value itself; alternatively, it might be a 'smart' version of the value which behaves mostly
        like the original value but provides a native HTML rendering when inserted into a template; or it
        might be something totally different (e.g. an image chooser will use the image ID as the clean
        value, and turn this back into an actual image object here).
        """
        return value

    def get_prep_value(self, value):
        """
        The reverse of to_python; convert the python value into JSON-serialisable form.
        """
        return value
def get_context(self, value):
"""
Return a dict of context variables (derived from the block value, or otherwise)
to be added to the template context when rendering this value through a template.
"""
return {
'self': value,
self.TEMPLATE_VAR: value,
}
def render(self, value, context=None):
"""
Return a text rendering of 'value', suitable for display on templates. By default, this will
use a template (with the passed context, supplemented by the result of get_context) if a
'template' property is specified on the block, and fall back on render_basic otherwise.
"""
template = getattr(self.meta, 'template', None)
if not template:
return self.render_basic(value, context=context)
if context is None:
new_context = self.get_context(value)
else:
new_context = dict(context)
new_context.update(self.get_context(value))
return mark_safe(render_to_string(template, new_context))
    def render_basic(self, value, context=None):
        """
        Return a text rendering of 'value', suitable for display on templates. render() will fall back on
        this if the block does not define a 'template' property.
        """
        return force_text(value)

    def get_searchable_content(self, value):
        """
        Returns a list of strings containing text content within this block to be used in a search engine.
        Base implementation indexes nothing; subclasses override as appropriate.
        """
        return []

    def check(self, **kwargs):
        """
        Hook for the Django system checks framework -
        returns a list of django.core.checks.Error objects indicating validity errors in the block.
        Base implementation reports no errors.
        """
        return []
def _check_name(self, **kwargs):
"""
Helper method called by container blocks as part of the system checks framework,
to validate that this block's name is a valid identifier.
(Not called universally, because not all blocks need names)
"""
errors = []
if not self.name:
errors.append(checks.Error(
"Block name %r is invalid" % self.name,
hint="Block name cannot be empty",
obj=kwargs.get('field', self),
id='wagtailcore.E001',
))
if ' ' in self.name:
errors.append(checks.Error(
"Block name %r is invalid" % self.name,
hint="Block names cannot contain spaces",
obj=kwargs.get('field', self),
id='wagtailcore.E001',
))
if '-' in self.name:
errors.append(checks.Error(
"Block name %r is invalid" % self.name,
"Block names cannot contain dashes",
obj=kwargs.get('field', self),
id='wagtailcore.E001',
))
if self.name and self.name[0].isdigit():
errors.append(checks.Error(
"Block name %r is invalid" % self.name,
"Block names cannot begin with a digit",
obj=kwargs.get('field', self),
id='wagtailcore.E001',
))
return errors
    def id_for_label(self, prefix):
        """
        Return the ID to be used as the 'for' attribute of <label> elements that refer to this block,
        when the given field prefix is in use. Return None if no 'for' attribute should be used.
        """
        return None

    @property
    def required(self):
        """
        Flag used to determine whether labels for this block should display a 'required' asterisk.
        False by default, since Block does not provide any validation of its own - it's up to subclasses
        to define what required-ness means.
        """
        return False
    def deconstruct(self):
        """
        Return a (path, args, kwargs) triple describing how to reconstruct this
        block, for use by Django migrations (mirrors the contract of
        django.utils.deconstruct.deconstructible).
        """
        # adapted from django.utils.deconstruct.deconstructible
        module_name = self.__module__
        name = self.__class__.__name__
        # Make sure it's actually there and not an inner class
        module = import_module(module_name)
        if not hasattr(module, name):
            raise ValueError(
                "Could not find object %s in %s.\n"
                "Please note that you cannot serialize things like inner "
                "classes. Please move the object into the main module "
                "body to use migrations.\n"
                % (name, module_name))
        # if the module defines a DECONSTRUCT_ALIASES dictionary, see if the class has an entry in there;
        # if so, use that instead of the real path
        try:
            path = module.DECONSTRUCT_ALIASES[self.__class__]
        except (AttributeError, KeyError):
            path = '%s.%s' % (module_name, name)
        # _constructor_args was captured in __new__.
        return (
            path,
            self._constructor_args[0],
            self._constructor_args[1],
        )
    def __eq__(self, other):
        """
        The deep_deconstruct method in django.db.migrations.autodetector.MigrationAutodetector does not
        recurse into arbitrary lists and dicts. As a result, when it is passed a field such as:

            StreamField([
                ('heading', CharBlock()),
            ])

        the CharBlock object will be left in its constructed form. This causes problems when
        MigrationAutodetector compares two separate instances of the StreamField from different project
        states: since the CharBlocks are different objects, it will report a change where there isn't one.

        To prevent this, we implement the equality operator on Block instances such that the two CharBlocks
        are reported as equal. Since block objects are intended to be immutable with the exception of
        set_name(), it is sufficient to compare the 'name' property and the constructor args/kwargs of the
        two block objects. The 'deconstruct' method provides a convenient way to access the latter.
        """
        if not isinstance(other, Block):
            # if the other object isn't a block at all, it clearly isn't equal.
            return False

        # Note that we do not require the two blocks to be of the exact same class. This is because
        # we may wish the following blocks to be considered equal:
        #
        # class FooBlock(StructBlock):
        #     first_name = CharBlock()
        #     surname = CharBlock()
        #
        # class BarBlock(StructBlock):
        #     first_name = CharBlock()
        #     surname = CharBlock()
        #
        # FooBlock() == BarBlock() == StructBlock([('first_name', CharBlock()), ('surname': CharBlock())])
        #
        # For this to work, StructBlock will need to ensure that 'deconstruct' returns the same signature
        # in all of these cases, including reporting StructBlock as the path:
        #
        # FooBlock().deconstruct() == (
        #     'wagtail.wagtailcore.blocks.StructBlock',
        #     [('first_name', CharBlock()), ('surname': CharBlock())],
        #     {}
        # )
        #
        # This has the bonus side effect that the StructBlock field definition gets frozen into
        # the migration, rather than leaving the migration vulnerable to future changes to FooBlock / BarBlock
        # in models.py.
        return (self.name == other.name) and (self.deconstruct() == other.deconstruct())

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it explicitly.
        return not self.__eq__(other)

    # Making block instances hashable in a way that's consistent with __eq__ is non-trivial, because
    # self.deconstruct() is liable to contain unhashable data (e.g. lists and dicts). So let's set
    # Block to be explicitly unhashable - Python 3 will do this automatically when defining __eq__,
    # but Python 2 won't, and we'd like the behaviour to be consistent on both.
    __hash__ = None
@python_2_unicode_compatible
class BoundBlock(object):
    """
    The association of a block definition with a value (and optionally a form
    prefix and validation errors). Exists chiefly so that templates can call
    bound_block.render() without passing the value/prefix explicitly
    (see Block.bind).
    """
    def __init__(self, block, value, prefix=None, errors=None):
        self.block = block    # the Block definition object
        self.value = value
        self.prefix = prefix  # form field prefix, if bound for form rendering
        self.errors = errors  # ValidationError to display, if any

    def render_form(self):
        """Render the form-editing HTML for this value (with any errors)."""
        return self.block.render_form(self.value, self.prefix, errors=self.errors)

    def render(self, context=None):
        """Render the front-end HTML for this value."""
        return self.block.render(self.value, context=context)

    def render_as_block(self, context=None):
        """
        Alias for render; the include_block tag will specifically check for the presence of a method
        with this name. (This is because {% include_block %} is just as likely to be invoked on a bare
        value as a BoundBlock. If we looked for a `render` method instead, we'd run the risk of finding
        an unrelated method that just happened to have that name - for example, when called on a
        PageChooserBlock it could end up calling page.render.)
        """
        return self.block.render(self.value, context=context)

    def id_for_label(self):
        """Return the ID for <label> 'for' attributes referring to this block."""
        return self.block.id_for_label(self.prefix)

    def __str__(self):
        """Render the value according to the block's native rendering"""
        return self.block.render(self.value)
class DeclarativeSubBlocksMetaclass(BaseBlock):
    """
    Metaclass that collects sub-blocks declared on the base classes.
    (cheerfully stolen from https://github.com/django/django/blob/master/django/forms/forms.py)
    """
    def __new__(mcs, name, bases, attrs):
        # Collect sub-blocks declared on the current class.
        # These are available on the class as `declared_blocks`.
        current_blocks = []
        for key, value in list(attrs.items()):
            if isinstance(value, Block):
                current_blocks.append((key, value))
                value.set_name(key)
                # Remove the block from the class attrs so the attribute does
                # not shadow the declared_blocks / base_blocks entries.
                attrs.pop(key)
        # Preserve declaration order via Block.creation_counter.
        current_blocks.sort(key=lambda x: x[1].creation_counter)
        attrs['declared_blocks'] = collections.OrderedDict(current_blocks)
        new_class = (super(DeclarativeSubBlocksMetaclass, mcs).__new__(
            mcs, name, bases, attrs))
        # Walk through the MRO, collecting all inherited sub-blocks, to make
        # the combined `base_blocks`.
        base_blocks = collections.OrderedDict()
        for base in reversed(new_class.__mro__):
            # Collect sub-blocks from base class.
            if hasattr(base, 'declared_blocks'):
                base_blocks.update(base.declared_blocks)
            # Field shadowing: a subclass setting an attribute to None removes
            # the sub-block of that name inherited from a base class.
            for attr, value in base.__dict__.items():
                if value is None and attr in base_blocks:
                    base_blocks.pop(attr)
        new_class.base_blocks = base_blocks
        return new_class
# ========================
# django.forms integration
# ========================
class BlockWidget(forms.Widget):
    """Wraps a block object as a widget so that it can be incorporated into a Django form"""

    # Flag used by Django 1.10.1 (only) to indicate that this widget will not necessarily submit
    # a postdata item with a name that matches the field name -
    # see https://github.com/django/django/pull/7068, https://github.com/wagtail/wagtail/issues/2994
    dont_use_model_field_default_for_empty_data = True

    def __init__(self, block_def, attrs=None):
        super(BlockWidget, self).__init__(attrs=attrs)
        # The Block definition this widget renders and parses values for.
        self.block_def = block_def

    def render_with_errors(self, name, value, attrs=None, errors=None):
        """Render the block's form HTML (with any validation errors), plus the
        block's JS initializer snippet if it requires one."""
        bound_block = self.block_def.bind(value, prefix=name, errors=errors)
        js_initializer = self.block_def.js_initializer()
        if js_initializer:
            # Per Block.js_initializer's contract, the initializer expression
            # is evaluated once here and applied to this field's prefix.
            js_snippet = """
                <script>
                    $(function() {
                        var initializer = %s;
                        initializer('%s');
                    })
                </script>
            """ % (js_initializer, name)
        else:
            js_snippet = ''
        return mark_safe(bound_block.render_form() + js_snippet)

    def render(self, name, value, attrs=None):
        # Standard Django widget entry point: render without error annotations.
        return self.render_with_errors(name, value, attrs=attrs, errors=None)

    @property
    def media(self):
        # Media from the block definition and all of its dependencies.
        return self.block_def.all_media()

    def value_from_datadict(self, data, files, name):
        # Delegate form-data extraction to the block definition.
        return self.block_def.value_from_datadict(data, files, name)

    def value_omitted_from_data(self, data, files, name):
        # Delegate "was this field submitted at all?" to the block definition.
        return self.block_def.value_omitted_from_data(data, files, name)
class BlockField(forms.Field):
    """Wraps a block object as a form field so that it can be incorporated into a Django form"""
    def __init__(self, block=None, **kwargs):
        # 'block' is required; it is a keyword argument presumably so that a
        # missing block yields this explicit error rather than a TypeError.
        if block is None:
            raise ImproperlyConfigured("BlockField was not passed a 'block' object")
        self.block = block
        if 'widget' not in kwargs:
            # Default to a widget wrapping this block unless one was supplied.
            kwargs['widget'] = BlockWidget(block)
        super(BlockField, self).__init__(**kwargs)

    def clean(self, value):
        # Delegate validation/cleaning to the wrapped block.
        return self.block.clean(value)
# Consulted by Block.deconstruct(): classes listed here deconstruct to the
# given dotted path instead of their real module location, keeping migration
# paths stable.
DECONSTRUCT_ALIASES = {
    Block: 'wagtail.wagtailcore.blocks.Block',
}
|
slayher/android_kernel_samsung_trlte | refs/heads/cm-12.0 | tools/perf/python/twatch.py | 7370 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
	"""Open a perf event stream on every CPU and print task/comm samples
	forever (Ctrl-C to stop).  Requires the 'perf' python binding built
	from tools/perf."""
	cpus = perf.cpu_map()
	threads = perf.thread_map()
	# NOTE(review): perf.SAMPLE_TID appears twice in sample_type below; the
	# second occurrence was possibly meant to be perf.SAMPLE_TIME -- confirm.
	evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
			   wakeup_events = 1, watermark = 1,
			   sample_id_all = 1,
			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
	evsel.open(cpus = cpus, threads = threads);
	evlist = perf.evlist(cpus, threads)
	evlist.add(evsel)
	evlist.mmap()
	# poll(timeout = -1) blocks until at least one ring buffer has data.
	while True:
		evlist.poll(timeout = -1)
		for cpu in cpus:
			event = evlist.read_on_cpu(cpu)
			if not event:
				continue
			print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
								event.sample_pid,
								event.sample_tid),
			print event
if __name__ == '__main__':
	main()
|
nodice73/hspipeline | refs/heads/master | web/env/lib/python2.7/site-packages/markupsafe/__init__.py | 144 | # -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
import string
from collections import Mapping
from markupsafe._compat import text_type, string_types, int_types, \
unichr, iteritems, PY2
__version__ = "1.0"
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
# Matches HTML comments and tags; used by Markup.striptags().
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
# Matches a single entity reference such as '&amp;'; used by Markup.unescape().
_entity_re = re.compile(r'&([^& ;]+);')
class Markup(text_type):
    r"""Marks a string as being safe for inclusion in HTML/XML output without
    needing to be escaped. This implements the `__html__` interface a couple
    of frameworks and web applications use. :class:`Markup` is a direct
    subclass of `unicode` and provides all the methods of `unicode` just that
    it escapes arguments passed and always returns `Markup`.
    The `escape` function returns markup objects so that double escaping can't
    happen.
    The constructor of the :class:`Markup` class can be used for three
    different things: When passed an unicode object it's assumed to be safe,
    when passed an object with an HTML representation (has an `__html__`
    method) that representation is used, otherwise the object passed is
    converted into a unicode string and then assumed to be safe:
    >>> Markup("Hello <em>World</em>!")
    Markup(u'Hello <em>World</em>!')
    >>> class Foo(object):
    ...     def __html__(self):
    ...         return '<a href="#">foo</a>'
    ...
    >>> Markup(Foo())
    Markup(u'<a href="#">foo</a>')
    If you want object passed being always treated as unsafe you can use the
    :meth:`escape` classmethod to create a :class:`Markup` object:
    >>> Markup.escape("Hello <em>World</em>!")
    Markup(u'Hello <em>World</em>!')
    Operations on a markup string are markup aware which means that all
    arguments are passed through the :func:`escape` function:
    >>> em = Markup("<em>%s</em>")
    >>> em % "foo & bar"
    Markup(u'<em>foo &amp; bar</em>')
    >>> strong = Markup("<strong>%(text)s</strong>")
    >>> strong % {'text': '<blink>hacker here</blink>'}
    Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>')
    >>> Markup("<em>Hello</em> ") + "<foo>"
    Markup(u'<em>Hello</em> &lt;foo&gt;')
    """
    # No per-instance state beyond the string data itself.
    __slots__ = ()
    def __new__(cls, base=u'', encoding=None, errors='strict'):
        # Objects exposing __html__ are trusted to provide safe markup.
        if hasattr(base, '__html__'):
            base = base.__html__()
        if encoding is None:
            return text_type.__new__(cls, base)
        return text_type.__new__(cls, base, encoding, errors)
    def __html__(self):
        # A Markup instance is already safe markup.
        return self
    def __add__(self, other):
        # Concatenation escapes the other operand first.
        if isinstance(other, string_types) or hasattr(other, '__html__'):
            return self.__class__(super(Markup, self).__add__(self.escape(other)))
        return NotImplemented
    def __radd__(self, other):
        if hasattr(other, '__html__') or isinstance(other, string_types):
            return self.escape(other).__add__(self)
        return NotImplemented
    def __mul__(self, num):
        if isinstance(num, int_types):
            return self.__class__(text_type.__mul__(self, num))
        return NotImplemented
    __rmul__ = __mul__
    def __mod__(self, arg):
        # %-formatting: wrap each argument so it is escaped on demand.
        if isinstance(arg, tuple):
            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
        else:
            arg = _MarkupEscapeHelper(arg, self.escape)
        return self.__class__(text_type.__mod__(self, arg))
    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            text_type.__repr__(self)
        )
    def join(self, seq):
        # Every joined element is escaped before joining.
        return self.__class__(text_type.join(self, map(self.escape, seq)))
    join.__doc__ = text_type.join.__doc__
    def split(self, *args, **kwargs):
        return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
    split.__doc__ = text_type.split.__doc__
    def rsplit(self, *args, **kwargs):
        return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
    rsplit.__doc__ = text_type.rsplit.__doc__
    def splitlines(self, *args, **kwargs):
        return list(map(self.__class__, text_type.splitlines(
            self, *args, **kwargs)))
    splitlines.__doc__ = text_type.splitlines.__doc__
    def unescape(self):
        r"""Unescape markup again into an text_type string. This also resolves
        known HTML4 and XHTML entities:
        >>> Markup("Main &raquo; <em>About</em>").unescape()
        u'Main \xbb <em>About</em>'
        """
        # Imported here (not at module level) to avoid a circular import:
        # _constants is only needed when unescaping.
        from markupsafe._constants import HTML_ENTITIES
        def handle_match(m):
            name = m.group(1)
            if name in HTML_ENTITIES:
                return unichr(HTML_ENTITIES[name])
            try:
                if name[:2] in ('#x', '#X'):
                    return unichr(int(name[2:], 16))
                elif name.startswith('#'):
                    return unichr(int(name[1:]))
            except ValueError:
                pass
            # Don't modify unexpected input.
            return m.group()
        return _entity_re.sub(handle_match, text_type(self))
    def striptags(self):
        r"""Unescape markup into an text_type string and strip all tags. This
        also resolves known HTML4 and XHTML entities. Whitespace is
        normalized to one:
        >>> Markup("Main &raquo;  <em>About</em>").striptags()
        u'Main \xbb About'
        """
        stripped = u' '.join(_striptags_re.sub('', self).split())
        return Markup(stripped).unescape()
    @classmethod
    def escape(cls, s):
        """Escape the string. Works like :func:`escape` with the difference
        that for subclasses of :class:`Markup` this function would return the
        correct subclass.
        """
        rv = escape(s)
        if rv.__class__ is not cls:
            return cls(rv)
        return rv
    # Class-body helper: builds a wrapper around a text_type method that
    # escapes all string arguments; it is deleted from the class namespace
    # at the bottom of the class body.
    def make_simple_escaping_wrapper(name):
        orig = getattr(text_type, name)
        def func(self, *args, **kwargs):
            args = _escape_argspec(list(args), enumerate(args), self.escape)
            _escape_argspec(kwargs, iteritems(kwargs), self.escape)
            return self.__class__(orig(self, *args, **kwargs))
        func.__name__ = orig.__name__
        func.__doc__ = orig.__doc__
        return func
    # Install escaping wrappers for all these str methods by writing into
    # the class namespace via locals() (legal inside a class body).
    for method in '__getitem__', 'capitalize', \
        'title', 'lower', 'upper', 'replace', 'ljust', \
        'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
        'translate', 'expandtabs', 'swapcase', 'zfill':
        locals()[method] = make_simple_escaping_wrapper(method)
    # new in python 2.5
    if hasattr(text_type, 'partition'):
        def partition(self, sep):
            return tuple(map(self.__class__,
                             text_type.partition(self, self.escape(sep))))
        def rpartition(self, sep):
            return tuple(map(self.__class__,
                             text_type.rpartition(self, self.escape(sep))))
    # new in python 2.6
    if hasattr(text_type, 'format'):
        # self is taken from args[0] so that 'self' may also be passed as a
        # regular format keyword argument.
        def format(*args, **kwargs):
            self, args = args[0], args[1:]
            formatter = EscapeFormatter(self.escape)
            kwargs = _MagicFormatMapping(args, kwargs)
            return self.__class__(formatter.vformat(self, args, kwargs))
        def __html_format__(self, format_spec):
            if format_spec:
                raise ValueError('Unsupported format specification '
                                 'for Markup.')
            return self
    # not in python 3
    if hasattr(text_type, '__getslice__'):
        __getslice__ = make_simple_escaping_wrapper('__getslice__')
    # Remove the loop variable and the helper from the class namespace.
    del method, make_simple_escaping_wrapper
class _MagicFormatMapping(Mapping):
"""This class implements a dummy wrapper to fix a bug in the Python
standard library for string formatting.
See http://bugs.python.org/issue13598 for information about why
this is necessary.
"""
def __init__(self, args, kwargs):
self._args = args
self._kwargs = kwargs
self._last_index = 0
def __getitem__(self, key):
if key == '':
idx = self._last_index
self._last_index += 1
try:
return self._args[idx]
except LookupError:
pass
key = str(idx)
return self._kwargs[key]
def __iter__(self):
return iter(self._kwargs)
def __len__(self):
return len(self._kwargs)
# str.format only exists on Python 2.6+, hence the guard (mirrors the guard
# inside the Markup class body above).
if hasattr(text_type, 'format'):
    class EscapeFormatter(string.Formatter):
        """string.Formatter subclass that escapes every substituted field."""
        def __init__(self, escape):
            self.escape = escape
        def format_field(self, value, format_spec):
            if hasattr(value, '__html_format__'):
                rv = value.__html_format__(format_spec)
            elif hasattr(value, '__html__'):
                if format_spec:
                    raise ValueError('No format specification allowed '
                                     'when formatting an object with '
                                     'its __html__ method.')
                rv = value.__html__()
            else:
                # We need to make sure the format spec is unicode here as
                # otherwise the wrong callback methods are invoked. For
                # instance a byte string there would invoke __str__ and
                # not __unicode__.
                rv = string.Formatter.format_field(
                    self, value, text_type(format_spec))
            return text_type(self.escape(rv))
def _escape_argspec(obj, iterable, escape):
    """Escape every string-like or HTML-aware value yielded by *iterable*
    (pairs of key/value), writing the results back into *obj* (a list or
    dict) under each key.  Returns *obj*."""
    for key, value in iterable:
        escapable = hasattr(value, '__html__') or \
            isinstance(value, string_types)
        if escapable:
            obj[key] = escape(value)
    return obj
class _MarkupEscapeHelper(object):
    """Wraps a %-format argument for Markup.__mod__ so that it is escaped
    lazily, only when actually rendered into the format string."""

    def __init__(self, obj, escape):
        self.obj = obj
        self.escape = escape

    def __getitem__(self, key):
        # Dict-style formats ('%(name)s') index into the wrapped object;
        # keep the result wrapped so it is escaped too.
        return _MarkupEscapeHelper(self.obj[key], self.escape)

    def __str__(self):
        return text_type(self.escape(self.obj))

    # Python 2 calls __unicode__ for %s on unicode templates.
    __unicode__ = __str__

    def __repr__(self):
        return str(self.escape(repr(self.obj)))

    def __int__(self):
        return int(self.obj)

    def __float__(self):
        return float(self.obj)
# we have to import it down here as the speedups and native
# modules imports the markup type which is define above.
try:
    # Prefer the C implementation when it was compiled.
    from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
    from markupsafe._native import escape, escape_silent, soft_unicode
if not PY2:
    # Python 3 friendly alias for soft_unicode.
    soft_str = soft_unicode
    __all__.append('soft_str')
|
nkgilley/home-assistant | refs/heads/dev | tests/components/luftdaten/__init__.py | 40 | """Define tests for the Luftdaten component."""
|
Esri/workflowmanager-admin-tools | refs/heads/master | Documentation/ActiveDirectorySample1.py | 1 | # ---------------------------------------------------------------------------
# ActiveDirectorySample1.py
#
# SAMPLE #1:
# This script shows one possible scenario for using the "Import Active
# Directory Configuration" tool.
# ---------------------------------------------------------------------------
import arcpy
import os
# Define a basic class used to call out license errors
class LicenseError(Exception):
    """Raised when an ArcGIS product license or extension cannot be acquired."""
    pass
# Define a basic class used to call out core installation errors
class InstallationError(Exception):
    """Raised when a required component (e.g. the WMX toolbox) is not installed."""
    pass
# Function that prints an explanation of how to use this sample
def printUsage():
    """Print usage help for this sample script to stdout."""
    print("""
    SAMPLE #1:
    This script shows one possible scenario for using the "Import Active
    Directory Configuration" tool. Specifically, it:
    - Imports the Workflow Manager users & groups from the AD groups that you
      specify
    - Refreshes the privileges for the groups in the system
    - Makes sure that every user in an administrators group has
      administrative access to the Workflow Manager database.
    Expected Arguments:
    1 - Name of the AD group containing the full list of Workflow Manager
      USERS; assumes the domain of the user running the script.
    2 - Name of the AD group containing the full list of Workflow Manager
      GROUPS; assumes the domain of the user running the script.
    3 - The name of a Workflow Manager group whose users should have
      administrator access to the database.
    """)
# Function to retrieve the licenses needed by this utility
def checkOutLicenses(licenseType, extensionList):
    """Check out the given product license (if any) and every extension in
    extensionList, logging progress via arcpy.

    Raises LicenseError if any license or extension cannot be acquired.
    """
    # Product license is optional: skipped when licenseType is None or ''.
    if licenseType is not None and len(licenseType) > 0:
        retVal = arcpy.SetProduct(licenseType)
        if retVal in ("CheckedOut", "AlreadyInitialized"):
            arcpy.AddMessage("Got product successfully.")
        else:
            arcpy.AddError("Could not get license '" + licenseType + "'; return code: " + retVal)
            raise LicenseError
    for extension in extensionList:
        if arcpy.CheckExtension(extension) != "Available":
            raise LicenseError
        arcpy.AddMessage(extension + " extension is available")
        retVal = arcpy.CheckOutExtension(extension)
        if retVal != "CheckedOut":
            arcpy.AddError("Could not get extension: " + extension + "; return code: " + retVal)
            raise LicenseError
        arcpy.AddMessage("Got extension: " + extension)
# Function to determine the install location of the workflow manager toolbox
def getWorkflowManagerToolboxLocation():
    """Return the path of the 'Workflow Manager Administration Tools' toolbox,
    searching every known ArcGIS installation.

    Raises InstallationError if no installation contains the toolbox.
    """
    for installation in arcpy.ListInstallations():
        installInfo = arcpy.GetInstallInfo(installation)
        if installInfo is None:
            continue
        tbx = os.path.normpath(
            installInfo["InstallDir"] + os.sep + "ArcToolbox" + os.sep +
            "Toolboxes" + os.sep +
            "Workflow Manager Administration Tools.tbx")
        if os.path.exists(tbx):
            return tbx
    raise InstallationError("Workflow Manager Administration Tools toolbox not found")
# Function to ensure that messages from a previously-run tool are not lost
def logPreviousToolMessages():
    """Forward every message from the most recently run geoprocessing tool to
    this script's own message output so it is not lost.

    Fixes: the original fetched each message into a local ('msg =
    arcpy.GetMessage(i)') that was never used -- dead code removed; the
    manual while-loop counter is replaced with the idiomatic for/range.
    """
    for i in range(arcpy.GetMessageCount()):
        arcpy.AddReturnMessage(i)
def main():
    """Drive the sample: import AD users/groups into Workflow Manager,
    normalize group privileges, and grant database administrator access to
    every member of the supplied admin group.

    Expects three script arguments (see printUsage); any failure is reported
    through arcpy.AddError after printing usage help.

    Fixes: 'except Exception, ex' is Python-2-only syntax; the 'as' form
    used below is valid on Python 2.6+ and Python 3.
    """
    try:
        # Get the input parameters to this tool
        if arcpy.GetArgumentCount() != 3:
            raise Exception("Incorrect number of arguments")
        adUsersGroup = arcpy.GetParameterAsText(0)
        adGroupsGroup = arcpy.GetParameterAsText(1)
        wmxAdminGroup = arcpy.GetParameterAsText(2)
        # Get any necessary licenses before importing the toolbox
        checkOutLicenses("", ["JTX"])
        # Import the Workflow Manager toolbox
        wmxToolbox = getWorkflowManagerToolboxLocation()
        arcpy.ImportToolbox(wmxToolbox, "WMXAdminUtils")
        # Run the active directory import
        preserve = "NO_PRESERVE"
        arcpy.ImportActiveDirectoryConfiguration_WMXAdminUtils(adUsersGroup, adGroupsGroup, preserve)
        logPreviousToolMessages()
        # Ensure that all of the group permissions are in a known state
        #
        # The sequence of events shown here is arbitrary; a real-life example
        # would likely be set up differently
        # Grant everything to everyone
        arcpy.ModifyPrivilegeAssignment_WMXAdminUtils("[All]", "[All]", "GRANT")
        logPreviousToolMessages()
        # Remove some particular permissions from the groups
        arcpy.ModifyPrivilegeAssignment_WMXAdminUtils("DeleteJobs", "[All]", "REVOKE")
        logPreviousToolMessages()
        arcpy.ModifyPrivilegeAssignment_WMXAdminUtils("DeleteVersion", "[All]", "REVOKE")
        logPreviousToolMessages()
        # Add the permissions back to the administrators group
        arcpy.ModifyPrivilegeAssignment_WMXAdminUtils("[All]", wmxAdminGroup, "GRANT")
        logPreviousToolMessages()
        # Now, make sure that all of the users in the admin group have
        # administrator access to the WMX DB
        result = arcpy.ListUsers_WMXAdminUtils(wmxAdminGroup)
        logPreviousToolMessages()
        userListString = result.getOutput(0)
        if userListString != None and len(userListString) > 0:
            # The tool returns the user list as a ';'-delimited string.
            users = userListString.split(";")
            arcpy.ModifyAdministratorAccess_WMXAdminUtils("[All]", "REVOKE", "PRESERVE")
            logPreviousToolMessages()
            for user in users:
                arcpy.ModifyAdministratorAccess_WMXAdminUtils(user, "GRANT", "PRESERVE")
                logPreviousToolMessages()
    except Exception as ex:
        printUsage()
        arcpy.AddError("Caught exception: " + str(ex))
# Entry point when the file is executed directly (not imported as a module).
if __name__ == "__main__":
    main()
|
kazmiruk/gevent-socketio | refs/heads/master | socketio/packet.py | 9 | from socketio.defaultjson import default_json_dumps, default_json_loads
# socket.io 0.x message-type names mapped to their numeric wire codes.
MSG_TYPES = {
    'disconnect': 0,
    'connect': 1,
    'heartbeat': 2,
    'message': 3,
    'json': 4,
    'event': 5,
    'ack': 6,
    'error': 7,
    'noop': 8,
    }
# Reverse lookup: numeric wire code -> message-type name.
# Fix: dict.items() replaces the Python-2-only iteritems() so this module
# also imports under Python 3; the result is identical on Python 2.
MSG_VALUES = dict((v, k) for k, v in MSG_TYPES.items())
# Error reasons sent in '7' (error) packets.
ERROR_REASONS = {
    'transport not supported': 0,
    'client not handshaken': 1,
    'unauthorized': 2
    }
# Reverse lookup: numeric reason code -> reason string.
REASONS_VALUES = dict((v, k) for k, v in ERROR_REASONS.items())
# Advice values sent in '7' (error) packets.
ERROR_ADVICES = {
    'reconnect': 0,
    }
# Reverse lookup: numeric advice code -> advice string.
ADVICES_VALUES = dict((v, k) for k, v in ERROR_ADVICES.items())
# Every attribute a decoded socket.io packet dict may carry.
socketio_packet_attributes = ['type', 'name', 'data', 'endpoint', 'args',
                              'ackId', 'reason', 'advice', 'qs', 'id']
def encode(data, json_dumps=default_json_dumps):
    """
    Encode an attribute dict into a byte string.

    The socket.io 0.x wire format built here is roughly:
        [type] ':' [id ('+')] ':' [endpoint] (':' [payload])
    NOTE(review): for message types 3/4/5 this mutates the caller's dict by
    defaulting data['endpoint'] to '' -- confirm callers tolerate this.
    """
    payload = ''
    msg = str(MSG_TYPES[data['type']])
    if msg in ['0', '1']:
        # '1::' [path] [query]
        msg += '::' + data['endpoint']
        if 'qs' in data and data['qs'] != '':
            msg += ':' + data['qs']
    elif msg == '2':
        # heartbeat
        msg += '::'
    elif msg in ['3', '4', '5']:
        # '3:' [id ('+')] ':' [endpoint] ':' [data]
        # '4:' [id ('+')] ':' [endpoint] ':' [json]
        # '5:' [id ('+')] ':' [endpoint] ':' [json encoded event]
        # The message id is an incremental integer, required for ACKs.
        # If the message id is followed by a +, the ACK is not handled by
        # socket.io, but by the user instead.
        if msg == '3':
            payload = data['data']
        if msg == '4':
            payload = json_dumps(data['data'])
        if msg == '5':
            d = {}
            d['name'] = data['name']
            if 'args' in data and data['args'] != []:
                d['args'] = data['args']
            payload = json_dumps(d)
        if 'id' in data:
            msg += ':' + str(data['id'])
            # 'ack' is assumed present whenever 'id' is (KeyError otherwise).
            if data['ack'] == 'data':
                msg += '+'
            msg += ':'
        else:
            msg += '::'
        if 'endpoint' not in data:
            data['endpoint'] = ''
        if payload != '':
            msg += data['endpoint'] + ':' + payload
        else:
            msg += data['endpoint']
    elif msg == '6':
        # '6:::' [id] '+' [data]
        msg += '::' + data.get('endpoint', '') + ':' + str(data['ackId'])
        if 'args' in data and data['args'] != []:
            msg += '+' + json_dumps(data['args'])
    elif msg == '7':
        # '7::' [endpoint] ':' [reason] '+' [advice]
        msg += ':::'
        if 'reason' in data and data['reason'] != '':
            msg += str(ERROR_REASONS[data['reason']])
        if 'advice' in data and data['advice'] != '':
            msg += '+' + str(ERROR_ADVICES[data['advice']])
        msg += data['endpoint']
    # NoOp, used to close a poll after the polling duration time
    elif msg == '8':
        msg += '::'
    return msg
def decode(rawstr, json_loads=default_json_loads):
    """
    Decode a rawstr packet arriving from the socket into a dict.

    The wire format is '[type]:[id]:[endpoint](:[data])'.  The returned dict
    always carries 'type' and 'endpoint'; the remaining keys ('id', 'ack',
    'data', 'args', 'name', 'ackId', 'reason', 'advice', 'qs') depend on the
    message type.  Raises Exception for an unknown numeric message type.

    Fixes: 'except ValueError, e' was Python-2-only syntax and the bound
    exception was never used; replaced with a bare 'except ValueError:'.
    """
    decoded_msg = {}
    split_data = rawstr.split(":", 3)
    msg_type = split_data[0]
    msg_id = split_data[1]
    endpoint = split_data[2]
    data = ''
    if msg_id != '':
        # A '+' suffix on the id means the ACK is handled by the user rather
        # than by socket.io itself.
        if "+" in msg_id:
            msg_id = msg_id.split('+')[0]
            decoded_msg['id'] = int(msg_id)
            decoded_msg['ack'] = 'data'
        else:
            decoded_msg['id'] = int(msg_id)
            decoded_msg['ack'] = True
    # common to every message
    msg_type_id = int(msg_type)
    if msg_type_id in MSG_VALUES:
        decoded_msg['type'] = MSG_VALUES[int(msg_type)]
    else:
        raise Exception("Unknown message type: %s" % msg_type)
    decoded_msg['endpoint'] = endpoint
    if len(split_data) > 3:
        data = split_data[3]
    if msg_type == "0":  # disconnect
        pass
    elif msg_type == "1":  # connect
        decoded_msg['qs'] = data
    elif msg_type == "2":  # heartbeat
        pass
    elif msg_type == "3":  # message
        decoded_msg['data'] = data
    elif msg_type == "4":  # json msg
        decoded_msg['data'] = json_loads(data)
    elif msg_type == "5":  # event
        try:
            data = json_loads(data)
        except ValueError:
            print("Invalid JSON event message", data)
            decoded_msg['args'] = []
        else:
            decoded_msg['name'] = data.pop('name')
            if 'args' in data:
                decoded_msg['args'] = data['args']
            else:
                decoded_msg['args'] = []
    elif msg_type == "6":  # ack
        if '+' in data:
            ackId, data = data.split('+')
            decoded_msg['ackId'] = int(ackId)
            decoded_msg['args'] = json_loads(data)
        else:
            decoded_msg['ackId'] = int(data)
            decoded_msg['args'] = []
    elif msg_type == "7":  # error
        if '+' in data:
            reason, advice = data.split('+')
            decoded_msg['reason'] = REASONS_VALUES[int(reason)]
            decoded_msg['advice'] = ADVICES_VALUES[int(advice)]
        else:
            decoded_msg['advice'] = ''
            if data != '':
                decoded_msg['reason'] = REASONS_VALUES[int(data)]
            else:
                decoded_msg['reason'] = ''
    elif msg_type == "8":  # noop
        pass
    return decoded_msg
|
iamwrm/coding | refs/heads/master | leetcode/a1.py | 1 | class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
for i,j in nums:
if (not (i == j )) and (i+j == nums):
return i. |
darktears/chromium-crosswalk | refs/heads/master | ppapi/generate_ppapi_size_checks.py | 177 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script should be run manually on occasion to make sure all PPAPI types
have appropriate size checking.
"""
import datetime
import optparse
import os
import subprocess
import sys
# The string that the PrintNamesAndSizes plugin uses to indicate a type is
# expected to have architecture-dependent size.
ARCH_DEPENDENT_STRING = "ArchDependentSize"
# Header comment emitted at the top of each generated file, stamped with the
# current year.  Fix: this expression needs the 'datetime' module, which the
# file never imported (NameError at import time); 'import datetime' is added
# to the imports above.
COPYRIGHT_STRING_C = (
"""/* Copyright (c) %s The Chromium Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 *
 * This file has compile assertions for the sizes of types that are dependent
 * on the architecture for which they are compiled (i.e., 32-bit vs 64-bit).
 */
""") % datetime.date.today().year
class SourceLocation(object):
  """Holds the file name and (0-based, inclusive) line span of a definition."""
  def __init__(self, filename="", start_line=-1, end_line=-1):
    # Normalize so later path comparisons behave consistently regardless of
    # separators or redundant components.
    self.filename = os.path.normpath(filename)
    self.start_line = start_line
    self.end_line = end_line
class TypeInfo(object):
  """Information about a single C/C++ type, parsed from PrintNamesAndSizes
  plugin output.

  Fields:
  - kind: The Clang TypeClassName (Record, Enum, Typedef, Union, etc).
  - name: The unmangled string name of the type.
  - size: The size of the type in bytes (kept as a string).
  - arch_dependent: True if the plugin reported the type as having
    architecture-dependent size (pointers, longs, unsigned longs, or any
    type containing such a type); False otherwise.
  - source_location: A SourceLocation describing where the type is defined.
  - target: The target triple Clang was compiling when the type was first
    seen.  Used only for diagnostic output.
  - parsed_line: The raw plugin output line this TypeInfo was built from.
    Used only for diagnostic output.
  """
  def __init__(self, info_string, target):
    """Parse one comma-delimited line of PrintNamesAndSizes output.

    info_string has the form:
      kind,name,size,arch_dependent,source_file,start_line,end_line
    where arch_dependent is 'ArchDependentSize' or 'NotArchDependentSize'
    and the line numbers are 1-based.
    """
    (self.kind, self.name, self.size, arch_flag, source_file,
     first_line, last_line) = info_string.split(',')
    self.target = target
    self.parsed_line = info_string
    # Clang reports 1-based line numbers; SourceLocation stores 0-based.
    self.source_location = SourceLocation(source_file,
                                          int(first_line) - 1,
                                          int(last_line) - 1)
    self.arch_dependent = (arch_flag == ARCH_DEPENDENT_STRING)
class FilePatch(object):
  """A set of line-by-line edits to a single file.

  Nothing is written to disk until Apply() is called.  All line numbers are
  counted from 0.
  """
  def __init__(self, filename):
    self.filename = filename
    self.linenums_to_delete = set()
    # Maps a line number to the list of strings to insert before that line.
    self.lines_to_add = {}

  def Delete(self, start_line, end_line):
    """Delete lines in [start_line, end_line) when the patch is applied."""
    self.linenums_to_delete.update(range(start_line, end_line))

  def Add(self, text, line_number):
    """Insert text before the existing text on the given line number."""
    self.lines_to_add.setdefault(line_number, []).append(text)

  def Apply(self):
    """Apply all recorded edits by rewriting self.filename in place."""
    with open(self.filename, "r") as sourcefile:
      file_lines = sourcefile.readlines()
    # Strategy: keep the list the same length so every cached line number
    # stays a valid index -- deletions blank the entry, insertions are
    # prepended to the entry's text.  An entry may therefore end up holding
    # zero or more newline-delimited lines.
    for linenum in self.linenums_to_delete:
      file_lines[linenum] = ""
    for linenum, sourcelines in self.lines_to_add.items():
      # Sort the added lines for reasonably stable output ordering.
      sourcelines.sort()
      file_lines[linenum] = "".join(sourcelines) + file_lines[linenum]
    with open(self.filename, "w") as newsource:
      newsource.writelines(file_lines)
def CheckAndInsert(typeinfo, typeinfo_map):
  """Check if a TypeInfo exists already in the given map with the same name. If
  so, make sure the size is consistent.
  - If the name exists but the sizes do not match, print a message and
    exit with non-zero exit code.
  - If the name exists and the sizes match, do nothing.
  - If the name does not exist, insert the typeinfo in to the map.
  """
  # NOTE(review): the 'print' statements below are Python-2 statement syntax,
  # so this script is Python-2-only as written.
  # If the type is unnamed, ignore it.
  if typeinfo.name == "":
    return
  # If the size is 0, ignore it.
  elif int(typeinfo.size) == 0:
    return
  # If the type is not defined under ppapi, ignore it.
  elif typeinfo.source_location.filename.find("ppapi") == -1:
    return
  # If the type is defined under GLES2, ignore it.
  elif typeinfo.source_location.filename.find("GLES2") > -1:
    return
  # If the type is an interface (by convention, starts with PPP_ or PPB_),
  # ignore it.
  elif (typeinfo.name[:4] == "PPP_") or (typeinfo.name[:4] == "PPB_"):
    return
  elif typeinfo.name in typeinfo_map:
    # Same name seen before: the sizes must agree across targets.
    if typeinfo.size != typeinfo_map[typeinfo.name].size:
      print "Error: '" + typeinfo.name + "' is", \
          typeinfo_map[typeinfo.name].size, \
          "bytes on target '" + typeinfo_map[typeinfo.name].target + \
          "', but", typeinfo.size, "on target '" + typeinfo.target + "'"
      print typeinfo_map[typeinfo.name].parsed_line
      print typeinfo.parsed_line
      sys.exit(1)
    else:
      # It's already in the map and the sizes match.
      pass
  else:
    typeinfo_map[typeinfo.name] = typeinfo
def ProcessTarget(clang_command, target, types):
  """Run clang_command for the given target triple and fold every type the
  plugin reports into the 'types' dictionary via CheckAndInsert (which exits
  with an error message if a size conflicts with an earlier target).
  """
  proc = subprocess.Popen(clang_command + " -triple " + target,
                          shell=True,
                          stdout=subprocess.PIPE)
  # One whitespace-separated token per reported type (the plugin emits no
  # spaces inside a record).
  plugin_output = proc.communicate()[0]
  for line in plugin_output.split():
    CheckAndInsert(TypeInfo(line, target), types)
def ToAssertionCode(typeinfo):
  """Render a C compile-time size assertion for the given TypeInfo.

  Structs (Clang 'Record') use PP_COMPILE_ASSERT_STRUCT_SIZE_IN_BYTES, enums
  use PP_COMPILE_ASSERT_ENUM_SIZE_IN_BYTES, and everything else (typedefs)
  uses plain PP_COMPILE_ASSERT_SIZE_IN_BYTES.  The result ends in a newline.
  """
  if typeinfo.kind == "Enum":
    kind_part = "ENUM_"
  elif typeinfo.kind == "Record":
    kind_part = "STRUCT_"
  else:
    kind_part = ""
  return ("PP_COMPILE_ASSERT_" + kind_part + "SIZE_IN_BYTES(" +
          typeinfo.name + ", " + typeinfo.size + ");\n")
def IsMacroDefinedName(typename):
  """Return True iff the given typename came from a PPAPI compile assertion.

  The PP_COMPILE_ASSERT_* macros generate dummy structs named with this
  prefix.  Improvement: str.startswith replaces 'find(...) == 0', which is
  the idiomatic form and avoids scanning the whole string on a miss.
  """
  return typename.startswith("PP_Dummy_Struct_For_")
def WriteArchSpecificCode(types, root, filename):
  """Write a header file (rooted at 'root', named 'filename') containing a
  compile-time size assertion for each of the given TypeInfos, wrapped in an
  include guard derived from the file name.

  Fix: the output file handle was opened but never closed; a 'with' block
  now guarantees it is closed even if a write fails.
  """
  assertion_lines = [ToAssertionCode(typeinfo) for typeinfo in types]
  assertion_lines.sort()
  header_guard = "PPAPI_TESTS_" + filename.upper().replace(".", "_") + "_"
  with open(os.path.join(root, filename), "w") as outfile:
    outfile.write(COPYRIGHT_STRING_C)
    outfile.write('#ifndef ' + header_guard + '\n')
    outfile.write('#define ' + header_guard + '\n\n')
    outfile.write('#include "ppapi/tests/test_struct_sizes.c"\n\n')
    for line in assertion_lines:
      outfile.write(line)
    outfile.write('\n#endif  /* ' + header_guard + ' */\n')
def main(argv):
# See README file for example command-line invocation. This script runs the
# PrintNamesAndSizes Clang plugin with 'test_struct_sizes.c' as input, which
# should include all C headers and all existing size checks. It runs the
# plugin multiple times; once for each of a set of targets, some 32-bit and
# some 64-bit. It verifies that wherever possible, types have a consistent
# size on both platforms. Types that can't easily have consistent size (e.g.
# ones that contain a pointer) are checked to make sure they are consistent
# for all 32-bit platforms and consistent on all 64-bit platforms, but the
# sizes on 32 vs 64 are allowed to differ.
#
# Then, if all the types have consistent size as expected, compile assertions
# are added to the source code. Types whose size is independent of
# architectureacross have their compile assertions placed immediately after
# their definition in the C API header. Types whose size differs on 32-bit
# vs 64-bit have a compile assertion placed in each of:
# ppapi/tests/arch_dependent_sizes_32.h and
# ppapi/tests/arch_dependent_sizes_64.h.
#
# Note that you should always check the results of the tool to make sure
# they are sane.
parser = optparse.OptionParser()
parser.add_option(
'-c', '--clang-path', dest='clang_path',
default=(''),
help='the path to the clang binary (default is to get it from your path)')
parser.add_option(
'-p', '--plugin', dest='plugin',
default='tests/clang/libPrintNamesAndSizes.so',
help='The path to the PrintNamesAndSizes plugin library.')
parser.add_option(
'--targets32', dest='targets32',
default='i386-pc-linux,arm-pc-linux,i386-pc-win32',
help='Which 32-bit target triples to provide to clang.')
parser.add_option(
'--targets64', dest='targets64',
default='x86_64-pc-linux,x86_64-pc-win',
help='Which 32-bit target triples to provide to clang.')
parser.add_option(
'-r', '--ppapi-root', dest='ppapi_root',
default='.',
help='The root directory of ppapi.')
options, args = parser.parse_args(argv)
if args:
parser.print_help()
print 'ERROR: invalid argument'
sys.exit(1)
clang_executable = os.path.join(options.clang_path, 'clang')
clang_command = clang_executable + " -cc1" \
+ " -load " + options.plugin \
+ " -plugin PrintNamesAndSizes" \
+ " -I" + os.path.join(options.ppapi_root, "..") \
+ " " \
+ os.path.join(options.ppapi_root, "tests", "test_struct_sizes.c")
# Dictionaries mapping type names to TypeInfo objects.
# Types that have size dependent on architecture, for 32-bit
types32 = {}
# Types that have size dependent on architecture, for 64-bit
types64 = {}
# Note that types32 and types64 should contain the same types, but with
# different sizes.
# Types whose size should be consistent regardless of architecture.
types_independent = {}
# Now run clang for each target. Along the way, make sure architecture-
# dependent types are consistent sizes on all 32-bit platforms and consistent
# on all 64-bit platforms.
targets32 = options.targets32.split(',');
for target in targets32:
# For each 32-bit target, run the PrintNamesAndSizes Clang plugin to get
# information about all types in the translation unit, and add a TypeInfo
# for each of them to types32. If any size mismatches are found,
# ProcessTarget will spit out an error and exit.
ProcessTarget(clang_command, target, types32)
targets64 = options.targets64.split(',');
for target in targets64:
# Do the same as above for each 64-bit target; put all types in types64.
ProcessTarget(clang_command, target, types64)
# Now for each dictionary, find types whose size are consistent regardless of
# architecture, and move those in to types_independent. Anywhere sizes
# differ, make sure they are expected to be architecture-dependent based on
# their structure. If we find types which could easily be consistent but
# aren't, spit out an error and exit.
types_independent = {}
for typename, typeinfo32 in types32.items():
if (typename in types64):
typeinfo64 = types64[typename]
if (typeinfo64.size == typeinfo32.size):
# The types are the same size, so we can treat it as arch-independent.
types_independent[typename] = typeinfo32
del types32[typename]
del types64[typename]
elif (typeinfo32.arch_dependent or typeinfo64.arch_dependent):
# The type is defined in such a way that it would be difficult to make
# its size consistent. E.g., it has pointers. We'll leave it in the
# arch-dependent maps so that we can put arch-dependent size checks in
# test code.
pass
else:
# The sizes don't match, but there's no reason they couldn't. It's
# probably due to an alignment mismatch between Win32/NaCl vs Linux32/
# Mac32.
print "Error: '" + typename + "' is", typeinfo32.size, \
"bytes on target '" + typeinfo32.target + \
"', but", typeinfo64.size, "on target '" + typeinfo64.target + "'"
print typeinfo32.parsed_line
print typeinfo64.parsed_line
sys.exit(1)
else:
print "WARNING: Type '", typename, "' was defined for target '",
print typeinfo32.target, ", but not for any 64-bit targets."
# Now we have all the information we need to generate our static assertions.
# Types that have consistent size across architectures will have the static
# assertion placed immediately after their definition. Types whose size
# depends on 32-bit vs 64-bit architecture will have checks placed in
# tests/arch_dependent_sizes_32/64.h.
# This dictionary maps file names to FilePatch objects. We will add items
# to it as needed. Each FilePatch represents a set of changes to make to the
# associated file (additions and deletions).
file_patches = {}
# Find locations of existing macros, and just delete them all. Note that
# normally, only things in 'types_independent' need to be deleted, as arch-
# dependent checks exist in tests/arch_dependent_sizes_32/64.h, which are
# always completely over-ridden. However, it's possible that a type that used
# to be arch-independent has changed to now be arch-dependent (e.g., because
# a pointer was added), and we want to delete the old check in that case.
for name, typeinfo in \
types_independent.items() + types32.items() + types64.items():
if IsMacroDefinedName(name):
sourcefile = typeinfo.source_location.filename
if sourcefile not in file_patches:
file_patches[sourcefile] = FilePatch(sourcefile)
file_patches[sourcefile].Delete(typeinfo.source_location.start_line,
typeinfo.source_location.end_line+1)
# Add a compile-time assertion for each type whose size is independent of
# architecture. These assertions go immediately after the class definition.
for name, typeinfo in types_independent.items():
# Ignore dummy types that were defined by macros and also ignore types that
# are 0 bytes (i.e., typedefs to void).
if not IsMacroDefinedName(name) and typeinfo.size > 0:
sourcefile = typeinfo.source_location.filename
if sourcefile not in file_patches:
file_patches[sourcefile] = FilePatch(sourcefile)
# Add the assertion code just after the definition of the type.
# E.g.:
# struct Foo {
# int32_t x;
# };
# PP_COMPILE_ASSERT_STRUCT_SIZE_IN_BYTES(Foo, 4); <---Add this line
file_patches[sourcefile].Add(ToAssertionCode(typeinfo),
typeinfo.source_location.end_line+1)
# Apply our patches. This actually edits the files containing the definitions
# for the types in types_independent.
for filename, patch in file_patches.items():
patch.Apply()
# Write out a file of checks for 32-bit architectures and a separate file for
# 64-bit architectures. These only have checks for types that are
# architecture-dependent.
c_source_root = os.path.join(options.ppapi_root, "tests")
WriteArchSpecificCode(types32.values(),
c_source_root,
"arch_dependent_sizes_32.h")
WriteArchSpecificCode(types64.values(),
c_source_root,
"arch_dependent_sizes_64.h")
return 0
# Command-line entry point: forward the CLI arguments (minus the program
# name) to main() and use its return value as the process exit status.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
|
microhuang/mongrel2 | refs/heads/master | examples/python/mongrel2/config/commands.py | 90 | from mongrel2 import config
from mongrel2.config import args
import mongrel2.config.commands
from uuid import uuid4
from mongrel2.config import model
import getpass
import sys
import os
import signal
from sqlite3 import OperationalError
def try_reading(reader):
try:
cmd = reader.readline()
return cmd.split(' ')
except UnicodeDecodeError:
print "\nERROR: Sorry, PyRepl and Python hate printing to your screen: UnicodeDecodeError."
return []
def shell_command():
"""
Starts an interactive shell with readline style input so you can
work with Mongrel2 easier.
"""
try:
from pyrepl.unix_console import UnixConsole
from pyrepl.historical_reader import HistoricalReader
except:
print "You don't have PyRepl installed, shell not available."
reader = HistoricalReader(UnixConsole())
reader.ps1 = "m2> "
reader.ps2 = "..> "
reader.ps3 = "...> "
reader.ps4 = "....> "
try:
while True:
cmd = try_reading(reader)
if cmd:
try:
args.parse_and_run_command(cmd, mongrel2.config.commands)
except Exception, e:
print "ERROR:", e
except EOFError:
print "Bye."
except KeyboardInterrupt:
print "BYE!"
def help_command(**options):
"""
Prints out help for the commands.
m2sh help
You can get help for one command with:
m2sh help -for STR
"""
if "for" in options:
help_text = args.help_for_command(config.commands, options['for'])
if help_text:
print help_text
else:
args.invalid_command_message(config.commands)
else:
print "Available commands:\n"
print "\n".join(args.available_commands(config.commands))
print "\nUse config help -for <command> to find out more."
def dump_command(db=None):
"""
Simple dump of a config database:
m2sh dump -db config.sqlite
"""
print "LOADING DB: ", db
try:
if not (os.path.isfile(db) and os.access(db, os.R_OK)):
raise IOError
store = model.begin(db)
servers = store.find(model.Server)
for server in servers:
print server
for host in server.hosts:
print "\t", host
for route in host.routes:
print "\t\t", route
except IOError:
print "%s not readable" % db
except OperationalError, exc:
print "SQLite error: %s" % exc
def uuid_command(hex=False):
"""
Generates a UUID for you to use in your configurations:
m2sh uuid
m2sh uuid -hex
The -hex means to print it as a big hex number, which is
more efficient but harder to read.
"""
if hex:
print uuid4().hex
else:
print str(uuid4())
def servers_command(db=None):
"""
Lists the servers that are configured in this setup:
m2sh servers -db config.sqlite
"""
if not os.path.isfile(db):
print "ERROR: Cannot access database file %s" % db
return
try:
store = model.begin(db)
servers = store.find(model.Server)
for server in servers:
print "-------"
print server.name, server.default_host, server.uuid
for host in server.hosts:
print "\t", host.id, ':', host.name
except OperationalError, exc:
print "SQLite error: %s" % exc
def hosts_command(db=None, uuid="", host="", name=""):
    """
    List all the hosts in the given server identified by UUID or host.
    m2sh hosts -db config.sqlite -uuid f400bf85-4538-4f7a-8908-67e313d515c2
    m2sh hosts -db config.sqlite -host localhost
    m2sh hosts -db config.sqlite -name test
    The -host parameter is the default_host for the server.
    """
    # Guard clause: database must exist and be readable.
    if not (os.path.isfile(db) and os.access(db, os.R_OK)):
        print "Cannot read database file %s" % db
        return
    try:
        store = model.begin(db)
        results = None
        # Selectors are tried in priority order: uuid, then host, then name.
        if uuid:
            results = store.find(model.Server, model.Server.uuid == unicode(uuid))
        elif host:
            results = store.find(model.Server, model.Server.default_host == unicode(host))
        elif name:
            results = store.find(model.Server, model.Server.name == unicode(name))
        else:
            print "ERROR: Must give a -host or -uuid or -name."
            return
        if results.count():
            # Only the first matching server's hosts are printed.
            server = results[0]
            hosts = store.find(model.Host, model.Host.server_id == server.id)
            # NOTE(review): this loop variable shadows the 'host' parameter.
            for host in hosts:
                print "--------"
                print host, ":"
                for route in host.routes:
                    print "\t", route.path, ':', route.target
        else:
            print "No servers found."
    except OperationalError, exc:
        print "SQLite error: %s" % exc
def init_command(db=None):
    """
    Initializes a new config database.
    m2sh init -db config.sqlite
    It will obliterate this config.
    """
    from pkg_resources import resource_stream
    import sqlite3
    # The schema ships inside the mongrel2 package as sql/config.sql.
    sql = resource_stream('mongrel2', 'sql/config.sql').read()
    # Close any open global store so we don't hold a stale handle on the
    # database file we are about to rewrite.
    if model.store:
        model.store.close()
        model.store = None
    if os.path.isfile(db) and not os.access(db, os.W_OK):
        print "Cannot access database file %s" % db
        return
    try:
        conn = sqlite3.connect(db)
        conn.executescript(sql)
        # Record the init in the commit log so other admins can see it.
        commit_command(db=db, what="init_command", why=" ".join(sys.argv))
    except OperationalError, exc:
        print "Error: %s" % exc
def load_command(db=None, config=None, clear=True):
    """
    After using init you can use this to load a config:
    m2sh load -db config.sqlite -config tests/sample_conf.py
    This will erase the previous config, but we'll make it
    safer later on.
    """
    import imp
    if not (os.path.isfile(db) and os.access(db, os.R_OK)):
        print "Cannot access database file %s" % db
        return
    try:
        model.begin(db, clear=clear)
        # The config file is plain Python: importing it executes it, and its
        # statements populate the store opened above.  Only run trusted
        # config files.
        imp.load_source('mongrel2_config_main', config)
        commit_command(db=db, what="load_command", why=" ".join(sys.argv))
    except OperationalError, exc:
        print "SQLite error: %s" % exc
    except SyntaxError,exc:
        # Raised by load_source when the config file doesn't parse.
        print "Syntax error: %s" % exc
def config_command(db=None, config=None, clear=True):
    """
    Effectively does an init then load of a config to get
    you started quicker:
    m2sh config -db config.sqlite -config tests/sample_conf.py
    Like the other two, this will nuke your config, but we'll
    make it safer later.
    """
    # Recreate the schema from scratch, then execute the config script
    # against the fresh database.
    init_command(db=db)
    load_command(db=db, config=config, clear=clear)
def commit_command(db=None, what=None, why=None):
"""
Used to a commit event to the database for other admins to know
what is going on with the config. The system logs quite a lot
already for you, like your username, machine name, etc:
m2sh commit -db test.sqlite -what mongrel2.org \
-why "Needed to change paters."
In future versions it will prevent you from committing as root,
because only assholes commit from root.
Both parameters are arbitrary, but I like to record what I did to
different Hosts in servers.
"""
import socket
store = model.load_db("sqlite:" + db)
who = unicode(getpass.getuser())
if who == u'root':
print "Commit from root eh? Man, you're kind of a tool."
log = model.Log()
log.who = who
log.what = unicode(what)
log.why = unicode(why)
log.location = unicode(socket.gethostname())
log.how = u'm2sh'
store.add(log)
store.commit()
def log_command(db=None, count=20):
"""
Dumps commit logs:
m2sh log -db test.sqlite -count 20
m2sh log -db test.sqlite
So you know who to blame.
"""
store = model.load_db("sqlite:" + db)
logs = store.find(model.Log)
for log in logs.order_by(model.Log.happened_at)[0:count]:
print log
def find_servers(db=None, uuid="", host="", name="", every=False):
"""
Finds all the servers which match the given uuid, host or name.
If every is true all servers in the database will be returned.
"""
store = model.begin(db)
servers = []
if every:
servers = store.find(model.Server)
elif uuid:
servers = store.find(model.Server, model.Server.uuid == unicode(uuid))
elif host:
servers = store.find(model.Server, model.Server.default_host == unicode(host))
elif name:
servers = store.find(model.Server, model.Server.name == unicode(name))
if servers.count() > 1 and not every:
print "Not sure which server to run, what I found:"
print "NAME HOST UUID"
print "--------------"
for server in servers:
print server.name, server.default_host, server.uuid
print "* Use -every to run them all."
return []
else:
return servers
def start_command(db=None, uuid= "", host="", name="", sudo=False, every=False):
"""
Does a simple start of the given server(s) identified by the uuid, host
(default_host) parameter or the name.:
m2sh start -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
m2sh start -db config.sqlite -host localhost
m2sh start -db config.sqlite -name test
m2sh start -db config.sqlite -every
Give the -sudo options if you want it to start mongrel2 as root for you
(must have sudo installed).
Give the -every option if you want mongrel2 to launch all servers listed in
the given db.
If multiple servers match and -every is not given, m2sh will ask you which
to start.
"""
root_enabler = 'sudo' if sudo else ''
servers = find_servers(db, uuid, host, name, every)
if not servers or servers.count() == 0:
print 'No matching servers found, nothing launched'
else:
for server in servers:
print 'Launching server %s %s on port %d' % (server.name, server.uuid, server.port)
os.system('%s mongrel2 %s %s' % (root_enabler, db, server.uuid))
def stop_command(db=None, uuid="", host="", name="", every=False, murder=False):
    """
    Stops a running mongrel2 process according to the host, either
    gracefully (INT) or murderous (TERM):
    m2sh stop -db config.sqlite -host localhost
    m2sh stop -db config.sqlite -host localhost -murder
    m2sh stop -db config.sqlite -name test -murder
    m2sh stop -db config.sqlite -every
    You shouldn't need sudo to stop a running mongrel if you
    are also the user that owns the chroot directory or root.
    Normally mongrel2 will wait until connections die off before really
    leaving, but you can give it the -murder flag and it'll nuke it
    semi-gracefully. You can also do it again with -murder if it's waiting
    for some dead connections and you want it to just quit.
    """
    # SIGINT asks mongrel2 to drain connections; SIGTERM kills it outright.
    signal_to_send = signal.SIGTERM if murder else signal.SIGINT
    for server in find_servers(db, uuid, host, name, every):
        pid = get_server_pid(server)
        if pid:
            os.kill(pid, signal_to_send)
def reload_command(db=None, uuid="", host="", name="", every=False):
    """
    Causes Mongrel2 to do a soft-reload which will re-read the config
    database and then attempt to load a whole new configuration without
    losing connections on the previous one:
    m2sh reload -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
    m2sh reload -db config.sqlite -host localhost
    m2sh reload -db config.sqlite -name test
    m2sh reload -db config.sqlite -every
    This reload will need access to the config database from within the
    chroot for it to work, and it's not totally guaranteed to be 100%
    reliable, but if you are doing development and need to do quick changes
    then this is what you do.
    """
    for server in find_servers(db, uuid, host, name, every):
        pid = get_server_pid(server)
        if not pid:
            continue
        # SIGHUP triggers mongrel2's soft reload.
        os.kill(pid, signal.SIGHUP)
def running_command(db=None, uuid="", host="", name="", every=False):
"""
Tells you if the given server is still running:
m2sh running -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
m2sh running -db config.sqlite -host localhost
m2sh running -db config.sqlite -name test
m2sh running -db config.sqlite -every
"""
for server in find_servers(db, uuid, host, name, every):
pid = get_server_pid(server)
# TODO: Clean this up.
if pid:
try:
os.kill(pid, 0)
print "Found server %s %s RUNNING at PID %i" % (server.name,
server.uuid,
pid)
except OSError:
print "Server %s %s NOT RUNNING at PID %i" % (server.name,
server.uuid,
pid)
def control_command(db=None, host="", name="", uuid=""):
"""
Start a simple control console for working with mongrel2.
This is *very* bare bones at the moment but should improve.
m2sh control -db config.sqlite -uuid 3d815ade-9081-4c36-94dc-77a9b060b021
m2sh control -db config.sqlite -host localhost
m2sh control -db config.sqlite -name test
"""
store = model.load_db("sqlite:" + db)
import zmq
servers = find_servers(db, uuid, host, name, False)
if servers:
server = servers[0]
CTX = zmq.Context()
results = store.find(model.Setting, model.Setting.key == unicode("control_port"))
addr = results[0].value if results.count() > 1 else "ipc://run/control"
ctl = CTX.socket(zmq.REQ)
print "CONNECTING to: %s in %s" % (addr, server.chroot)
os.chdir(server.chroot)
ctl.connect(addr)
try:
while True:
cmd = raw_input("> ")
ctl.send(cmd)
print ctl.recv()
except EOFError:
ctl.close()
def get_server_pid(server):
pid_file = os.path.realpath(server.chroot + server.pid_file)
if not os.path.isfile(pid_file):
print "PID file %s not found for server %s %s" % (pid_file,
server.name,
server.uuid)
return None
else:
return int(open(pid_file, 'r').read())
def version_command():
"""
Prints out the version of your mongrel2 binary."
"""
print "Mongrel2/1.7.5"
|
pombredanne/pyjs | refs/heads/master | tests/test022.py | 13 | import ui
class DynaTable:
def onModuleLoad(self):
slot = ui.RootPanel.get("calendar") |
phil65/KodiDevKit | refs/heads/master | libs/include.py | 1 | # -*- coding: utf8 -*-
# Copyright (C) 2015 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
from lxml import etree as ET
import os
class Include(dict):
    """
    Represents an include-like kodi object.

    Wraps an lxml element (``self.node``) and exposes metadata about it
    (source line, tag, name, originating file) both as properties and via
    dict-style ``[]`` / ``get()`` access.
    """
    # Attribute names whose values are expected to be constant (numeric)
    # expressions.
    # NOTE(review): "center" appears twice in this literal; harmless in a
    # set, but probably a copy/paste slip.
    constant_attribs = {"x", "y", "width", "height", "center", "max", "min", "w", "h", "time",
                        "acceleration", "delay", "start", "end", "center", "border", "repeat"}
    # Tag names whose text content is expected to be a constant expression.
    constant_nodes = {"posx", "posy", "left", "centerleft", "right", "centerright", "top", "centertop",
                      "bottom", "centerbottom", "width", "height", "offsetx", "offsety", "textoffsetx",
                      "textoffsety", "textwidth", "spinposx", "spinposy", "spinwidth", "spinheight",
                      "radioposx", "radioposy", "radiowidth", "radioheight", "sliderwidth", "sliderheight",
                      "itemgap", "bordersize", "timeperimage", "fadetime", "pauseatend", "depth"}
    # Tag names whose text content is a boolean/visibility expression.
    exp_nodes = {"visible", "enable", "usealttexture", "selected"}
    # Attribute names whose values are boolean/visibility expressions.
    exp_attribs = {"condition"}
    def __init__(self, node, *args, **kwargs):
        """
        node: lxml element this include wraps.
        file: (keyword) path of the xml file the node was parsed from.
        """
        super().__init__(*args, **kwargs)
        self.node = node
        self.file = kwargs.get("file")
        # Number of source lines the node spans, computed from the start line
        # of the next sibling; None when there is no following sibling.
        if self.node.getnext() is not None:
            self.length = self.node.getnext().sourceline - self.node.sourceline
        else:
            self.length = None
    def __getitem__(self, key):
        # Virtual keys are computed from the wrapped node; any other key
        # falls through to the normal dict storage (and may raise KeyError).
        if key == "line":
            return self.line
        elif key == "type":
            return self.tag
        elif key == "name":
            return self.name
        elif key == "filename":
            return self.filename
        elif key == "file":
            return self.file
        elif key == "content":
            # Serialized XML of the node, pretty-printed.
            return ET.tostring(self.node, pretty_print=True, encoding="unicode")
        elif key == "length":
            return self.length
        return super().__getitem__(key)
    def get(self, key):
        # NOTE(review): unlike dict.get, this delegates to __getitem__ and so
        # raises KeyError for unknown keys instead of returning a default.
        return self.__getitem__(key)
    @property
    def line(self):
        """
        returns xml source line of the wrapped node
        """
        return self.node.sourceline
    @property
    def tag(self):
        """
        returns tag of include node
        """
        return self.node.tag
    @property
    def content(self):
        """
        returns include node text
        """
        return self.node.text
    @property
    def name(self):
        """
        returns name of include (the node's "name" attribute, or None)
        """
        return self.node.attrib.get("name")
    @property
    def filename(self):
        """
        returns filename (basename only) of include parent file
        """
        return os.path.basename(self.file)
|
alikins/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py | 25 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_taskdefinition
short_description: register a task definition in ecs
description:
- Registers or deregisters task definitions in the Amazon Web Services (AWS) EC2 Container Service (ECS)
version_added: "2.0"
author: Mark Chance(@Java1Guy)
requirements: [ json, botocore, boto3 ]
options:
state:
description:
- State whether the task definition should exist or be deleted
required: true
choices: ['present', 'absent']
arn:
description:
- The arn of the task description to delete
required: false
family:
description:
- A Name that would be given to the task definition
required: false
revision:
description:
- A revision number for the task definition
required: False
force_create:
description:
- Always create new task definition
required: False
version_added: 2.5
containers:
description:
- A list of containers definitions
required: False
network_mode:
description:
- The Docker networking mode to use for the containers in the task.
required: false
default: bridge
choices: [ 'bridge', 'host', 'none' ]
version_added: 2.3
task_role_arn:
description:
- The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted
the permissions that are specified in this role.
required: false
version_added: 2.3
volumes:
description:
- A list of names of volumes to be attached
required: False
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Create task definition
ecs_taskdefinition:
containers:
- name: simple-app
cpu: 10
essential: true
image: "httpd:2.4"
memory: 300
mountPoints:
- containerPath: /usr/local/apache2/htdocs
sourceVolume: my-vol
portMappings:
- containerPort: 80
hostPort: 80
logConfiguration:
logDriver: awslogs
options:
awslogs-group: ecs
awslogs-region: us-west-2
- name: busybox
command:
- >
/bin/sh -c "while true; do echo '<html><head><title>Amazon ECS Sample App</title></head><body><div><h1>Amazon ECS Sample App</h1><h2>Congratulations!
</h2><p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom;
cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
cpu: 10
entryPoint:
- sh
- "-c"
essential: false
image: busybox
memory: 200
volumesFrom:
- sourceContainer: simple-app
volumes:
- name: my-vol
family: test-cluster-taskdef
state: present
register: task_output
'''
RETURN = '''
taskdefinition:
description: a reflection of the input parameters
type: dict
returned: always
'''
try:
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils._text import to_text
class EcsTaskManager:
    """Thin wrapper around the boto3 ECS client for task-definition CRUD."""

    def __init__(self, module):
        self.module = module
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)

    def describe_task(self, task_name):
        """Return the task definition dict for task_name, or None if it doesn't exist."""
        try:
            response = self.ecs.describe_task_definition(taskDefinition=task_name)
            return response['taskDefinition']
        except botocore.exceptions.ClientError:
            return None

    def register_task(self, family, task_role_arn, network_mode, container_definitions, volumes):
        """Register a new task definition revision and return the resulting definition."""
        validated_containers = []
        # Ensures the number parameters are int as required by boto
        for container in container_definitions:
            for param in ('memory', 'cpu', 'memoryReservation'):
                if param in container:
                    container[param] = int(container[param])
            if 'portMappings' in container:
                for port_mapping in container['portMappings']:
                    for port in ('hostPort', 'containerPort'):
                        if port in port_mapping:
                            port_mapping[port] = int(port_mapping[port])
            validated_containers.append(container)
        try:
            # CONSISTENCY FIX: pass the validated list built above; it used to
            # be constructed but never used (the call only worked because the
            # container dicts were mutated in place).
            response = self.ecs.register_task_definition(family=family,
                                                         taskRoleArn=task_role_arn,
                                                         networkMode=network_mode,
                                                         containerDefinitions=validated_containers,
                                                         volumes=volumes)
        except botocore.exceptions.ClientError as e:
            # BUG FIX: ClientError has no .message attribute on Python 3;
            # to_text(e) works on both Python 2 and 3.
            self.module.fail_json(msg=to_text(e), **camel_dict_to_snake_dict(e.response))
        return response['taskDefinition']

    def describe_task_definitions(self, family):
        """Return full descriptions of every revision in family, sorted ascending by revision."""
        # Shared mutable state for the pagination closure below.
        data = {
            "taskDefinitionArns": [],
            "nextToken": None
        }

        def fetch():
            # Boto3 is weird about params passed, so only pass nextToken if we have a value
            params = {
                'familyPrefix': family
            }
            if data['nextToken']:
                params['nextToken'] = data['nextToken']
            result = self.ecs.list_task_definitions(**params)
            data['taskDefinitionArns'] += result['taskDefinitionArns']
            data['nextToken'] = result.get('nextToken', None)
            return data['nextToken'] is not None

        # Fetch all the arns, possibly across multiple pages
        while fetch():
            pass
        # Return the full descriptions of the task definitions, sorted ascending by revision
        return list(
            sorted(
                [self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
                key=lambda td: td['revision']
            )
        )

    def deregister_task(self, taskArn):
        """Deregister the task definition identified by taskArn and return it."""
        response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
        return response['taskDefinition']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
arn=dict(required=False, type='str'),
family=dict(required=False, type='str'),
revision=dict(required=False, type='int'),
force_create=dict(required=False, default=False, type='bool'),
containers=dict(required=False, type='list'),
network_mode=dict(required=False, default='bridge', choices=['bridge', 'host', 'none'], type='str'),
task_role_arn=dict(required=False, default='', type='str'),
volumes=dict(required=False, type='list')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
task_to_describe = None
task_mgr = EcsTaskManager(module)
results = dict(changed=False)
for container in module.params.get('containers', []):
for environment in container.get('environment', []):
environment['value'] = to_text(environment['value'])
if module.params['state'] == 'present':
if 'containers' not in module.params or not module.params['containers']:
module.fail_json(msg="To use task definitions, a list of containers must be specified")
if 'family' not in module.params or not module.params['family']:
module.fail_json(msg="To use task definitions, a family must be specified")
family = module.params['family']
existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])
if 'revision' in module.params and module.params['revision']:
# The definition specifies revision. We must gurantee that an active revision of that number will result from this.
revision = int(module.params['revision'])
# A revision has been explicitly specified. Attempt to locate a matching revision
tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None
if existing and existing['status'] != "ACTIVE":
# We cannot reactivate an inactive revision
module.fail_json(msg="A task in family '%s' already exists for revsion %d, but it is inactive" % (family, revision))
elif not existing:
if not existing_definitions_in_family and revision != 1:
module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
(revision, existing_definitions_in_family[-1]['revision'] + 1))
else:
existing = None
def _right_has_values_of_left(left, right):
# Make sure the values are equivalent for everything left has
for k, v in left.items():
if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
# We don't care about list ordering because ECS can change things
if isinstance(v, list) and k in right:
left_list = v
right_list = right[k] or []
if len(left_list) != len(right_list):
return False
for list_val in left_list:
if list_val not in right_list:
return False
else:
return False
# Make sure right doesn't have anything that left doesn't
for k, v in right.items():
if v and k not in left:
return False
return True
def _task_definition_matches(requested_volumes, requested_containers, existing_task_definition):
    """Return ``existing_task_definition`` if it is ACTIVE and equivalent to
    the requested volumes/containers, otherwise None.

    Bug fix: the body previously read the loop variable ``td`` from the
    enclosing scope instead of its own ``existing_task_definition``
    parameter; it only worked by accident because the caller happened to
    pass ``td``. It now uses the parameter throughout.
    """
    if existing_task_definition['status'] != "ACTIVE":
        # Inactive revisions can never be reused.
        return None
    existing_volumes = existing_task_definition.get('volumes', []) or []
    if len(requested_volumes) != len(existing_volumes):
        # Different volume counts -> not a match.
        return None
    for requested_vol in requested_volumes:
        # Each requested volume must be matched by some existing volume.
        if not any(_right_has_values_of_left(requested_vol, actual_vol)
                   for actual_vol in existing_volumes):
            return None
    existing_containers = existing_task_definition.get('containerDefinitions', []) or []
    if len(requested_containers) != len(existing_containers):
        return None
    for requested_container in requested_containers:
        # Likewise every requested container needs an equivalent counterpart.
        if not any(_right_has_values_of_left(requested_container, actual_container)
                   for actual_container in existing_containers):
            return None
    return existing_task_definition
# No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
for td in existing_definitions_in_family:
requested_volumes = module.params.get('volumes', []) or []
requested_containers = module.params.get('containers', []) or []
existing = _task_definition_matches(requested_volumes, requested_containers, td)
if existing:
break
if existing and not module.params.get('force_create'):
# Awesome. Have an existing one. Nothing to do.
results['taskdefinition'] = existing
else:
if not module.check_mode:
# Doesn't exist. create it.
volumes = module.params.get('volumes', []) or []
results['taskdefinition'] = task_mgr.register_task(module.params['family'],
module.params['task_role_arn'],
module.params['network_mode'],
module.params['containers'],
volumes)
results['changed'] = True
elif module.params['state'] == 'absent':
# When de-registering a task definition, we can specify the ARN OR the family and revision.
if module.params['state'] == 'absent':
if 'arn' in module.params and module.params['arn'] is not None:
task_to_describe = module.params['arn']
elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \
module.params['revision'] is not None:
task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
else:
module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")
existing = task_mgr.describe_task(task_to_describe)
if not existing:
pass
else:
# It exists, so we should delete it and mark changed. Return info about the task definition deleted
results['taskdefinition'] = existing
if 'status' in existing and existing['status'] == "INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
task_mgr.deregister_task(task_to_describe)
results['changed'] = True
module.exit_json(**results)
if __name__ == '__main__':
main()
|
ResolveWang/algrithm_qa | refs/heads/master | basic_algrithms/sort_algrithms/insert_sort.py | 1 | """
Insertion sort (插入排序).
"""
from basic_algrithms.sort_algrithms.benchmark import Comparator
def insert_sort(arr):
    """Sort ``arr`` in place using insertion sort and return it.

    Falsy or single-element inputs are returned unchanged. The sort is
    stable because only adjacent out-of-order elements are swapped.
    """
    if not arr or len(arr) < 2:
        return arr
    for right in range(1, len(arr)):
        pos = right
        # Bubble the new element leftwards until the prefix is ordered.
        while pos > 0 and arr[pos - 1] > arr[pos]:
            arr[pos - 1], arr[pos] = arr[pos], arr[pos - 1]
            pos -= 1
    return arr
if __name__ == '__main__':
    # Randomized check: insert_sort must agree with the built-in sorted()
    # on many independently generated arrays.
    rounds, size_limit, value_limit = 500, 100, 100
    all_passed = True
    for _ in range(rounds):
        sample = Comparator.gen_random_array(size_limit, value_limit)
        reference = Comparator.copy_arr(sample)
        if not Comparator.is_equal(insert_sort(sample), sorted(reference)):
            all_passed = False
            break
    print('Success' if all_passed else 'Failed ')
|
leekchan/django_test | refs/heads/master | tests/context_processors/models.py | 547 | from django.db import models
class DebugObject(models.Model):
    """Field-less placeholder model.

    NOTE(review): presumably exists only so the test app has a concrete
    model/table to reference — confirm against the tests that import it.
    """
    pass
|
loic/django | refs/heads/master | tests/forms_tests/field_tests/test_imagefield.py | 37 | from __future__ import unicode_literals
import os
import unittest
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import ImageField
from django.test import SimpleTestCase
from django.utils._os import upath
try:
from PIL import Image
except ImportError:
Image = None
def get_img_path(path):
    """Return the absolute path of ``path`` inside the suite's tests/ directory."""
    suite_root = os.path.join(upath(__file__), '..', '..')
    return os.path.join(os.path.abspath(suite_root), 'tests', path)
@unittest.skipUnless(Image, "Pillow is required to test ImageField")
class ImageFieldTest(SimpleTestCase):

    def test_imagefield_annotate_with_image_after_clean(self):
        # clean() should attach the verified PIL image to the uploaded file
        # and correct the (deliberately wrong) browser-supplied content type.
        f = ImageField()
        img_path = get_img_path('filepath_test_files/1x1.png')
        with open(img_path, 'rb') as img_file:
            img_data = img_file.read()

        img_file = SimpleUploadedFile('1x1.png', img_data)
        img_file.content_type = 'text/plain'  # wrong on purpose

        uploaded_file = f.clean(img_file)

        self.assertEqual('PNG', uploaded_file.image.format)
        self.assertEqual('image/png', uploaded_file.content_type)

    def test_imagefield_annotate_with_bitmap_image_after_clean(self):
        """
        This also tests the situation when Pillow doesn't detect the MIME type
        of the image (#24948).
        """
        from PIL.BmpImagePlugin import BmpImageFile
        try:
            # Unregister the BMP MIME type so Pillow cannot report one.
            Image.register_mime(BmpImageFile.format, None)
            f = ImageField()
            img_path = get_img_path('filepath_test_files/1x1.bmp')
            with open(img_path, 'rb') as img_file:
                img_data = img_file.read()

            img_file = SimpleUploadedFile('1x1.bmp', img_data)
            img_file.content_type = 'text/plain'  # wrong on purpose

            uploaded_file = f.clean(img_file)

            self.assertEqual('BMP', uploaded_file.image.format)
            # With no MIME type known to Pillow, content_type stays unset.
            self.assertIsNone(uploaded_file.content_type)
        finally:
            # Restore Pillow's global MIME registry for other tests.
            Image.register_mime(BmpImageFile.format, 'image/bmp')
|
mSenyor/sl4a | refs/heads/master | python/src/Misc/Vim/syntax_test.py | 82 | """Test file for syntax highlighting of editors.
Meant to cover a wide range of different types of statements and expressions.
Not necessarily sensical or comprehensive (assume that if one exception is
highlighted that all are, for instance).
Extraneous trailing whitespace can't be tested because of svn pre-commit hook
checks for such things.
"""
# Comment
# OPTIONAL: XXX catch your attention
# Statements
from __future__ import with_statement # Import
from sys import path as thing
assert True # keyword
def foo(): # function definition
    # Body exists so the 'return' keyword appears in this highlighting fixture.
    return []
class Bar(object): # Class definition
    # Context-manager protocol methods, so the 'with Bar() as stuff:'
    # statement later in this fixture has something to enter/exit.
    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass
foo() # UNCOLOURED: function call
while False: # 'while'
continue
for x in foo(): # 'for'
break
with Bar() as stuff:
pass
if False: pass # 'if'
elif False: pass
else: pass
# Constants
'single-quote', u'unicode' # Strings of all kinds; prefixes not highlighted
"double-quote"
"""triple double-quote"""
'''triple single-quote'''
r'raw'
ur'unicode raw'
'escape\n'
'\04' # octal
'\xFF' # hex
'\u1111' # unicode character
1 # Integral
1L
1.0 # Float
.1
1+2j # Complex
# Expressions
1 and 2 or 3 # Boolean operators
2 < 3 # UNCOLOURED: comparison operators
spam = 42 # UNCOLOURED: assignment
2 + 3 # UNCOLOURED: number operators
[] # UNCOLOURED: list
{} # UNCOLOURED: dict
(1,) # UNCOLOURED: tuple
all # Built-in functions
GeneratorExit # Exceptions
|
wizcoin/wizcoin | refs/heads/master | contrib/wallettools/walletunlock.py | 2299 | from jsonrpc import ServiceProxy
from getpass import getpass

# Unlock the local wallet for 60 seconds via the node's JSON-RPC interface.
access = ServiceProxy("http://127.0.0.1:8332")
# Security fix: getpass() instead of raw_input() so the wallet passphrase is
# not echoed to the terminal (or left in scrollback/shell logs).
pwd = getpass("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
kingvuplus/boom2 | refs/heads/master | lib/python/Plugins/SystemPlugins/NFIFlash/downloader.py | 67 | # -*- coding: utf-8 -*-
from boxbranding import getImageVersion,getMachineBrand, getMachineName
from os import system, access, R_OK
import re
from enigma import eConsoleAppContainer, eTimer
from twisted.web import client
from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Screens.HelpMenu import HelpableScreen
from Screens.TaskView import JobView
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.Sources.List import List
from Components.FileList import FileList
from Components.ScrollLabel import ScrollLabel
from Components.Harddisk import harddiskmanager
from Components.Task import Task, Job, job_manager, Condition
from Tools.Directories import isMount, resolveFilename, SCOPE_HDD, SCOPE_MEDIA
from Tools.HardwareInfo import HardwareInfo
from Tools.Downloader import downloadWithProgress
class ImageDownloadJob(Job):
    """Job that downloads an .NFI image plus its matching .nfo description,
    optionally (re)mounting a target device first."""

    def __init__(self, url, filename, device=None, mountpoint="/"):
        Job.__init__(self, _("Download .NFI-files for USB-flasher"))
        if device:
            # Unmount first if something is already mounted there, then
            # mount the requested device at the mountpoint.
            if isMount(mountpoint):
                UmountTask(self, mountpoint)
            MountTask(self, device, mountpoint)
        ImageDownloadTask(self, url, mountpoint+filename)
        # The .nfo changelog sits next to the .nfi image on the server.
        ImageDownloadTask(self, url[:-4]+".nfo", mountpoint+filename[:-4]+".nfo")
        #if device:
        #UmountTask(self, mountpoint)

    def re	try(self):
        # Re-append the stored retry arguments to the first task, then retry.
        self.tasks[0].args += self.tasks[0].retryargs
        Job.retry(self)
class MountTask(Task):
    """Task wrapping 'mount' to attach the target device read/write.

    Mount options are rw,sync — sync flushes writes immediately, which
    matters for removable media.
    """

    def __init__(self, job, device, mountpoint):
        Task.__init__(self, job, "mount")
        self.setTool("mount")
        options = "rw,sync"
        self.mountpoint = mountpoint
        self.args += [ device, mountpoint, "-o"+options ]
        self.weighting = 1

    def processOutput(self, data):
        print "[MountTask] output:", data
class UmountTask(Task):
    """Task wrapping 'umount' for the given mountpoint.

    NOTE(review): the task is constructed with the label "mount" although
    the tool invoked is umount — looks like a copy/paste leftover.
    """

    def __init__(self, job, mountpoint):
        Task.__init__(self, job, "mount")
        self.setTool("umount")
        self.args += [mountpoint]
        self.weighting = 1
class DownloaderPostcondition(Condition):
    """Postcondition for ImageDownloadTask: the task succeeded when its
    process exit code is 0."""

    def __init__(self):
        pass

    def check(self, task):
        return task.returncode == 0

    def getErrorMessage(self, task):
        # Bug fix: the failure message is stored on the failing task (set in
        # ImageDownloadTask.download_failed), not on this condition object;
        # 'self.error_message' raised AttributeError when a download failed.
        return task.error_message
class ImageDownloadTask(Task):
    """Task that downloads one URL to a local path with progress reporting,
    supporting user abort via abort()."""

    def __init__(self, job, url, path):
        Task.__init__(self, job, _("Downloading"))
        self.postconditions.append(DownloaderPostcondition())
        self.job = job
        self.url = url
        self.path = path
        self.error_message = ""
        self.last_recvbytes = 0      # last byte count shown (anti-flicker)
        self.error_message = None    # NOTE(review): overwrites the "" above
        self.download = None
        self.aborted = False

    def run(self, callback):
        self.callback = callback
        self.download = downloadWithProgress(self.url,self.path)
        self.download.addProgress(self.download_progress)
        self.download.start().addCallback(self.download_finished).addErrback(self.download_failed)
        print "[ImageDownloadTask] downloading", self.url, "to", self.path

    def abort(self):
        print "[ImageDownloadTask] aborting", self.url
        if self.download:
            self.download.stop()
        self.aborted = True

    def download_progress(self, recvbytes, totalbytes):
        #print "[update_progress] recvbytes=%d, totalbytes=%d" % (recvbytes, totalbytes)
        # Only refresh the UI every ~10 kB to avoid flickering.
        if ( recvbytes - self.last_recvbytes ) > 10000: # anti-flicker
            self.progress = int(100*(float(recvbytes)/float(totalbytes)))
            self.name = _("Downloading") + ' ' + "%d of %d kBytes" % (recvbytes/1024, totalbytes/1024)
            self.last_recvbytes = recvbytes

    def download_failed(self, failure_instance=None, error_message=""):
        # Prefer an explicit message; fall back to the twisted Failure's text.
        self.error_message = error_message
        if error_message == "" and failure_instance is not None:
            self.error_message = failure_instance.getErrorMessage()
        Task.processFinished(self, 1)

    def download_finished(self, string=""):
        # A user abort finishes the task without marking it failed.
        if self.aborted:
            self.finish(aborted = True)
        else:
            Task.processFinished(self, 0)
class StickWizardJob(Job):
    """Job that turns a USB stick into a bootable .NFI flasher stick:
    partition it, download the flasher tarball, unpack it, and dd the
    boot image onto the first partition."""

    def __init__(self, path):
        Job.__init__(self, _("USB stick wizard"))
        self.path = path
        # Strip trailing slashes and partition digits to get the raw device
        # node (e.g. "/dev/sda1" -> "/dev/sda").
        self.device = path
        while self.device[-1:] == "/" or self.device[-1:].isdigit():
            self.device = self.device[:-1]
        box = HardwareInfo().get_device_name()
        url = "http://www.dreamboxupdate.com/download/opendreambox/dreambox-nfiflasher-%s.tar.bz2" % box
        self.downloadfilename = "/tmp/dreambox-nfiflasher-%s.tar.bz2" % box
        self.imagefilename = "/tmp/nfiflash_%s.img" % box
        #UmountTask(self, device)
        # Task pipeline: partition -> download -> unpack -> copy to stick.
        PartitionTask(self)
        ImageDownloadTask(self, url, self.downloadfilename)
        UnpackTask(self)
        CopyTask(self)
class PartitionTaskPostcondition(Condition):
    """Postcondition for PartitionTask; maps its error codes to messages."""

    def __init__(self):
        pass

    def check(self, task):
        return task.returncode == 0

    def getErrorMessage(self, task):
        return {
            task.ERROR_BLKRRPART: "Device or resource busy",
            task.ERROR_UNKNOWN: task.errormsg
        }[task.error]
class PartitionTask(Task):
    """Repartitions the stick via sfdisk, driven by a scripted stdin."""

    ERROR_UNKNOWN, ERROR_BLKRRPART = range(2)

    def __init__(self, job):
        Task.__init__(self, job, "partitioning")
        self.postconditions.append(PartitionTaskPostcondition())
        self.job = job
        self.setTool("sfdisk")
        self.args += [self.job.device]
        self.weighting = 10
        # sfdisk script: one primary partition, type 0x6 (FAT16), bootable.
        self.initial_input = "0 - 0x6 *\n;\n;\n;\ny"
        self.errormsg = ""

    def run(self, callback):
        Task.run(self, callback)

    def processOutput(self, data):
        print "[PartitionTask] output:", data
        # BLKRRPART: the kernel could not re-read the partition table
        # (device busy); anything else is reported verbatim as unknown.
        if data.startswith("BLKRRPART:"):
            self.error = self.ERROR_BLKRRPART
        else:
            self.error = self.ERROR_UNKNOWN
            self.errormsg = data
class UnpackTask(Task):
    """Unpacks the downloaded flasher tarball; tar gives no percentage, so
    progress is simulated by a 950 ms timer tick."""

    def __init__(self, job):
        Task.__init__(self, job, "Unpacking USB flasher image...")
        self.job = job
        self.setTool("tar")
        self.args += ["-xjvf", self.job.downloadfilename]
        self.weighting = 80
        self.end = 80
        self.delayTimer = eTimer()
        self.delayTimer.callback.append(self.progress_increment)

    def run(self, callback):
        Task.run(self, callback)
        self.delayTimer.start(950, False)

    def progress_increment(self):
        self.progress += 1

    def processOutput(self, data):
        print "[UnpackTask] output: \'%s\'" % data
        # tar -v prints each extracted file name; remember it as the image.
        # NOTE(review): assumes the tarball yields exactly one relevant file.
        self.job.imagefilename = data

    def afterRun(self):
        self.delayTimer.callback.remove(self.progress_increment)
class CopyTask(Task):
    """dd's the unpacked boot image onto the stick's first partition;
    progress is simulated by a 100 ms timer tick."""

    def __init__(self, job):
        Task.__init__(self, job, "Copying USB flasher boot image to stick...")
        self.job = job
        self.setTool("dd")
        # Target is partition 1 of the job's device (device + "1").
        self.args += ["if=%s" % self.job.imagefilename, "of=%s1" % self.job.device]
        self.weighting = 20
        self.end = 20
        self.delayTimer = eTimer()
        self.delayTimer.callback.append(self.progress_increment)

    def run(self, callback):
        Task.run(self, callback)
        self.delayTimer.start(100, False)

    def progress_increment(self):
        self.progress += 1

    def processOutput(self, data):
        print "[CopyTask] output:", data

    def afterRun(self):
        self.delayTimer.callback.remove(self.progress_increment)
class NFOViewer(Screen):
    """Scrollable dialog showing the .nfo changelog text of an image."""

    skin = """
<screen name="NFOViewer" position="center,center" size="610,410" title="Changelog" >
<widget name="changelog" position="10,10" size="590,380" font="Regular;16" />
</screen>"""

    def __init__(self, session, nfo):
        Screen.__init__(self, session)
        self["changelog"] = ScrollLabel(nfo)
        # Any ok/cancel/color key closes the viewer; up/down scroll the text.
        self["ViewerActions"] = ActionMap(["SetupActions", "ColorActions", "DirectionActions"],
        {
            "green": self.exit,
            "red": self.exit,
            "ok": self.exit,
            "cancel": self.exit,
            "down": self.pageDown,
            "up": self.pageUp
        })

    def pageUp(self):
        self["changelog"].pageUp()

    def pageDown(self):
        self["changelog"].pageDown()

    def exit(self):
        self.close(False)
class feedDownloader:
    """Fetches the image index page of one OE feed and extracts the
    (name, url) pairs of the downloadable .nfi images."""

    def __init__(self, feed_base, box, OE_vers):
        print "[feedDownloader::init] feed_base=%s, box=%s" % (feed_base, box)
        self.feed_base = feed_base
        self.OE_vers = OE_vers
        self.box = box

    def getList(self, callback, errback):
        # Feed layout: <base>/<OE version>/<box model>/images/
        self.urlbase = "%s/%s/%s/images/" % (self.feed_base, self.OE_vers, self.box)
        print "[getList]", self.urlbase
        self.callback = callback
        self.errback = errback
        client.getPage(self.urlbase).addCallback(self.feed_finished).addErrback(self.feed_failed)

    def feed_failed(self, failure_instance):
        print "[feed_failed]", str(failure_instance)
        self.errback(failure_instance.getErrorMessage())

    def feed_finished(self, feedhtml):
        print "[feed_finished]"
        # Scrape anchors of class "nfi" out of the HTML index page.
        fileresultmask = re.compile("<a class=[\'\"]nfi[\'\"] href=[\'\"](?P<url>.*?)[\'\"]>(?P<name>.*?.nfi)</a>", re.DOTALL)
        searchresults = fileresultmask.finditer(feedhtml)
        fileresultlist = []
        if searchresults:
            for x in searchresults:
                url = x.group("url")
                # Resolve relative hrefs against the feed directory.
                if url[0:7] != "http://":
                    url = self.urlbase + x.group("url")
                name = x.group("name")
                entry = (name, url)
                fileresultlist.append(entry)
        self.callback(fileresultlist, self.OE_vers)
class DeviceBrowser(Screen, HelpableScreen):
    """File/device browser used to pick the target medium for a download;
    reacts to hotplug events while the dialog is open."""

    skin = """
<screen name="DeviceBrowser" position="center,center" size="520,430" title="Please select target medium" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="message" render="Label" position="5,50" size="510,150" font="Regular;16" />
<widget name="filelist" position="5,210" size="510,220" scrollbarMode="showOnDemand" />
</screen>"""

    def __init__(self, session, startdir, message="", showDirectories = True, showFiles = True, showMountpoints = True, matchingPattern = "", useServiceRef = False, inhibitDirs = False, inhibitMounts = False, isTop = False, enableWrapAround = False, additionalExtensions = None):
        Screen.__init__(self, session)
        HelpableScreen.__init__(self)
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText()
        self["message"] = StaticText(message)
        self.filelist = FileList(startdir, showDirectories = showDirectories, showFiles = showFiles, showMountpoints = showMountpoints, matchingPattern = matchingPattern, useServiceRef = useServiceRef, inhibitDirs = inhibitDirs, inhibitMounts = inhibitMounts, isTop = isTop, enableWrapAround = enableWrapAround, additionalExtensions = additionalExtensions)
        self["filelist"] = self.filelist
        self["FilelistActions"] = ActionMap(["SetupActions", "ColorActions"],
        {
            "green": self.use,
            "red": self.exit,
            "ok": self.ok,
            "cancel": self.exit
        })
        # Refresh the green button whenever a device is (un)plugged.
        hotplugNotifier.append(self.hotplugCB)
        self.onShown.append(self.updateButton)
        self.onClose.append(self.removeHotplug)

    def hotplugCB(self, dev, action):
        print "[hotplugCB]", dev, action
        self.updateButton()

    def updateButton(self):
        # Offer the green "Use" action only when something is selectable.
        if self["filelist"].getFilename() or self["filelist"].getCurrentDirectory():
            self["key_green"].text = _("Use")
        else:
            self["key_green"].text = ""

    def removeHotplug(self):
        print "[removeHotplug]"
        hotplugNotifier.remove(self.hotplugCB)

    def ok(self):
        if self.filelist.canDescent():
            # In pure mountpoint-selection mode OK selects; otherwise it
            # descends into the directory.
            if self["filelist"].showMountpoints == True and self["filelist"].showDirectories == False:
                self.use()
            else:
                self.filelist.descent()

    def use(self):
        print "[use]", self["filelist"].getCurrentDirectory(), self["filelist"].getFilename()
        if self["filelist"].getCurrentDirectory() is not None:
            if self.filelist.canDescent() and self["filelist"].getFilename() and len(self["filelist"].getFilename()) > len(self["filelist"].getCurrentDirectory()):
                self.filelist.descent()
            self.close(self["filelist"].getCurrentDirectory())
        elif self["filelist"].getFilename():
            self.close(self["filelist"].getFilename())

    def exit(self):
        self.close(False)
(ALLIMAGES, RELEASE, EXPERIMENTAL, STICK_WIZARD, START) = range(5)
class NFIDownload(Screen):
skin = """
<screen name="NFIDownload" position="center,center" size="610,410" title="NFIDownload" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#18188b" transparent="1" />
<ePixmap pixmap="border_menu_350.png" position="5,50" zPosition="1" size="350,300" transparent="1" alphatest="on" />
<widget source="menu" render="Listbox" position="15,60" size="330,290" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"templates":
{"default": (25, [
MultiContentEntryText(pos = (2, 2), size = (330, 24), flags = RT_HALIGN_LEFT, text = 1), # index 0 is the MenuText,
], True, "showOnDemand")
},
"fonts": [gFont("Regular", 22)],
"itemHeight": 25
}
</convert>
</widget>
<widget source="menu" render="Listbox" position="360,50" size="240,300" scrollbarMode="showNever" selectionDisabled="1">
<convert type="TemplatedMultiContent">
{"templates":
{"default": (300, [
MultiContentEntryText(pos = (2, 2), size = (240, 300), flags = RT_HALIGN_CENTER|RT_VALIGN_CENTER|RT_WRAP, text = 2), # index 2 is the Description,
], False, "showNever")
},
"fonts": [gFont("Regular", 22)],
"itemHeight": 300
}
</convert>
</widget>
<widget source="status" render="Label" position="5,360" zPosition="10" size="600,50" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, destdir=None):
Screen.__init__(self, session)
#self.skin_path = plugin_path
#self.menu = args
self.box = HardwareInfo().get_device_name()
self.feed_base = "http://www.dreamboxupdate.com/opendreambox" #/1.5/%s/images/" % self.box
self.usbmountpoint = resolveFilename(SCOPE_MEDIA)+"usb/"
self.menulist = []
self["menu"] = List(self.menulist)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText()
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["status"] = StaticText(_("Please wait... Loading list..."))
self["shortcuts"] = ActionMap(["OkCancelActions", "ColorActions", "ShortcutActions", "DirectionActions"],
{
"ok": self.keyOk,
"green": self.keyOk,
"red": self.keyRed,
"blue": self.keyBlue,
"up": self.keyUp,
"upRepeated": self.keyUp,
"downRepeated": self.keyDown,
"down": self.keyDown,
"cancel": self.close,
}, -1)
self.onShown.append(self.go)
self.feedlists = [[],[],[]]
self.branch = START
self.container = eConsoleAppContainer()
self.container.dataAvail.append(self.tool_avail)
self.taskstring = ""
self.image_idx = 0
self.nfofilename = ""
self.nfo = ""
self.target_dir = None
def tool_avail(self, string):
print "[tool_avail]" + string
self.taskstring += string
def go(self):
self.onShown.remove(self.go)
self.umountCallback = self.getMD5
self.umount()
def getMD5(self):
url = "http://www.dreamboxupdate.com/download/opendreambox/dreambox-nfiflasher-%s-md5sums" % self.box
client.getPage(url).addCallback(self.md5sums_finished).addErrback(self.feed_failed)
def md5sums_finished(self, data):
print "[md5sums_finished]", data
self.stickimage_md5 = data
self.checkUSBStick()
def keyRed(self):
if self.branch == START:
self.close()
else:
self.branch = START
self["menu"].setList(self.menulist)
#elif self.branch == ALLIMAGES or self.branch == STICK_WIZARD:
def keyBlue(self):
if self.nfo != "":
self.session.open(NFOViewer, self.nfo)
def keyOk(self):
print "[keyOk]", self["menu"].getCurrent()
current = self["menu"].getCurrent()
if current:
if self.branch == START:
currentEntry = current[0]
if currentEntry == RELEASE:
self.image_idx = 0
self.branch = RELEASE
self.askDestination()
elif currentEntry == EXPERIMENTAL:
self.image_idx = 0
self.branch = EXPERIMENTAL
self.askDestination()
elif currentEntry == ALLIMAGES:
self.branch = ALLIMAGES
self.listImages()
elif currentEntry == STICK_WIZARD:
self.askStartWizard()
elif self.branch == ALLIMAGES:
self.image_idx = self["menu"].getIndex()
self.askDestination()
self.updateButtons()
def keyUp(self):
self["menu"].selectPrevious()
self.updateButtons()
def keyDown(self):
self["menu"].selectNext()
self.updateButtons()
def updateButtons(self):
current = self["menu"].getCurrent()
if current:
if self.branch == START:
self["key_red"].text = _("Close")
currentEntry = current[0]
if currentEntry in (RELEASE, EXPERIMENTAL):
self.nfo_download(currentEntry, 0)
self["key_green"].text = _("Download")
else:
self.nfofilename = ""
self.nfo = ""
self["key_blue"].text = ""
self["key_green"].text = _("continue")
elif self.branch == ALLIMAGES:
self["key_red"].text = _("Back")
self["key_green"].text = _("Download")
self.nfo_download(ALLIMAGES, self["menu"].getIndex())
def listImages(self):
print "[listImages]"
imagelist = []
mask = re.compile("%s/(?P<OE_vers>1\.\d)/%s/images/(?P<branch>.*?)-%s_(?P<version>.*?).nfi" % (self.feed_base, self.box, self.box), re.DOTALL)
for name, url in self.feedlists[ALLIMAGES]:
result = mask.match(url)
if result:
if result.group("version").startswith("20"):
version = ( result.group("version")[:4]+'-'+result.group("version")[4:6]+'-'+result.group("version")[6:8] )
else:
version = result.group("version")
description = "\nOpendreambox %s\n%s image\n%s\n" % (result.group("OE_vers"), result.group("branch"), version)
imagelist.append((url, name, _("Download %s from server" ) % description, None))
self["menu"].setList(imagelist)
def getUSBPartitions(self):
allpartitions = [ (r.description, r.mountpoint) for r in harddiskmanager.getMountedPartitions(onlyhotplug = True)]
print "[getUSBPartitions]", allpartitions
usbpartition = []
for x in allpartitions:
print x, x[1] == '/', x[0].find("USB"), access(x[1], R_OK)
if x[1] != '/' and x[0].find("USB") > -1: # and access(x[1], R_OK) is True:
usbpartition.append(x)
return usbpartition
def askDestination(self):
usbpartition = self.getUSBPartitions()
if len(usbpartition) == 1:
self.target_dir = usbpartition[0][1]
self.ackDestinationDevice(device_description=usbpartition[0][0])
else:
self.openDeviceBrowser()
def openDeviceBrowser(self):
self.session.openWithCallback(self.DeviceBrowserClosed, DeviceBrowser, None, showDirectories=True, showMountpoints=True, inhibitMounts=["/autofs/sr0/"])
def DeviceBrowserClosed(self, path):
print "[DeviceBrowserClosed]", str(path)
self.target_dir = path
if path:
self.ackDestinationDevice()
else:
self.keyRed()
def ackDestinationDevice(self, device_description=None):
if device_description is None:
dev = self.target_dir
else:
dev = device_description
message = _("Do you want to download the image to %s ?") % dev
choices = [(_("Yes"), self.ackedDestination), (_("List of storage devices"),self.openDeviceBrowser), (_("Cancel"),self.keyRed)]
self.session.openWithCallback(self.ackDestination_query, ChoiceBox, title=message, list=choices)
def ackDestination_query(self, choice):
print "[ackDestination_query]", choice
if isinstance(choice, tuple):
choice[1]()
else:
self.keyRed()
def ackedDestination(self):
print "[ackedDestination]", self.branch, self.target_dir
self.container.setCWD(resolveFilename(SCOPE_MEDIA)+"usb/")
if self.target_dir[:8] == "/autofs/":
self.target_dir = "/dev/" + self.target_dir[8:-1]
if self.branch == STICK_WIZARD:
job = StickWizardJob(self.target_dir)
job.afterEvent = "close"
job_manager.AddJob(job)
job_manager.failed_jobs = []
self.session.openWithCallback(self.StickWizardCB, JobView, job, afterEventChangeable = False)
elif self.branch != STICK_WIZARD:
url = self.feedlists[self.branch][self.image_idx][1]
filename = self.feedlists[self.branch][self.image_idx][0]
print "[getImage] start downloading %s to %s" % (url, filename)
if self.target_dir.startswith("/dev/"):
job = ImageDownloadJob(url, filename, self.target_dir, self.usbmountpoint)
else:
job = ImageDownloadJob(url, filename, None, self.target_dir)
job.afterEvent = "close"
job_manager.AddJob(job)
job_manager.failed_jobs = []
self.session.openWithCallback(self.ImageDownloadCB, JobView, job, afterEventChangeable = False)
def StickWizardCB(self, ret=None):
print "[StickWizardCB]", ret
# print job_manager.active_jobs, job_manager.failed_jobs, job_manager.job_classes, job_manager.in_background, job_manager.active_job
if len(job_manager.failed_jobs) == 0:
self.session.open(MessageBox, _("The USB stick was prepared to be bootable.\nNow you can download an NFI image file!"), type = MessageBox.TYPE_INFO)
if len(self.feedlists[ALLIMAGES]) == 0:
self.getFeed()
else:
self.setMenu()
else:
self.umountCallback = self.checkUSBStick
self.umount()
def ImageDownloadCB(self, ret):
print "[ImageDownloadCB]", ret
# print job_manager.active_jobs, job_manager.failed_jobs, job_manager.job_classes, job_manager.in_background, job_manager.active_job
if len(job_manager.failed_jobs) == 0:
self.session.openWithCallback(self.askBackupCB, MessageBox, _("The wizard can backup your current settings. Do you want to do a backup now?"), MessageBox.TYPE_YESNO)
else:
self.umountCallback = self.keyRed
self.umount()
def askBackupCB(self, ret):
if ret:
from Plugins.SystemPlugins.SoftwareManager.BackupRestore import BackupScreen
class USBBackupScreen(BackupScreen):
def __init__(self, session, usbmountpoint):
BackupScreen.__init__(self, session, runBackup = True)
self.backuppath = usbmountpoint
self.fullbackupfilename = self.backuppath + "/" + self.backupfile
self.session.openWithCallback(self.showHint, USBBackupScreen, self.usbmountpoint)
else:
self.showHint()
def showHint(self, ret=None):
self.session.open(MessageBox, _("To update your %s %s firmware, please follow these steps:\n1) Turn off your box with the rear power switch and make sure the bootable USB stick is plugged in.\n2) Turn mains back on and hold the DOWN button on the front panel pressed for 10 seconds.\n3) Wait for bootup and follow instructions of the wizard.") % (getMachineBrand(), getMachineName()), type = MessageBox.TYPE_INFO)
self.umountCallback = self.keyRed
self.umount()
def getFeed(self):
self.feedDownloader15 = feedDownloader(self.feed_base, self.box, OE_vers="1.5")
self.feedDownloader16 = feedDownloader(self.feed_base, self.box, OE_vers="1.6")
self.feedlists = [[],[],[]]
self.feedDownloader15.getList(self.gotFeed, self.feed_failed)
self.feedDownloader16.getList(self.gotFeed, self.feed_failed)
def feed_failed(self, message=""):
self["status"].text = _("Could not connect to %s %s .NFI image feed server:") % (getMachineBrand(), getMachineName()) + "\n" + str(message) + "\n" + _("Please check your network settings!")
def gotFeed(self, feedlist, OE_vers):
print "[gotFeed]", OE_vers
releaselist = []
experimentallist = []
for name, url in feedlist:
if name.find("release") > -1:
releaselist.append((name, url))
if name.find("experimental") > -1:
experimentallist.append((name, url))
self.feedlists[ALLIMAGES].append((name, url))
if OE_vers == "1.6":
self.feedlists[RELEASE] = releaselist + self.feedlists[RELEASE]
self.feedlists[EXPERIMENTAL] = experimentallist + self.feedlists[RELEASE]
elif OE_vers == "1.5":
self.feedlists[RELEASE] = self.feedlists[RELEASE] + releaselist
self.feedlists[EXPERIMENTAL] = self.feedlists[EXPERIMENTAL] + experimentallist
self.setMenu()
def checkUSBStick(self):
self.target_dir = None
allpartitions = [ (r.description, r.mountpoint) for r in harddiskmanager.getMountedPartitions(onlyhotplug = True)]
print "[checkUSBStick] found partitions:", allpartitions
usbpartition = []
for x in allpartitions:
print x, x[1] == '/', x[0].find("USB"), access(x[1], R_OK)
if x[1] != '/' and x[0].find("USB") > -1: # and access(x[1], R_OK) is True:
usbpartition.append(x)
print usbpartition
if len(usbpartition) == 1:
self.target_dir = usbpartition[0][1]
self.md5_passback = self.getFeed
self.md5_failback = self.askStartWizard
self.md5verify(self.stickimage_md5, self.target_dir)
elif not usbpartition:
print "[NFIFlash] needs to create usb flasher stick first!"
self.askStartWizard()
else:
self.askStartWizard()
def askStartWizard(self):
self.branch = STICK_WIZARD
message = _("""This plugin creates a USB stick which can be used to update the firmware of your %s %s without the need for a network or WLAN connection.
First, a USB stick needs to be prepared so that it becomes bootable.
In the next step, an NFI image file can be downloaded from the update server and saved on the USB stick.
If you already have a prepared bootable USB stick, please insert it now. Otherwise plug in a USB stick with a minimum size of 64 MB!""") % (getMachineBrand(), getMachineName())
self.session.openWithCallback(self.wizardDeviceBrowserClosed, DeviceBrowser, None, message, showDirectories=True, showMountpoints=True, inhibitMounts=["/","/autofs/sr0/","/autofs/sda1/","/media/hdd/","/media/net/",self.usbmountpoint,"/media/dvd/"])
	def wizardDeviceBrowserClosed(self, path):
		"""DeviceBrowser callback: *path* is the chosen mountpoint, or a
		false value when the user cancelled (then the screen closes)."""
		print "[wizardDeviceBrowserClosed]", path
		self.target_dir = path
		if path:
			# Verify the stick image; an invalid stick leads to wizardQuery
			# (offer to re-create the flasher stick).
			self.md5_passback = self.getFeed
			self.md5_failback = self.wizardQuery
			self.md5verify(self.stickimage_md5, self.target_dir)
		else:
			self.close()
	def wizardQuery(self):
		"""Confirm (re)creation of the flasher stick on the chosen device.

		Warns that the device will be repartitioned and all data erased,
		then opens a ChoiceBox with proceed / re-pick / cancel actions.
		"""
		print "[wizardQuery]"
		# Prefer the human-readable partition name over the raw mountpoint.
		description = self.target_dir
		for name, dev in self.getUSBPartitions():
			if dev == self.target_dir:
				description = name
		message = _("You have chosen to create a new .NFI flasher bootable USB stick. This will repartition the USB stick and therefore all data on it will be erased.") + "\n"
		message += _("The following device was found:\n\n%s\n\nDo you want to write the USB flasher to this stick?") % description
		choices = [(_("Yes"), self.ackedDestination), (_("List of storage devices"),self.askStartWizard), (_("Cancel"),self.close)]
		self.session.openWithCallback(self.ackDestination_query, ChoiceBox, title=message, list=choices)
	def setMenu(self):
		"""Populate the main menu from the downloaded feed lists.

		The "latest" entries are only added when the corresponding feed
		actually has entries (IndexError on an empty list is expected and
		simply skips the entry).
		"""
		self.menulist = []
		try:
			# Version substring is sliced out of the newest feed filename.
			latest_release = "Release %s (Opendreambox 1.5)" % self.feedlists[RELEASE][0][0][-9:-4]
			self.menulist.append((RELEASE, _("Get latest release image"), _("Download %s from server" ) % latest_release, None))
		except IndexError:
			pass
		try:
			# Filename is assumed to end in a YYYYMMDD date -- TODO confirm.
			dat = self.feedlists[EXPERIMENTAL][0][0][-12:-4]
			latest_experimental = "Experimental %s-%s-%s (Opendreambox 1.6)" % (dat[:4], dat[4:6], dat[6:])
			self.menulist.append((EXPERIMENTAL, _("Get latest experimental image"), _("Download %s from server") % latest_experimental, None))
		except IndexError:
			pass
		self.menulist.append((ALLIMAGES, _("Select an image to be downloaded"), _("Select desired image from feed list" ), None))
		self.menulist.append((STICK_WIZARD, _("USB stick wizard"), _("Prepare another USB stick for image flashing" ), None))
		self["menu"].setList(self.menulist)
		self["status"].text = _("Currently installed image") + ": %s" % (getImageVersion())
		self.branch = START
		self.updateButtons()
	def nfo_download(self, branch, idx):
		"""Asynchronously fetch the .nfo changelog next to the selected
		.nfi image (same URL/file name with the extension swapped)."""
		nfourl = (self.feedlists[branch][idx][1])[:-4]+".nfo"
		self.nfofilename = (self.feedlists[branch][idx][0])[:-4]+".nfo"
		print "[check_for_NFO]", nfourl
		client.getPage(nfourl).addCallback(self.nfo_finished).addErrback(self.nfo_failed)
	def nfo_failed(self, failure_instance):
		"""Errback for nfo_download(): no changelog available, so clear the
		blue-button label and any cached .nfo state."""
		print "[nfo_failed] " + str(failure_instance)
		self["key_blue"].text = ""
		self.nfofilename = ""
		self.nfo = ""
	def nfo_finished(self,nfodata=""):
		"""Callback for nfo_download(): cache the changelog text and enable
		the blue "Changelog" button."""
		print "[nfo_finished] " + str(nfodata)
		self["key_blue"].text = _("Changelog")
		self.nfo = nfodata
	def md5verify(self, md5, path):
		"""Verify *md5* (md5sum-format text) against files under *path*.

		Runs ``md5sum -c -s`` in *path* via the console container and feeds
		the checksum text on stdin; md5finished() receives the exit status
		and dispatches to self.md5_passback / self.md5_failback.
		"""
		cmd = "md5sum -c -s"
		print "[verify_md5]", md5, path, cmd
		self.container.setCWD(path)
		self.container.appClosed.append(self.md5finished)
		self.container.execute(cmd)
		# Write the checksum list to md5sum's stdin; md5ready() closes it
		# once the data has been sent.
		self.container.write(md5)
		self.container.dataSent.append(self.md5ready)
	def md5ready(self, retval):
		# Checksum data fully written: close md5sum's stdin so it can
		# finish and report its exit status.
		self.container.sendEOF()
	def md5finished(self, retval):
		"""md5sum exit handler: detach the one-shot callbacks, then invoke
		the pass/fail continuation set up by the caller of md5verify()."""
		print "[md5finished]", str(retval)
		self.container.appClosed.remove(self.md5finished)
		self.container.dataSent.remove(self.md5ready)
		if retval==0:
			print "check passed! calling", repr(self.md5_passback)
			self.md5_passback()
		else:
			print "check failed! calling", repr(self.md5_failback)
			self.md5_failback()
	def umount(self):
		"""Unmount the USB stick mountpoint; umountFinished() runs the
		stored self.umountCallback afterwards."""
		cmd = "umount " + self.usbmountpoint
		print "[umount]", cmd
		# Leave the mountpoint before unmounting it.
		self.container.setCWD('/')
		self.container.appClosed.append(self.umountFinished)
		self.container.execute(cmd)
	def umountFinished(self, retval):
		"""umount exit handler: detach the callback and continue with
		self.umountCallback (set by the caller of umount())."""
		print "[umountFinished]", str(retval)
		self.container.appClosed.remove(self.umountFinished)
		self.umountCallback()
def main(session, **kwargs):
	"""Plugin entry point: open the NFIDownload screen on the HDD path."""
	session.open(NFIDownload,resolveFilename(SCOPE_HDD))
def filescan_open(list, session, **kwargs):
	"""Scanner callback: mount the device holding the scanned .nfi file
	under /media/usb and open NFIDownload on it.

	Note: the parameter name ``list`` shadows the builtin; kept for
	interface compatibility with the Scanner openfnc signature.
	"""
	# Derives /dev/<name> from the scanned file's path; assumes the path
	# starts with a 7-char prefix such as '/autofs' -- TODO confirm.
	dev = "/dev/" + list[0].path.rsplit('/',1)[0][7:]
	print "mounting device " + dev + " to /media/usb..."
	usbmountpoint = resolveFilename(SCOPE_MEDIA)+"usb/"
	system("mount %s %s -o rw,sync" % (dev, usbmountpoint))
	session.open(NFIDownload,usbmountpoint)
def filescan(**kwargs):
	"""Register a Scanner so inserted media containing .nfi images
	(mimetype application/x-dream-image) offer the NFI download plugin."""
	# Local import keeps the Scanner dependency out of normal plugin start.
	from Components.Scanner import Scanner, ScanPath
	return \
		Scanner(mimetypes = ["application/x-dream-image"],
			paths_to_scan =
				[
					ScanPath(path = "", with_subdirs = False),
				],
			name = "NFI",
			description = (_("Download .NFI-files for USB-flasher")+"..."),
			openfnc = filescan_open, )
|
chenss/ChatRoom | refs/heads/master | 14.5 已经能运行(虽然有很多Warning)的Django-nonrel框架/django/contrib/databrowse/sites.py | 329 | from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
class AlreadyRegistered(Exception):
    """Raised when a model is registered with a databrowse site twice."""
class NotRegistered(Exception):
    """Raised when unregistering a model that was never registered."""
class DatabrowsePlugin(object):
    """Base class for databrowse plugins.

    Subclasses must implement ``model_view`` and may override ``urls`` and
    ``model_index_html`` to contribute links and index-page snippets.
    """

    def urls(self, plugin_name, easy_instance_field):
        """Return a list of absolute URLs this plugin provides for the
        given EasyInstanceField, or None when it provides none."""
        return None

    def model_index_html(self, request, model, site):
        """Return an HTML snippet for the model index page; empty by
        default."""
        return ''

    def model_view(self, request, model_databrowse, url):
        """Route a plugin-specific request. Abstract: subclasses must
        provide the implementation."""
        raise NotImplementedError
class ModelDatabrowse(object):
    """Databrowse request handler for a single registered model."""

    plugins = {}

    def __init__(self, model, site):
        self.model = model
        self.site = site

    def root(self, request, url):
        """
        Handles main URL routing for the databrowse app.

        `url` is the remainder of the URL -- e.g. 'objects/3'. None means
        the model's main page; otherwise the first path segment names the
        plugin and the remainder (or None) is handed to it.
        """
        if url is None:
            return self.main_view(request)
        plugin_name, sep, remainder = url.partition('/')
        rest_of_url = remainder if sep else None
        plugin = self.plugins.get(plugin_name)
        if plugin is None:
            raise http.Http404('A plugin with the requested name does not exist.')
        return plugin.model_view(request, self, rest_of_url)

    def main_view(self, request):
        """Render the model detail page, including every plugin's index
        snippet."""
        easy_model = EasyModel(self.site, self.model)
        snippets = [p.model_index_html(request, self.model, self.site)
                    for p in self.plugins.values()]
        return render_to_response('databrowse/model_detail.html', {
            'model': easy_model,
            'root_url': self.site.root_url,
            'plugin_html': mark_safe(u'\n'.join(snippets)),
        })
class DatabrowseSite(object):
    """Registry mapping model classes to databrowse handler classes, plus
    URL dispatch for the databrowse app."""

    def __init__(self):
        self.registry = {} # model_class -> databrowse_class
        self.root_url = None

    def register(self, model_or_iterable, databrowse_class=None, **options):
        """
        Registers the given model(s) with the given databrowse site.

        The model(s) should be Model classes, not instances.

        If a databrowse class isn't given, it will use DefaultModelDatabrowse
        (the default databrowse options).

        If a model is already registered, this will raise AlreadyRegistered.
        """
        databrowse_class = databrowse_class or DefaultModelDatabrowse
        # Fix: issubclass() raises TypeError when handed an iterable, which
        # broke the documented "model(s)" contract for lists/tuples. Probe
        # with isinstance(..., type) before calling issubclass().
        if isinstance(model_or_iterable, type) and issubclass(model_or_iterable, models.Model):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model in self.registry:
                raise AlreadyRegistered('The model %s is already registered' % model.__name__)
            self.registry[model] = databrowse_class

    def unregister(self, model_or_iterable):
        """
        Unregisters the given model(s).

        If a model isn't already registered, this will raise NotRegistered.
        """
        # Same single-class vs. iterable normalization as register().
        if isinstance(model_or_iterable, type) and issubclass(model_or_iterable, models.Model):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model not in self.registry:
                raise NotRegistered('The model %s is not registered' % model.__name__)
            del self.registry[model]

    def root(self, request, url):
        """
        Handles main URL routing for the databrowse app.

        `url` is the remainder of the URL -- e.g. 'comments/comment/'.
        """
        # Everything before `url` in the request path is the app's mount
        # point; remember it for link generation in the templates.
        self.root_url = request.path[:len(request.path) - len(url)]
        url = url.rstrip('/') # Trim trailing slash, if it exists.
        if url == '':
            return self.index(request)
        elif '/' in url:
            return self.model_page(request, *url.split('/', 2))
        raise http.Http404('The requested databrowse page does not exist.')

    def index(self, request):
        """Render the homepage listing every registered model."""
        m_list = [EasyModel(self, m) for m in self.registry.keys()]
        return render_to_response('databrowse/homepage.html', {'model_list': m_list, 'root_url': self.root_url})

    def model_page(self, request, app_label, model_name, rest_of_url=None):
        """
        Handles the model-specific functionality of the databrowse site, delegating
        to the appropriate ModelDatabrowse class.
        """
        model = models.get_model(app_label, model_name)
        if model is None:
            raise http.Http404("App %r, model %r, not found." % (app_label, model_name))
        try:
            databrowse_class = self.registry[model]
        except KeyError:
            raise http.Http404("This model exists but has not been registered with databrowse.")
        return databrowse_class(model, self).root(request, rest_of_url)
# Shared module-level site instance used by the databrowse views/URLconf.
site = DatabrowseSite()
from django.contrib.databrowse.plugins.calendars import CalendarPlugin
from django.contrib.databrowse.plugins.objects import ObjectDetailPlugin
from django.contrib.databrowse.plugins.fieldchoices import FieldChoicePlugin
class DefaultModelDatabrowse(ModelDatabrowse):
    # Default plugin set: object detail pages, calendar views for date
    # fields, and per-field value listings.
    plugins = {'objects': ObjectDetailPlugin(), 'calendars': CalendarPlugin(), 'fields': FieldChoicePlugin()}
|
0x3a/crits | refs/heads/master | crits/core/api.py | 2 | import json
import yaml
from bson.objectid import ObjectId
from dateutil.parser import parse
from django.http import HttpResponse
from lxml.etree import tostring
from django.core.urlresolvers import resolve, get_script_prefix
from tastypie.exceptions import BadRequest, ImmediateHttpResponse
from tastypie.serializers import Serializer
from tastypie.authentication import SessionAuthentication, ApiKeyAuthentication
from tastypie.utils.mime import build_content_type
from tastypie_mongoengine.resources import MongoEngineResource
from crits.core.data_tools import format_file, create_zip
from crits.core.handlers import download_object_handler, remove_quotes, generate_regex
from crits.core.source_access import SourceAccess
from crits.core.user_tools import user_sources
# The following leverages code from the Tastypie library.
class CRITsApiKeyAuthentication(ApiKeyAuthentication):
    """
    API Key Authentication Class.
    """

    def is_authenticated(self, request, **kwargs):
        """
        Determine if the user can properly authenticate with the
        username and API key they provided.

        :param request: Django request object (Required)
        :type request: :class:`django.http.HttpRequest`
        :returns: True, :class:`tastypie.http.HttpUnauthorized`
        """
        try:
            username, api_key = self.extract_credentials(request)
        except ValueError:
            return self._unauthorized()
        if not username or not api_key:
            return self._unauthorized()
        try:
            from crits.core.user import CRITsUser
            user = CRITsUser.objects(username=username).first()
        except Exception:
            # Narrowed from a bare ``except:`` so system-exiting
            # exceptions (KeyboardInterrupt/SystemExit) still propagate.
            return self._unauthorized()
        if not user:
            return self._unauthorized()
        if not user.is_active:
            return self._unauthorized()
        key_auth_check = self.get_key(user, api_key)
        if key_auth_check:
            # Attach the authenticated user so downstream resource code
            # can rely on request.user.username.
            request.user = user
            return True
        else:
            return self._unauthorized()

    def get_key(self, user, api_key):
        """
        Attempts to find the API key for the user. Uses ``ApiKey`` by default
        but can be overridden.

        :param user: The user trying to authenticate.
        :type user: str
        :param api_key: The key the user is trying to authenticate with.
        :type api_key: str
        :returns: True, False
        """
        if user:
            if user.validate_api_key(api_key):
                return True
        return False
class CRITsSessionAuthentication(SessionAuthentication):
    """
    API Authentication leveraging an existing Django browser session.
    """

    def get_identifier(self, request):
        """
        Returns the username as the identifier.

        :param request: Django request object (Required)
        :type request: :class:`django.http.HttpRequest`
        :returns: str
        """
        # Presumably used by tastypie for throttling/logging identity --
        # TODO confirm against the tastypie version in use.
        return request.user.username
class CRITsSerializer(Serializer):
"""
Custom serializer for CRITs.
"""
formats = ['json', 'xml', 'yaml', 'stix', 'file']
content_types = {
'json': 'application/json',
'xml': 'application/xml',
'yaml': 'text/yaml',
'stix': 'application/stix+xml',
'file': 'application/octet-stream',
}
def _format_data(self, filedata, file_format=None):
"""
Format filedata based on request.
:param filedata: The filedata to format.
:type filedata: str
:param file_format: The format the file should be in:
"base64", "zlib", "raw", "invert".
:type file_format: str
:returns: list of [<formatted data>, <file extension>]
"""
if file_format not in ('base64', 'zlib', 'raw', 'invert'):
file_format = 'raw'
return format_file(filedata, file_format)[0]
def to_file(self, data, options=None):
"""
Respond with filedata instead of metadata.
:param data: The data to be worked on.
:type data: dict for multiple objects,
:class:`tastypie.bundle.Bundle` for a single object.
:param options: Options to alter how this serializer works.
:type options: dict
:returns: :class:`django.http.HttpResponse`,
:class:`tastypie.exceptions.BadRequest`
"""
get_file = options.get('file', None)
file_format = options.get('file_format', 'raw')
response = None
zipfile = None
if get_file:
files = []
if hasattr(data, 'obj'):
if hasattr(data.obj, 'filedata'):
filename = data.obj.md5
filedata = data.obj.filedata.read()
if filedata:
filedata = self._format_data(filedata, file_format)
files.append([filename, filedata])
elif hasattr(data.obj, 'screenshot'):
filename = "%s.png" % data.obj.md5
filedata = data.obj.screenshot.read()
if filedata:
files.append([filename, filedata])
elif 'objects' in data:
try:
objs = data['objects']
for obj_ in objs:
if hasattr(obj_.obj, 'filedata'):
filename = obj_.obj.md5
filedata = obj_.obj.filedata.read()
if filedata:
filedata = self._format_data(filedata,
file_format)
files.append([filename, filedata])
elif hasattr(obj_.obj, 'screenshot'):
filename = "%s.png" % data.obj.md5
filedata = data.obj.screenshot.read()
if filedata:
files.append([filename, filedata])
except:
pass
try:
if len(files):
zipfile = create_zip(files)
response = HttpResponse(zipfile,
mimetype='application/octet-stream; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename="results.zip"'
else:
response = BadRequest("No files found!")
except Exception, e:
response = BadRequest(str(e))
return response
def to_json(self, data, options=None):
"""
Respond with JSON formatted data. This is the default.
:param data: The data to be worked on.
:type data: dict for multiple objects,
:class:`tastypie.bundle.Bundle` for a single object.
:param options: Options to alter how this serializer works.
:type options: dict
:returns: str
"""
options = options or {}
username = options.get('username', None)
# if this is a singular object, just return our internal to_json()
# which handles the Embedded MongoEngine classes.
if hasattr(data, 'obj'):
if data.obj._has_method('sanitize'):
data.obj.sanitize(username=username, rels=True)
return data.obj.to_json()
data = self._convert_mongoengine(data, options)
return json.dumps(data, sort_keys=True)
def to_xml(self, data, options=None):
"""
Respond with XML formatted data.
:param data: The data to be worked on.
:type data: dict for multiple objects,
:class:`tastypie.bundle.Bundle` for a single object.
:param options: Options to alter how this serializer works.
:type options: dict
:returns: str
"""
options = options or {}
if hasattr(data, 'obj'):
data = {'objects': [data]}
data = self._convert_mongoengine(data, options)
return tostring(self.to_etree(data, options), xml_declaration=True,
encoding='utf-8')
def to_yaml(self, data, options=None):
"""
Respond with YAML formatted data.
:param data: The data to be worked on.
:type data: dict for multiple objects,
:class:`tastypie.bundle.Bundle` for a single object.
:param options: Options to alter how this serializer works.
:type options: dict
:returns: str
"""
options = options or {}
username = options.get('username', None)
# if this is a singular object, just return our internal to_yaml()
# which handles the Embedded MongoEngine classes.
if hasattr(data, 'obj'):
if data.obj._has_method('sanitize'):
data.obj.sanitize(username=username, rels=True)
return data.obj.to_yaml()
data = self._convert_mongoengine(data, options)
return yaml.dump(data)
def to_stix(self, data, options=None):
"""
Respond with STIX formatted data.
:param data: The data to be worked on.
:type data: dict for multiple objects,
:class:`tastypie.bundle.Bundle` for a single object.
:param options: Options to alter how this serializer works.
:type options: dict
:returns: str
"""
options = options or {}
get_binaries = 'stix_no_bin'
bin_fmt = 'raw'
if 'binaries' in options:
try:
if int(options['binaries']):
get_binaries = 'stix'
bin_fmt = 'base64'
except:
pass
# This is bad.
# Should probably find a better way to determine the user
# who is making this API call. However, the data to
# convert is already queried by the API using the user's
# source access list, so technically we should not be
# looping through any data the user isn't supposed to see,
# so this sources list is just a formality to get
# download_object_handler() to do what we want.
sources = [s.name for s in SourceAccess.objects()]
if hasattr(data, 'obj'):
objects = [(data.obj._meta['crits_type'],
data.obj.id)]
object_types = [objects[0][0]]
elif 'objects' in data:
try:
objects = []
object_types = []
objs = data['objects']
data['objects'] = []
for obj_ in objs:
objects.append((obj_.obj._meta['crits_type'],
obj_.obj.id))
object_types.append(obj_.obj._meta['crits_type'])
except Exception:
return ""
else:
return ""
try:
# Constants are here to make sure:
# 1: total limit of objects to return
# 0: depth limit - only want this object
# 0: relationship limit - don't get relationships
data = download_object_handler(1,
0,
0,
get_binaries,
bin_fmt,
object_types,
objects,
sources,
False)
except Exception:
data = ""
if 'data' in data:
data = data['data']
return data
def _convert_mongoengine(self, data, options=None):
"""
Convert the MongoEngine class to a serializable object.
This also sanitizes the content.
:param data: The data to be worked on.
:type data: dict for multiple objects,
:class:`tastypie.bundle.Bundle` for a single object.
:param options: Options to alter how this serializer works.
:type options: dict
:returns: dict
"""
# if this is a list of multiple objects, use our internal to_json()
# for each one before processing normally.
username = options.get('username', None)
if 'objects' in data:
objs = data['objects']
data['objects'] = []
for obj_ in objs:
if obj_.obj._has_method('sanitize'):
obj_.obj.sanitize(username=username, rels=True)
data['objects'].append(json.loads(obj_.obj.to_json()))
data = self.to_simple(data, options)
return data
class CRITsAPIResource(MongoEngineResource):
"""
Standard CRITs API Resource.
"""
class Meta:
default_format = "application/json"
def crits_response(self, content, status=200):
"""
An amazing hack so we can return our own custom JSON response. Instead
of having the ability to craft and return an HttpResponse, Tastypie
requires us to raise this custom exception in order to do so.
The content should be a dict with keys of:
- return_code: 0 (success), 1 (failure), etc. for custom returns.
- type: The CRITs TLO type (Sample, Email, etc.)
- id: The ObjectId (as a string) of the TLO. (optional if not
available)
- message: A custom message you wish to return.
If you wish to extend your content to contain more k/v pairs you can do
so as long as they are JSON serializable.
:param content: The information we wish to return in the response.
:type content: dict (must be json serializable)
:param status: If we wish to return anything other than a 200.
:type status: int
:raises: :class:`tastypie.exceptions.ImmediateHttpResponse`
"""
raise ImmediateHttpResponse(HttpResponse(json.dumps(content),
mimetype="application/json",
status=status))
def create_response(self, request, data, response_class=HttpResponse,
**response_kwargs):
"""
Override the default create_response so we can pass the GET
parameters into options. This allows us to use GET parameters
to adjust how our serializers respond.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param data: The data to be worked on.
:type data: dict for multiple objects,
:class:`tastypie.bundle.Bundle` for a single object.
:param response_class: The class to utilize for the response.
:type response_class: :class:`django.http.HttpResponse` (Default)
:returns: :class:`django.http.HttpResponse` (Default)
"""
desired_format = self.determine_format(request)
options = request.GET.copy()
options['username'] = request.user.username
serialized = self.serialize(request, data, desired_format,
options=options)
return response_class(content=serialized,
content_type=build_content_type(desired_format),
**response_kwargs)
def determine_format(self, request):
"""
Used to determine the desired format.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:returns: str
"""
return determine_format(request, self._meta.serializer,
default_format=self._meta.default_format)
def deserialize(self, request, data, format=None):
"""
Custom deserializer which is only used to collect filedata uploads
and pass the binary along with the rest of the POST like
tastyie would normally do.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param data: The data to pass along.
:type data: dict
:param format: The format of the request.
:type format: str
:returns: data in requested format.
"""
# Get format from request. Assume json if nothing provided
if not format:
format = request.META.get('CONTENT_TYPE', 'application/json')
if format == 'application/x-www-form-urlencoded':
if 'filedata' in request.POST:
raise BadRequest("Filedata only supported in multipart forms.")
else:
return request.POST
# If a file was uploaded, add it to data and pass it along
if format.startswith('multipart'):
data = request.POST.copy()
if 'filedata' in request.FILES:
if hasattr(request.FILES['filedata'], 'read'):
data.update(request.FILES)
else:
raise BadRequest("Expected filehandle, got string.")
return data
return super(CRITsAPIResource, self).deserialize(request, data, format)
def get_object_list(self, request, klass, sources=True):
"""
Handle GET requests. This does all sorts of work to ensure the
results are sanitized and that source restriction is adhered to.
Adds the ability to limit results and the content of the results
through GET parameters.
:param request: Django request object (Required)
:type request: :class:`django.http.HttpRequest`
:param klass: The CRITs top-level object to get.
:type klass: class which inherits from
:class:`crits.core.crits_mongoengine.CritsBaseAttributes`
:param sources: If we should limit by source.
:type sources: boolean
:returns: :class:`crits.core.crits_mongoengine.CritsQuerySet`
"""
querydict = {}
get_params = request.GET.copy()
regex = request.GET.get('regex', False)
only = request.GET.get('only', None)
exclude = request.GET.get('exclude', None)
source_list = user_sources(request.user.username)
no_sources = True
# Chop off trailing slash and split on remaining slashes.
# If last part of path is not the resource name, assume it is an
# object ID.
path = request.path[:-1].split('/')
if path[-1] != self.Meta.resource_name:
# If this is a valid object ID, convert it. Otherwise, use
# the string. The corresponding query will return 0.
if ObjectId.is_valid(path[-1]):
querydict['_id'] = ObjectId(path[-1])
else:
querydict['_id'] = path[-1]
for k,v in get_params.iteritems():
v = v.strip()
try:
v_int = int(v)
except:
# If can't be converted to an int use the string.
v_int = v
if k == "c-_id":
try:
querydict['_id'] = ObjectId(v)
except:
pass
if k.startswith("c-"):
field = k[2:]
# Attempt to discover query operators. We use django-style operators
# (same as MongoEngine). These also override regex.
try:
op_index = field.index("__")
op = "$%s" % field[op_index+2:]
field = field[:op_index]
except ValueError:
op_index = None
if op_index is not None:
if op in ('$gt', '$gte', '$lt', '$lte', '$ne', '$in', '$nin', '$exists'):
val = v
if field in ('created', 'modified'):
try:
val = parse(val, fuzzy=True)
except:
pass
if op in ('$in', '$nin'):
if field == 'source.name':
val = []
for i in v.split(','):
s = remove_quotes(i)
if s in source_list:
no_sources = False
val.append(s)
else:
val = [remove_quotes(i) for i in v.split(',')]
if op == '$exists':
if val in ('true', 'True', '1'):
val = 1
elif val in ('false', 'False', '0'):
val = 0
if field in ('size', 'schema_version'):
if isinstance(val, list):
v_f = []
for i in val:
try:
v_f.append(int(i))
except:
pass
val = v_f
else:
try:
val = int(val)
except:
val = None
if val or val == 0:
querydict[field] = {op: val}
elif field in ('size', 'schema_version'):
querydict[field] = v_int
elif field in ('created', 'modified'):
try:
querydict[field] = parse(v, fuzzy=True)
except:
querydict[field] = v
elif field == 'source.name':
v = remove_quotes(v)
if v in source_list:
no_sources = False
querydict[field] = v
elif regex:
querydict[field] = generate_regex(v)
else:
querydict[field] = remove_quotes(v)
if no_sources and sources:
querydict['source.name'] = {'$in': source_list}
if only or exclude:
required = [k for k,f in klass._fields.iteritems() if f.required]
if only:
fields = only.split(',')
if exclude:
excludes = exclude.split(',')
fields = [x for x in fields if x not in excludes]
for r in required:
if r not in fields:
fields.append(r)
results = klass.objects(__raw__=querydict).only(*fields)
elif exclude:
fields = exclude.split(',')
for r in required:
if r not in fields:
fields.append(r)
results = klass.objects(__raw__=querydict).exclude(*fields)
else:
results = klass.objects(__raw__=querydict)
return results
def obj_get_list(self, bundle, **kwargs):
"""
Placeholder for overriding the default tastypie function in the future.
"""
return super(CRITsAPIResource, self).obj_get_list(bundle=bundle, **kwargs)
def obj_get(self, bundle, **kwargs):
"""
Placeholder for overriding the default tastypie function in the future.
"""
return super(CRITsAPIResource, self).obj_get(bundle=bundle, **kwargs)
def obj_create(self, bundle, **kwargs):
"""
Create an object in CRITs. Should be overridden by each
individual top-level resource.
:returns: NotImplementedError if the resource doesn't override.
"""
raise NotImplementedError('You cannot currently create this objects through the API.')
def obj_update(self, bundle, **kwargs):
"""
Update an object in CRITs. Should be overridden by each
individual top-level resource.
:returns: NotImplementedError if the resource doesn't override.
"""
import crits.actors.handlers as ah
import crits.core.handlers as coreh
import crits.services.handlers as servh
actions = {
'Common': {
'run_service': servh.run_service,
'add_releasability': coreh.add_releasability,
},
'Actor': {
'update_actor_tags': ah.update_actor_tags,
'attribute_actor_identifier': ah.attribute_actor_identifier,
'set_identifier_confidence': ah.set_identifier_confidence,
'remove_attribution': ah.remove_attribution,
'set_actor_name': ah.set_actor_name,
'update_actor_aliases': ah.update_actor_aliases,
},
'Backdoor': {},
'Campaign': {},
'Certificate': {},
'Domain': {},
'Email': {},
'Event': {},
'Exploit': {},
'Indicator': {},
'IP': {},
'PCAP': {},
'RawData': {},
'Sample': {},
'Target': {},
}
prefix = get_script_prefix()
uri = bundle.request.path
if prefix and uri.startswith(prefix):
uri = uri[len(prefix)-1:]
view, args, kwargs = resolve(uri)
type_ = kwargs['resource_name'].title()
if type_ == "Raw_data":
type_ = "RawData"
if type_[-1] == 's':
type_ = type_[:-1]
if type_ in ("Pcap", "Ip"):
type_ = type_.upper()
id_ = kwargs['pk']
content = {'return_code': 0,
'type': type_,
'message': '',
'id': id_}
# Make sure we have an appropriate action.
action = bundle.data.get("action", None)
atype = actions.get(type_, None)
if atype is None:
content['return_code'] = 1
content['message'] = "'%s' is not a valid resource." % type_
self.crits_response(content)
action_type = atype.get(action, None)
if action_type is None:
atype = actions.get('Common')
action_type = atype.get(action, None)
if action_type:
data = bundle.data
# Requests don't need to have an id_ as we will derive it from
# the request URL. Override id_ if the request provided one.
data['id_'] = id_
# Override type (if provided)
data['type_'] = type_
# Override user (if provided) with the one who made the request.
data['user'] = bundle.request.user.username
try:
results = action_type(**data)
if not results.get('success', False):
content['return_code'] = 1
# TODO: Some messages contain HTML and other such content
# that we shouldn't be returning here.
message = results.get('message', None)
content['message'] = message
else:
content['message'] = "success!"
except Exception, e:
content['return_code'] = 1
content['message'] = str(e)
else:
content['return_code'] = 1
content['message'] = "'%s' is not a valid action." % action
self.crits_response(content)
def obj_delete_list(self, bundle, **kwargs):
"""
Delete list of objects in CRITs. Should be overridden by each
individual top-level resource.
:returns: NotImplementedError if the resource doesn't override.
"""
raise NotImplementedError('You cannot currently delete objects through the API.')
def obj_delete(self, bundle, **kwargs):
"""
Delete an object in CRITs. Should be overridden by each
individual top-level resource.
:returns: NotImplementedError if the resource doesn't override.
"""
raise NotImplementedError('You cannot currently delete this object through the API.')
def resource_name_from_type(self, crits_type):
"""
Take a CRITs type and convert it to the appropriate API resource name.
:param crits_type: The CRITs type.
:type crits_type: str
:returns: str
"""
if crits_type == "RawData":
return "raw_data"
else:
return "%ss" % crits_type.lower()
def determine_format(request, serializer, default_format='application/json'):
    """
    Replacement for tastypie's default determine_format.

    Tastypie honors the Accept header, which most browsers fill with
    "application/xml"; we always want to default to "application/json"
    unless the caller explicitly forces a format.

    :param request: Django request object (Required)
    :type request: :class:`django.http.HttpRequest`
    :param serializer: The serializer being used.
    :type serializer: :class:`crits.core.api.CRITsSerializer`
    :param default_format: The format to respond in.
    :type default_format: str
    :returns: str
    """
    # An explicit, known ?format= wins outright.
    requested = request.GET.get('format')
    if requested and requested in serializer.formats:
        return serializer.get_mime_for_format(requested)
    # File downloads get a binary content type.
    if request.GET.get('file'):
        return 'application/octet-stream'
    # No valid 'Accept' header/formats. Sane default.
    return default_format
|
jeffchao/xen-3.3-tcg | refs/heads/master | dist/install/usr/lib/python/xen/sv/CreateDomain.py | 42 | from xen.sv.Wizard import *
from xen.sv.util import *
from xen.sv.GenTabbed import PreTab
from xen.xm.create import make_config, OptVals
from xen.xend.XendClient import server
class CreateDomain( Wizard ):
    """Multi-page wizard collecting the settings needed to create a domain."""

    def __init__( self, urlWriter ):
        # Pages are presented in order, ending with the CreateFinish sheet.
        pages = [ CreatePage0,
                  CreatePage1,
                  CreatePage2,
                  CreatePage3,
                  CreatePage4,
                  CreateFinish ]
        Wizard.__init__( self, urlWriter, "Create Domain", pages )

    def op_finish( self, request ):
        # Nothing to do here; the final sheet performs the actual creation.
        pass
class CreatePage0( Sheet ):
    """Wizard page 0: general VM settings (name, memory, CPU placement)."""

    title = "General"

    def __init__( self, urlWriter ):
        Sheet.__init__( self, urlWriter, "General", 0 )
        # Fix: user-facing validation messages spelled "field" as "feild".
        self.addControl( InputControl( 'name', 'VM Name', 'VM Name:', "[\\w|\\S]+", "You must enter a name in this field" ) )
        self.addControl( InputControl( 'memory', '64', 'Memory (Mb):', "[\\d]+", "You must enter a number in this field" ) )
        self.addControl( InputControl( 'cpu', '0', 'CPU:', "[\\d]+", "You must enter a number in this field" ) )
        self.addControl( InputControl( 'cpu_weight', '1', 'CPU Weight:', "[\\d]+", "You must enter a number in this field" ) )
        self.addControl( InputControl( 'vcpus', '1', 'Virtual CPUs:', '[\\d]+', "You must enter a number in this field") )
class CreatePage1( Sheet ):
title = "Setup Kernel Image"
def __init__( self, urlWriter ):
Sheet.__init__( self, urlWriter, "Setup Kernel Image", 1 )
self.addControl( ListControl( 'builder', [('linux', 'Linux'), ('netbsd', 'NetBSD')], 'Domain Builder:' ) )
self.addControl( FileControl( 'kernel', '/boot/vmlinuz-2.6.12-xenU', 'Kernel Image:' ) )
self.addControl( InputControl( 'extra', '', 'Kernel Command Line Parameters:' ) )
self.addControl( ListControl( 'use-initrd', [('yes', 'Yes'), ('no', 'No')], 'Use an Initial Ram Disk?:' ) )
self.addControl( FileControl( 'initrd', '/boot/initrd-2.6.12-xenU.img', 'Initial Ram Disk:' ) )
def validate( self, request ):
if not self.passback: self.parseForm( request )
check = True
request.write( previous_values.get( '>>>>>use-initrd' ) )
previous_values = ssxp2hash( string2sxp( self.passback ) ) #get the map for quick reference
if DEBUG: print previous_values
for (feild, control) in self.feilds:
if feild == 'initrd' and previous_values.get( 'use-initrd' ) != 'no':
request.write( previous_values.get( '>>>>>use-initrd' ) )
if control.validate( previous_values.get( feild ) ):
check = False
elif not control.validate( previous_values.get( feild ) ):
check = False
if DEBUG: print "> %s = %s" % (feild, previous_values.get( feild ))
return check
class CreatePage2( Sheet ):
    """Third wizard page: ask how many virtual block devices to set up."""
    title = "Choose number of VBDS"

    def __init__( self, urlWriter ):
        Sheet.__init__( self, urlWriter, "Setup Virtual Block Device", 2 )
        count_control = InputControl( 'num_vbds', '1', 'Number of VBDs:', '[\\d]+', "You must enter a number in this field" )
        self.addControl( count_control )
class CreatePage3( Sheet ):
    """Fourth wizard page: configure each requested virtual block device."""
    title = "Setup VBDS"

    def __init__( self, urlWriter ):
        Sheet.__init__( self, urlWriter, "Setup Virtual Block Device", 3 )

    def write_BODY( self, request ):
        """Build one control group per requested VBD, then render the sheet."""
        if not self.passback: self.parseForm( request )
        # Values collected so far tell us how many VBDs were requested.
        previous_values = sxp2hash( string2sxp( self.passback ) )
        vbd_count = previous_values.get( 'num_vbds' )
        for index in range( int( vbd_count ) ):
            backend_default = 'phy:sda%s' % str(index + 1)
            guest_default = 'sda%s' % str(index + 1)
            self.addControl( InputControl( 'vbd%s_dom0' % index, backend_default, 'Device %s name:' % index ) )
            self.addControl( InputControl( 'vbd%s_domU' % index, guest_default, 'Virtualized device %s:' % index ) )
            self.addControl( ListControl( 'vbd%s_mode' % index, [('w', 'Read + Write'), ('r', 'Read Only')], 'Device %s mode:' % index ) )
        self.addControl( InputControl( 'root', '/dev/sda1', 'Root device (in VM):' ) )
        Sheet.write_BODY( self, request )
class CreatePage4( Sheet ):
    """Fifth wizard page: network configuration for the new domain."""
    title = "Network Setting"

    def __init__( self, urlWriter ):
        Sheet.__init__( self, urlWriter, "Network settings", 4 )
        self.addControl( ListControl( 'dhcp', [('off', 'No'), ('dhcp', 'Yes')], 'Use DHCP:' ) )
        # Static-IP fields, added in the same order as before.
        for (name, default, label) in [ ('hostname',   'hostname',      'VM Hostname:'),
                                        ('ip_addr',    '192.168.1.1',   'VM IP Address:'),
                                        ('ip_subnet',  '255.255.255.0', 'VM Subnet Mask:'),
                                        ('ip_gateway', '192.168.1.1',   'VM Gateway:'),
                                        ('ip_nfs',     '192.168.1.1',   'NFS Server:') ]:
            self.addControl( InputControl( name, default, label ) )
class CreateFinish( Sheet ):
title = "Finish"
def __init__( self, urlWriter ):
Sheet.__init__( self, urlWriter, "All Done", 5 )
def write_BODY( self, request ):
if not self.passback: self.parseForm( request )
xend_sxp = self.translate_sxp( string2sxp( self.passback ) )
request.write( "<pre>%s</pre>" % sxp2prettystring( xend_sxp ) )
try:
server.xend_domain_create( xend_sxp )
request.write( "<p>You domain had been successfully created.</p>" )
except Exception, e:
request.write( "<p>There was an error creating your domain.<br/>The configuration used is as follows:\n</p>" )
request.write( "<pre>%s</pre>" % sxp2prettystring( xend_sxp ) )
request.write( "<p>The error was:</p>" )
request.write( "<pre>%s</pre>" % str( e ) )
request.write( "<input type='hidden' name='passback' value=\"%s\"></p>" % self.passback )
request.write( "<input type='hidden' name='sheet' value='%s'></p>" % self.location )
def translate_sxp( self, fin_sxp ):
fin_hash = ssxp2hash( fin_sxp )
def get( key ):
ret = fin_hash.get( key )
if ret:
return ret
else:
return ""
vals = OptVals()
vals.name = get( 'name' )
vals.memory = get( 'memory' )
vals.maxmem = get( 'maxmem' )
vals.cpu = get( 'cpu' )
vals.cpu_weight = get( 'cpu_weight' )
vals.vcpus = get( 'vcpus' )
vals.builder = get( 'builder' )
vals.kernel = get( 'kernel' )
vals.root = get( 'root' )
vals.extra = get( 'extra' )
#setup vbds
vbds = []
for i in range( int( get( 'num_vbds' ) ) ):
vbds.append( ( get( 'vbd%s_dom0' % i ), get('vbd%s_domU' % i ), get( 'vbd%s_mode' % i ), None ) )
vals.disk = vbds
#misc
vals.pci = []
vals.blkif = None
vals.netif = None
vals.restart = None
vals.console = None
vals.ramdisk = None
vals.ssidref = -1
vals.bootloader = None
vals.usb = []
vals.acpi = []
#setup vifs
vals.vif = []
vals.nics = 1
ip = get( 'ip_addr' )
nfs = get( 'ip_nfs' )
gate = get( 'ip_gateway' )
mask = get( 'ip_subnet' )
host = get( 'hostname' )
dhcp = get( 'dhcp' )
vals.cmdline_ip = "%s:%s:%s:%s:%s:eth0:%s" % (ip, nfs, gate, mask, host, dhcp)
opts = None
try:
return make_config( opts, vals )
except Exception, e:
return [["There was an error creating the domain config SXP. This is typically due to an interface change in xm/create.py:make_config", e]]
|
simplyguru-dot/ansible | refs/heads/devel | lib/ansible/plugins/shell/sh.py | 63 | # (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import pipes
import ansible.constants as C
import time
import random
from six import text_type
_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
class ShellModule(object):
    """Build Bourne-shell (sh) command strings for Ansible remote operations.

    Every method returns a *string* of shell code to be executed on the
    remote host; nothing here runs commands locally.
    """

    # How to end lines in a python script one-liner
    _SHELL_EMBEDDED_PY_EOL = '\n'
    # Suffix that discards both stdout and stderr of a command.
    _SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'

    def env_prefix(self, **kwargs):
        '''Build command prefix with environment variables.

        Returns a space-joined "NAME=value" list (values shell-quoted) that
        can be prepended to a command.  Extra keyword arguments override the
        default locale settings.
        '''
        env = dict(
            LANG = C.DEFAULT_MODULE_LANG,
            LC_CTYPE = C.DEFAULT_MODULE_LANG,
            LC_MESSAGES = C.DEFAULT_MODULE_LANG,
        )
        env.update(kwargs)
        return ' '.join(['%s=%s' % (k, pipes.quote(text_type(v))) for k,v in env.items()])

    def join_path(self, *args):
        # POSIX-style path join; this plugin targets sh-compatible remotes.
        return os.path.join(*args)

    def path_has_trailing_slash(self, path):
        return path.endswith('/')

    def chmod(self, mode, path):
        """Return a 'chmod MODE PATH' command string; path is shell-quoted."""
        path = pipes.quote(path)
        return 'chmod %s %s' % (mode, path)

    def remove(self, path, recurse=False):
        """Return an 'rm -f [-r] PATH' command string with output discarded."""
        path = pipes.quote(path)
        cmd = 'rm -f '
        if recurse:
            cmd += '-r '
        return cmd + "%s %s" % (path, self._SHELL_REDIRECT_ALLNULL)

    def mkdtemp(self, basefile=None, system=False, mode=None):
        """Return a command that creates a temp directory and echoes its path.

        The echoed path is how callers learn where the directory was made.
        """
        if not basefile:
            # Timestamp + large random suffix to avoid collisions.
            basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
        basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile)
        # A "system" tmpdir under $HOME/~ is relocated to /tmp; presumably so
        # other users (e.g. after privilege escalation) can reach it — confirm.
        if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')):
            basetmp = self.join_path('/tmp', basefile)
        cmd = 'mkdir -p "%s"' % basetmp
        cmd += ' && echo "%s"' % basetmp
        # change the umask in a subshell to achieve the desired mode
        # also for directories created with `mkdir -p`
        if mode:
            tmp_umask = 0o777 & ~mode
            cmd = '(umask %o && %s)' % (tmp_umask, cmd)
        return cmd

    def expand_user(self, user_home_path):
        ''' Return a command to expand tildes in a path

        It can be either "~" or "~username".  We use the POSIX definition of
        a username:
            http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
            http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
        '''
        # Check that the user_path to expand is safe
        if user_home_path != '~':
            if not _USER_HOME_PATH_RE.match(user_home_path):
                # pipes.quote will make the shell return the string verbatim
                user_home_path = pipes.quote(user_home_path)
        return 'echo %s' % user_home_path

    def checksum(self, path, python_interp):
        """Return a command that prints '<sha1> <path>' or '<rc> <path>'."""
        # The following test needs to be SH-compliant.  BASH-isms will
        # not work if /bin/sh points to a non-BASH shell.
        #
        # In the following test, each condition is a check and logical
        # comparison (|| or &&) that sets the rc value.  Every check is run so
        # the last check in the series to fail will be the rc that is
        # returned.
        #
        # If a check fails we error before invoking the hash functions because
        # hash functions may successfully take the hash of a directory on BSDs
        # (UFS filesystem?) which is not what the rest of the ansible code
        # expects
        #
        # If all of the available hashing methods fail we fail with an rc of
        # 0.  This logic is added to the end of the cmd at the bottom of this
        # function.

        # Return codes:
        # checksum: success!
        # 0: Unknown error
        # 1: Remote file does not exist
        # 2: No read permissions on the file
        # 3: File is a directory
        # 4: No python interpreter

        # Quoting gets complex here.  We're writing a python string that's
        # used by a variety of shells on the remote host to invoke a python
        # "one-liner".
        shell_escaped_path = pipes.quote(path)
        test = "rc=flag; [ -r %(p)s ] || rc=2; [ -f %(p)s ] || rc=1; [ -d %(p)s ] && rc=3; %(i)s -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} \"%(p)s && exit 0" % dict(p=shell_escaped_path, i=python_interp)
        csums = [
            "({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL),      # Python > 2.4 (including python3)
            "({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL),      # Python == 2.4
        ]
        # Try each hashing one-liner in turn; fall back to "0 <path>" if all fail.
        cmd = " || ".join(csums)
        cmd = "%s; %s || (echo \'0 \'%s)" % (test, cmd, shell_escaped_path)
        return cmd

    def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
        """Assemble '<env> <interpreter> <cmd>[; rm -rf tmp]' for a module run."""
        # don't quote the cmd if it's an empty string, because this will
        # break pipelining mode
        if cmd.strip() != '':
            cmd = pipes.quote(cmd)
        # The interpreter comes from the module's shebang line, minus "#!".
        cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
        new_cmd = " ".join(cmd_parts)
        if rm_tmp:
            new_cmd = '%s; rm -rf "%s" %s' % (new_cmd, rm_tmp, self._SHELL_REDIRECT_ALLNULL)
        return new_cmd
|
michelts/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/conf/locale/id/formats.py | 355 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax, see module header).
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G.i.s"
TIME_FORMAT = 'G.i.s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G.i.s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d-%m-%y', '%d/%m/%y',     # '25-10-09', '25/10/09'
    '%d-%m-%Y', '%d/%m/%Y',     # '25-10-2009', '25/10/2009'
    '%d %b %Y',                 # '25 Oct 2006'
    '%d %B %Y',                 # '25 October 2006'
)
TIME_INPUT_FORMATS = (
    '%H.%M.%S',                 # '14.30.59'
    '%H.%M',                    # '14.30'
)
DATETIME_INPUT_FORMATS = (
    '%d-%m-%Y %H.%M.%S',        # '25-10-2009 14.30.59'
    '%d-%m-%Y %H.%M',           # '25-10-2009 14.30'
    '%d-%m-%Y',                 # '25-10-2009'
    '%d-%m-%y %H.%M.%S',        # '25-10-09 14.30.59'
    '%d-%m-%y %H.%M',           # '25-10-09 14.30'
    '%d-%m-%y',                 # '25-10-09'
    '%m/%d/%y %H.%M.%S',        # '10/25/06 14.30.59'
    '%m/%d/%y %H.%M',           # '10/25/06 14.30'
    '%m/%d/%y',                 # '10/25/06'
    '%m/%d/%Y %H.%M.%S',        # '10/25/2009 14.30.59' (comment fixed: format is month-first)
    '%m/%d/%Y %H.%M',           # '10/25/2009 14.30'
    '%m/%d/%Y',                 # '10/25/2009'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
apple/llvm-project | refs/heads/llvm.org/main | llvm/utils/lit/tests/shtest-keyword-parse-errors.py | 8 | # RUN: not %{lit} -j 1 -vv %{inputs}/shtest-keyword-parse-errors > %t.out
# RUN: FileCheck -input-file %t.out %s
#
# END.
# CHECK: Testing: 3 tests
# CHECK-LABEL: UNRESOLVED: shtest-keyword-parse-errors :: empty.txt
# CHECK: {{^}}Test has no 'RUN:' line{{$}}
# CHECK-LABEL: UNRESOLVED: shtest-keyword-parse-errors :: multiple-allow-retries.txt
# CHECK: {{^}}Test has more than one ALLOW_RETRIES lines{{$}}
# CHECK-LABEL: UNRESOLVED: shtest-keyword-parse-errors :: unterminated-run.txt
# CHECK: {{^}}Test has unterminated 'RUN:' lines (with '\'){{$}}
|
chidea/GoPythonDLLWrapper | refs/heads/master | bin/lib/site-packages/pip/_vendor/cachecontrol/wrapper.py | 953 | from .adapter import CacheControlAdapter
from .cache import DictCache
def CacheControl(sess,
                 cache=None,
                 cache_etags=True,
                 serializer=None,
                 heuristic=None):
    """Wrap a requests Session with HTTP caching.

    Mounts a CacheControlAdapter for both the http:// and https://
    prefixes and returns the same session object.
    """
    if not cache:
        cache = DictCache()
    adapter = CacheControlAdapter(cache,
                                  cache_etags=cache_etags,
                                  serializer=serializer,
                                  heuristic=heuristic)
    for prefix in ('http://', 'https://'):
        sess.mount(prefix, adapter)
    return sess
|
ncliam/serverpos | refs/heads/master | openerp/addons/point_of_sale/__openerp__.py | 261 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest for the Point of Sale module.
{
    'name': 'Point of Sale',
    'version': '1.0.1',
    'category': 'Point Of Sale',
    'sequence': 6,
    'summary': 'Touchscreen Interface for Shops',
    'description': """
Quick and Easy sale process
===========================
This module allows you to manage your shop sales very easily with a fully web based touchscreen interface.
It is compatible with all PC tablets and the iPad, offering multiple payment methods.
Product selection can be done in several ways:
* Using a barcode reader
* Browsing through categories of products or via a text search.
Main Features
-------------
* Fast encoding of the sale
* Choose one payment method (the quick way) or split the payment between several payment methods
* Computation of the amount of money to return
* Create and confirm the picking list automatically
* Allows the user to create an invoice automatically
* Refund previous sales
""",
    'author': 'OpenERP SA',
    'depends': ['sale_stock'],
    # Data files loaded on install/update (security, wizards, views, reports).
    'data': [
        'data/report_paperformat.xml',
        'security/point_of_sale_security.xml',
        'security/ir.model.access.csv',
        'wizard/pos_box.xml',
        'wizard/pos_confirm.xml',
        'wizard/pos_details.xml',
        'wizard/pos_discount.xml',
        'wizard/pos_open_statement.xml',
        'wizard/pos_payment.xml',
        'wizard/pos_session_opening.xml',
        'views/templates.xml',
        'point_of_sale_report.xml',
        'point_of_sale_view.xml',
        'point_of_sale_sequence.xml',
        'point_of_sale_data.xml',
        'report/pos_order_report_view.xml',
        'point_of_sale_workflow.xml',
        'account_statement_view.xml',
        'account_statement_report.xml',
        'res_users_view.xml',
        'res_partner_view.xml',
        'views/report_statement.xml',
        'views/report_usersproduct.xml',
        'views/report_receipt.xml',
        'views/report_saleslines.xml',
        'views/report_detailsofsales.xml',
        'views/report_payment.xml',
        'views/report_sessionsummary.xml',
        'views/point_of_sale.xml',
    ],
    # Demo data loaded only when demo mode is enabled.
    'demo': [
        'point_of_sale_demo.xml',
        'account_statement_demo.xml',
    ],
    # YAML test scenarios.
    'test': [
        'test/00_register_open.yml',
        'test/01_order_to_payment.yml',
        'test/02_order_to_invoice.yml',
        'test/point_of_sale_report.yml',
        'test/account_statement_reports.yml',
    ],
    'installable': True,
    'application': True,
    'qweb': ['static/src/xml/pos.xml'],
    'website': 'https://www.odoo.com/page/point-of-sale',
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
moreati/django-allauth | refs/heads/master | allauth/socialaccount/providers/tumblr/provider.py | 70 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth.provider import OAuthProvider
class TumblrAccount(ProviderAccount):
    """Social-account wrapper for a Tumblr blog."""

    # NOTE(review): the trailing underscore means this does NOT override
    # ProviderAccount.get_profile_url — presumably disabled on purpose;
    # confirm before renaming.
    def get_profile_url_(self):
        blog_name = self.account.extra_data.get('name')
        return 'http://%s.tumblr.com/' % blog_name

    def to_str(self):
        # Fall back to the generic representation when no name is stored.
        fallback = super(TumblrAccount, self).to_str()
        return self.account.extra_data.get('name', fallback)
class TumblrProvider(OAuthProvider):
    """OAuth1 provider definition for Tumblr."""
    id = 'tumblr'
    name = 'Tumblr'
    package = 'allauth.socialaccount.providers.tumblr'
    account_class = TumblrAccount

    def extract_uid(self, data):
        # The blog name is the unique identifier Tumblr exposes.
        return data['name']

    def extract_common_fields(self, data):
        return {'first_name': data.get('name')}
providers.registry.register(TumblrProvider)
|
tecan/xchat-rt | refs/heads/master | plugins/scripts/Supybot-0.83.4.1-bitcoinotc-bot/plugins/ChannelLogger/config.py | 8 | ###
# Copyright (c) 2005, Jeremiah Fincher
# Copyright (c) 2009, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
    """Interactive configuration hook run by supybot-wizard.

    `advanced` is a bool indicating whether the user identified as an
    advanced user.  Configuration is effected by manipulating the registry;
    this plugin only needs to be registered — it asks no questions.
    """
    # Prompting helpers are part of the standard plugin template; unused here.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('ChannelLogger', True)
ChannelLogger = conf.registerPlugin('ChannelLogger')
conf.registerChannelValue(ChannelLogger, 'enable',
registry.Boolean(True, """Determines whether logging is enabled."""))
conf.registerGlobalValue(ChannelLogger, 'flushImmediately',
registry.Boolean(False, """Determines whether channel logfiles will be
flushed anytime they're written to, rather than being buffered by the
operating system."""))
conf.registerChannelValue(ChannelLogger, 'stripFormatting',
registry.Boolean(True, """Determines whether formatting characters (such
as bolding, color, etc.) are removed when writing the logs to disk."""))
conf.registerChannelValue(ChannelLogger, 'timestamp',
registry.Boolean(True, """Determines whether the logs for this channel are
timestamped with the timestamp in supybot.log.timestampFormat."""))
conf.registerChannelValue(ChannelLogger, 'noLogPrefix',
registry.String('[nolog]', """Determines what string a message should be
prefixed with in order not to be logged. If you don't want any such
prefix, just set it to the empty string."""))
conf.registerChannelValue(ChannelLogger, 'rotateLogs',
registry.Boolean(False, """Determines whether the bot will automatically
rotate the logs for this channel. The bot will rotate logs when the
timestamp for the log changes. The timestamp is set according to
the 'filenameTimestamp' configuration variable."""))
conf.registerChannelValue(ChannelLogger, 'filenameTimestamp',
registry.String('%Y-%m-%d', """Determines how to represent the timestamp
used for the filename in rotated logs. When this timestamp changes, the
old logfiles will be closed and a new one started. The format characters
for the timestamp are in the time.strftime docs at python.org. In order
for your logs to be rotated, you'll also have to enable
supybot.plugins.ChannelLogger.rotateLogs."""))
conf.registerGlobalValue(ChannelLogger, 'directories',
registry.Boolean(True, """Determines whether the bot will partition its
channel logs into separate directories based on different criteria."""))
conf.registerGlobalValue(ChannelLogger.directories, 'network',
registry.Boolean(True, """Determines whether the bot will use a network
directory if using directories."""))
conf.registerGlobalValue(ChannelLogger.directories, 'channel',
registry.Boolean(True, """Determines whether the bot will use a channel
directory if using directories."""))
conf.registerGlobalValue(ChannelLogger.directories, 'timestamp',
registry.Boolean(False, """Determines whether the bot will use a timestamp
(determined by supybot.plugins.ChannelLogger.directories.timestamp.format)
if using directories."""))
conf.registerGlobalValue(ChannelLogger.directories.timestamp, 'format',
registry.String('%B', """Determines what timestamp format will be used in
the directory stucture for channel logs if
supybot.plugins.ChannelLogger.directories.timestamp is True."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
nkgilley/home-assistant | refs/heads/dev | homeassistant/components/ohmconnect/__init__.py | 36 | """The ohmconnect component."""
|
zstyblik/infernal-twin | refs/heads/master | build/pip/build/lib.linux-i686-2.7/pip/_vendor/html5lib/treewalkers/_base.py | 436 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type, string_types
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
           "TreeWalker", "NonRecursiveTreeWalker"]

from xml.dom import Node

# Node-type codes reused from xml.dom so tree walkers share one vocabulary.
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
# Sentinel for node types xml.dom has no code for.
UNKNOWN = "<#UNKNOWN#>"

from ..constants import voidElements, spaceCharacters
# Collapse the frozenset of whitespace chars into a str for str.strip() use.
spaceCharacters = "".join(spaceCharacters)
def to_text(s, blank_if_none=True):
    """Coerce *s* to a text string; None becomes "" (or stays None)."""
    if s is None:
        return "" if blank_if_none else None
    if isinstance(s, text_type):
        return s
    return text_type(s)
def is_text_or_none(string):
    """True when *string* is None or a (byte/unicode) string type."""
    if string is None:
        return True
    return isinstance(string, string_types)
class TreeWalker(object):
    """Base class for walkers that serialize a parsed tree into a token stream.

    Subclasses implement __iter__ and use the helper methods below to emit
    token dicts ({"type": ..., "name": ..., "data": ...}) that the
    serializers consume.
    """

    def __init__(self, tree):
        self.tree = tree

    def __iter__(self):
        raise NotImplementedError

    def error(self, msg):
        """Emit a SerializeError token carrying *msg*."""
        return {"type": "SerializeError", "data": msg}

    def emptyTag(self, namespace, name, attrs, hasChildren=False):
        """Yield an EmptyTag token (a generator: callers iterate the result)."""
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
        assert all((namespace is None or isinstance(namespace, string_types)) and
                   isinstance(name, string_types) and
                   isinstance(value, string_types)
                   for (namespace, name), value in attrs.items())
        yield {"type": "EmptyTag", "name": to_text(name, False),
               "namespace": to_text(namespace),
               "data": attrs}
        if hasChildren:
            # Void elements cannot legally have children.
            yield self.error("Void element has children")

    def startTag(self, namespace, name, attrs):
        """Return a StartTag token; attribute keys/values are text-coerced."""
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
        assert all((namespace is None or isinstance(namespace, string_types)) and
                   isinstance(name, string_types) and
                   isinstance(value, string_types)
                   for (namespace, name), value in attrs.items())
        return {"type": "StartTag",
                "name": text_type(name),
                "namespace": to_text(namespace),
                "data": dict(((to_text(namespace, False), to_text(name)),
                              to_text(value, False))
                             for (namespace, name), value in attrs.items())}

    def endTag(self, namespace, name):
        assert namespace is None or isinstance(namespace, string_types), type(namespace)
        # NOTE(review): this assert's message reports type(namespace) while
        # checking *name* — looks like a copy/paste slip in the message only.
        assert isinstance(name, string_types), type(namespace)
        return {"type": "EndTag",
                "name": to_text(name, False),
                "namespace": to_text(namespace),
                "data": {}}

    def text(self, data):
        """Yield Characters/SpaceCharacters tokens for *data*.

        Leading and trailing whitespace are emitted as separate
        SpaceCharacters tokens so serializers can treat them specially.
        """
        assert isinstance(data, string_types), type(data)
        data = to_text(data)
        middle = data.lstrip(spaceCharacters)
        left = data[:len(data) - len(middle)]
        if left:
            yield {"type": "SpaceCharacters", "data": left}
        data = middle
        middle = data.rstrip(spaceCharacters)
        right = data[len(middle):]
        if middle:
            yield {"type": "Characters", "data": middle}
        if right:
            yield {"type": "SpaceCharacters", "data": right}

    def comment(self, data):
        assert isinstance(data, string_types), type(data)
        return {"type": "Comment", "data": text_type(data)}

    def doctype(self, name, publicId=None, systemId=None, correct=True):
        assert is_text_or_none(name), type(name)
        assert is_text_or_none(publicId), type(publicId)
        assert is_text_or_none(systemId), type(systemId)
        # NOTE(review): to_text(correct) stringifies the boolean
        # (True -> "True"); presumably consumers only use it as truthy text —
        # confirm intended.
        return {"type": "Doctype",
                "name": to_text(name),
                "publicId": to_text(publicId),
                "systemId": to_text(systemId),
                "correct": to_text(correct)}

    def entity(self, name):
        assert isinstance(name, string_types), type(name)
        return {"type": "Entity", "name": text_type(name)}

    def unknown(self, nodeType):
        """Emit an error token for node types the walker cannot handle."""
        return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
    """TreeWalker that iterates the tree without recursion.

    Subclasses supply the four navigation primitives below; __iter__ then
    performs a depth-first traversal using parent/sibling pointers instead
    of a call stack.
    """

    def getNodeDetails(self, node):
        # Must return a tuple whose first item is one of the node-type
        # constants (DOCTYPE, TEXT, ELEMENT, ...) followed by type-specific data.
        raise NotImplementedError

    def getFirstChild(self, node):
        raise NotImplementedError

    def getNextSibling(self, node):
        raise NotImplementedError

    def getParentNode(self, node):
        raise NotImplementedError

    def __iter__(self):
        currentNode = self.tree
        while currentNode is not None:
            # --- Visit the current node and emit its opening token(s) ---
            details = self.getNodeDetails(currentNode)
            type, details = details[0], details[1:]
            hasChildren = False
            if type == DOCTYPE:
                yield self.doctype(*details)
            elif type == TEXT:
                for token in self.text(*details):
                    yield token
            elif type == ELEMENT:
                namespace, name, attributes, hasChildren = details
                if name in voidElements:
                    # Void elements are emitted as EmptyTag and never descended into.
                    for token in self.emptyTag(namespace, name, attributes,
                                               hasChildren):
                        yield token
                    hasChildren = False
                else:
                    yield self.startTag(namespace, name, attributes)
            elif type == COMMENT:
                yield self.comment(details[0])
            elif type == ENTITY:
                yield self.entity(details[0])
            elif type == DOCUMENT:
                hasChildren = True
            else:
                yield self.unknown(details[0])
            # --- Descend into children when present ---
            if hasChildren:
                firstChild = self.getFirstChild(currentNode)
            else:
                firstChild = None
            if firstChild is not None:
                currentNode = firstChild
            else:
                # --- No children: close tags while climbing until a sibling exists ---
                while currentNode is not None:
                    details = self.getNodeDetails(currentNode)
                    type, details = details[0], details[1:]
                    if type == ELEMENT:
                        namespace, name, attributes, hasChildren = details
                        if name not in voidElements:
                            yield self.endTag(namespace, name)
                    if self.tree is currentNode:
                        # Back at the root: traversal complete.
                        currentNode = None
                        break
                    nextSibling = self.getNextSibling(currentNode)
                    if nextSibling is not None:
                        currentNode = nextSibling
                        break
                    else:
                        currentNode = self.getParentNode(currentNode)
|
cginternals/cppexpose | refs/heads/master | source/tests/googletest/googlemock/scripts/generator/cpp/utils.py | 1158 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import sys
# Set to True to see the start/end token indices.
# NOTE(review): this is currently True, so debug output is on by default —
# confirm that is intended (a library default of False seems more likely).
DEBUG = True
def ReadFile(filename, print_error=True):
    """Return the contents of *filename*, or None if it cannot be read.

    Args:
        filename: path of the file to read.
        print_error: when True, print a diagnostic to stdout on IOError.
    """
    try:
        # 'with' guarantees the file is closed, replacing the old try/finally.
        with open(filename) as fp:
            return fp.read()
    except IOError:
        if print_error:
            print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
        return None
bejondshao/grape | refs/heads/master | org/bejond/basic/analysis/__init__.py | 23 | # -*- coding: UTF-8 -*-
|
PiRSquared17/creoleparser | refs/heads/master | creoleparser/core.py | 4 | # core.py
# -*- coding: utf-8 -*-
#
# Copyright © Stephen Day
#
# This module is part of Creoleparser and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
import re
import warnings
import genshi.builder as bldr
__docformat__ = 'restructuredtext en'

# Character an author types to escape wiki markup.
escape_char = '~'
# Regex fragment: negative lookbehind matching positions NOT preceded by the
# escape character.
esc_neg_look = '(?<!' + re.escape(escape_char) + ')'
# Escape characters that should be stripped from output: an escape char not
# itself escaped and not followed by whitespace/end-of-string.
esc_to_remove = re.compile(''.join([r'(?<!',re.escape(escape_char),')',re.escape(escape_char),r'(?!([ \n]|$))']))
# Internal placeholders of the form <<<n>>> inserted during parsing.
place_holder_re = re.compile(r'<<<(-?\d+?)>>>')
class Parser(object):
def __init__(self,dialect, method='xhtml', strip_whitespace=False, encoding='utf-8'):
"""Constructor for Parser objects
:parameters:
dialect
Usually created using :func:`creoleparser.dialects.create_dialect`
method
This value is passed to Genshies Steam.render(). Possible values
include ``xhtml``, ``html``, ``xml``, and ``text``.
strip_whitespace
This value is passed to Genshies Steam.render().
encoding
This value is passed to Genshies Steam.render(). If ``None``, the ouput
will be a unicode object.
"""
if isinstance(dialect,type):
self.dialect = dialect()
else:
warnings.warn("""
'dialect' should be a type object.
"""
)
self.dialect = dialect
self.method = method
self.strip_whitespace = strip_whitespace
self.encoding=encoding
def parse(self,text,element_store=None,context='block', environ=None, preprocess=True):
"""Returns a Genshi Fragment (basically a list of Elements and text nodes).
:parameters:
text
The text to be parsed.
context
This is useful for marco development where (for example) supression
of paragraph tags is desired. Can be 'inline', 'block', or a list
of WikiElement objects (use with caution).
element_store
Internal dictionary that's passed around a lot ;)
environ
This can be any type of object. It will be passed to ``macro_func``
unchanged (for a description of ``macro_func``, see
:func:`~creoleparser.dialects.create_dialect`).
preprocess
Passes text through preprocess method that replaces Windows style
line breaks.
"""
if element_store is None:
element_store = {}
if environ is None:
environ = {}
if not isinstance(context,list):
if context == 'block':
top_level_elements = self.dialect.block_elements
elif context == 'inline':
top_level_elements = self.dialect.inline_elements
else:
top_level_elements = context
if preprocess:
text = self.preprocess(text)
return bldr.tag(fragmentize(text,top_level_elements,element_store, environ))
def generate(self,text,element_store=None,context='block', environ=None, preprocess=True):
"""Returns a Genshi Stream. See
:meth:`~creoleparser.core.Parser.parse` for named parameter descriptions.
"""
return self.parse(text, element_store, context, environ, preprocess).generate()
def render(self, text, element_store=None, context='block', environ=None, preprocess=True, **kwargs):
"""Returns the final output string (e.g., xhtml). See
:meth:`~creoleparser.core.Parser.parse` for named parameter descriptions.
Left over keyword arguments (``kwargs``) will be passed to Genshi's Stream.render() method,
overriding the corresponding attributes of the Parser object. For more infomation on Streams,
see the `Genshi documentation <http://genshi.edgewall.org/wiki/Documentation/streams.html#serialization-options>`_.
"""
kwargs.setdefault('method',self.method)
kwargs.setdefault('encoding',self.encoding)
if kwargs['method'] != "text":
kwargs.setdefault('strip_whitespace',self.strip_whitespace)
stream = self.generate(text, element_store, context, environ, preprocess)
return stream.render(**kwargs)
    def __call__(self,text, **kwargs):
        """Wrapper for the render method. Returns final output string.
        """
        # All keyword arguments are forwarded unchanged to render().
        return self.render(text, **kwargs)
def preprocess(self,text):
"""This should generally be called before fragmentize().
:parameter text: text to be processsed.
"""
text = text.replace("\r\n", "\n")
text = text.replace("\r", "\n")
return text
class ArgParser(object):
    """Creates a callable object for parsing macro argument strings

    >>> from dialects import creepy20_base
    >>> my_parser = ArgParser(dialect=creepy20_base())
    >>> my_parser(" one two foo='three' boo='four' ")
    (['one', 'two'], {'foo': 'three', 'boo': 'four'})

    A parser returns a two-tuple, the first item being a list of positional
    arguments and the second a dictionary of keyword arguments. Argument
    values are either strings or lists.
    """

    def __init__(self,dialect, convert_implicit_lists=True,
                 key_func=None, illegal_keys=(), convert_unicode_keys=True):
        """Constructor for ArgParser objects

        :parameters:
          convert_unicode_keys
            If *True*, keys will be converted using ``str(key)`` before being
            added to the output dictionary. This allows the dictionary to be
            safely passed to functions using the special ``**`` form (i.e.,
            ``func(**kwargs)``).
          dialect
            Usually created using :func:`~creoleparser.dialects.creepy10_base`
            or :func:`~creoleparser.dialects.creepy20_base`
          convert_implicit_lists
            If *True*, all implicit lists will be converted to strings
            using ``' '.join(list)``. "Implicit" lists are created when
            positional arguments follow keyword arguments
            (see :func:`~creoleparser.dialects.creepy10_base`).
          illegal_keys
            A tuple of keys that will be post-fixed with an underscore if found
            during parsing.
          key_func
            If supplied, this function will be used to transform the names of
            keyword arguments. It must accept a single positional argument.
            For example, this can be used to make keywords case insensitive:

            >>> from string import lower
            >>> from dialects import creepy20_base
            >>> my_parser = ArgParser(dialect=creepy20_base(),key_func=lower)
            >>> my_parser(" Foo='one' ")
            ([], {'foo': 'one'})
        """
        # ``dialect`` is a factory (e.g. creepy20_base()); calling it here
        # produces the dialect instance used for tokenizing.
        self.dialect = dialect()
        self.convert_implicit_lists = convert_implicit_lists
        self.key_func = key_func
        self.illegal_keys = illegal_keys
        self.convert_unicode_keys = convert_unicode_keys

    def __call__(self, arg_string, **kwargs):
        """Parses the ``arg_string`` returning a two-tuple

        Keyword arguments (``kwargs``) can be used to override the corresponding
        attributes of the ArgParser object (see above). However, the
        ``dialect`` attribute **cannot** be overridden.
        """
        # Fill in any option the caller did not override explicitly.
        kwargs.setdefault('convert_implicit_lists',self.convert_implicit_lists)
        kwargs.setdefault('key_func',self.key_func)
        kwargs.setdefault('illegal_keys',self.illegal_keys)
        kwargs.setdefault('convert_unicode_keys',self.convert_unicode_keys)
        return self._parse(arg_string,**kwargs)

    def _parse(self,arg_string, convert_implicit_lists, key_func, illegal_keys,
               convert_unicode_keys):
        # Tokenize with the dialect's grammar: keyword arguments come back as
        # (key, value) tuples, positional arguments as plain values.
        frags = fragmentize(arg_string,self.dialect.top_elements,{},{})
        positional_args = []
        kw_args = {}
        for arg in frags:
            if isinstance(arg,tuple):
                k, v = arg
                if convert_unicode_keys:
                    # str() the key so the dict is safe for **kwargs use.
                    k = str(k)
                if key_func:
                    k = key_func(k)
                if k in illegal_keys:
                    # Post-fix reserved names rather than dropping them.
                    k = k + '_'
                if k in kw_args:
                    # Repeated keyword: merge old and new values into a list
                    # and mark it implicit so it may be re-joined below.
                    if isinstance(v,list):
                        try:
                            kw_args[k].extend(v)
                        except AttributeError:
                            # Existing value is a scalar; prepend it to v.
                            v.insert(0,kw_args[k])
                            kw_args[k] = v
                    elif isinstance(kw_args[k],list):
                        kw_args[k].append(v)
                    else:
                        kw_args[k] = [kw_args[k], v]
                    kw_args[k] = ImplicitList(kw_args[k])
                else:
                    kw_args[k] = v
                # Implicit lists optionally collapse back to a single string.
                if isinstance(kw_args[k],ImplicitList) and convert_implicit_lists:
                    kw_args[k] = ' ' .join(kw_args[k])
            else:
                positional_args.append(arg)
        return (positional_args, kw_args)
def fragmentize(text,wiki_elements, element_store, environ, remove_escapes=True):
    """Takes a string of wiki markup and outputs a list of genshi
    Fragments (Elements and strings).

    This recursive function, with help from the WikiElement objects,
    does almost all the parsing.

    When no WikiElement objects are supplied, escapes are removed from
    ``text`` (unless ``remove_escapes`` is False) and it is
    returned as-is. This is the only way for recursion to stop.

    :parameters:
      text
        the text to be parsed
      wiki_elements
        list of WikiElement objects to be searched for
      environ
        object that may by used by macros
      remove_escapes
        If False, escapes will not be removed
    """
    while wiki_elements:
        # If the first supplied wiki_element is actually a list of elements, \
        # search for all of them and match the closest one only.
        if isinstance(wiki_elements[0],(list,tuple)):
            x = None
            mos = None
            for element in wiki_elements[0]:
                mo = element.regexp.search(text)
                if mo:
                    # Keep whichever alternative matches earliest in the text.
                    if x is None or mo.start() < x:
                        x,wiki_element,mos = mo.start(),element,[mo]
        else:
            wiki_element = wiki_elements[0]
            mos = [mo for mo in wiki_element.regexp.finditer(text)]
        if mos:
            # The element processes its matches and recurses into
            # fragmentize() for the surrounding/inner text.
            frags = wiki_element._process(mos, text, wiki_elements, element_store, environ)
            break
        else:
            # No match for this element (or group); try the next one.
            wiki_elements = wiki_elements[1:]
    # remove escape characters
    else:
        # Loop exhausted without a break: no elements matched at all.
        if remove_escapes:
            text = esc_to_remove.sub('',text)
        frags = fill_from_store(text,element_store)
    return frags
def fill_from_store(text,element_store):
    """Split ``text`` on placeholder tokens, substituting stored elements.

    Plain text between placeholders is kept as-is; a placeholder whose id
    is missing from ``element_store`` is rendered literally as ``<<<id>>>``.
    """
    pieces = []
    cursor = 0
    for match in place_holder_re.finditer(text):
        leading = text[cursor:match.start()]
        if leading:
            pieces.append(leading)
        key = match.group(1)
        pieces.append(element_store.get(key, '<<<' + key + '>>>'))
        cursor = match.end()
    tail = text[cursor:]
    if tail:
        pieces.append(tail)
    return pieces
class ImplicitList(list):
    """Marker subclass of ``list`` flagging implicitly-created argument lists."""
    pass
class AttrDict(dict):
    """Dictionary whose items are also readable as attributes."""

    def __getattr__(self, attr):
        # Translate KeyError into AttributeError: __getattr__ must raise
        # AttributeError for missing names so that hasattr(), getattr()
        # with a default, and the copy/pickle protocols behave correctly.
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr)
class MacroError(Exception):
    """Exception raised by macros; wraps an arbitrary payload value."""
    def __init__(self, value):
        # The original payload is kept so handlers can inspect it.
        self.value = value
    def __str__(self):
        return repr(self.value)
def _test():
    """Run this module's doctests."""
    import doctest
    doctest.testmod()

if __name__ == "__main__":
    _test()
|
kool79/intellij-community | refs/heads/master | python/lib/Lib/encodings/cp875.py | 593 | """ Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless cp875 codec: both directions delegate to the C-level
    # charmap helpers with the module's lookup tables.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is stateless, so each chunk encodes independently.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding is stateless, so each chunk decodes independently.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; no extra state required.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec; no extra state required.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry registered for the 'cp875' encoding."""
    return codecs.CodecInfo(
        name='cp875',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
u'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
u'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
u'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
u'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
u'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'|' # 0x6A -> VERTICAL LINE
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xa8' # 0x70 -> DIAERESIS
u'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\xa0' # 0x74 -> NO-BREAK SPACE
u'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
u'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
u'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
u'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0x9F -> GREEK SMALL LETTER MU
u'\xb4' # 0xA0 -> ACUTE ACCENT
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\u03bd' # 0xAA -> GREEK SMALL LETTER NU
u'\u03be' # 0xAB -> GREEK SMALL LETTER XI
u'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xAD -> GREEK SMALL LETTER PI
u'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
u'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
u'\xa3' # 0xB0 -> POUND SIGN
u'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
u'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
u'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
u'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
u'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
u'\u2015' # 0xCF -> HORIZONTAL BAR
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb1' # 0xDA -> PLUS-MINUS SIGN
u'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
u'\x1a' # 0xDC -> SUBSTITUTE
u'\u0387' # 0xDD -> GREEK ANO TELEIA
u'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
u'\xa6' # 0xDF -> BROKEN BAR
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\x1a' # 0xE1 -> SUBSTITUTE
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xa7' # 0xEB -> SECTION SIGN
u'\x1a' # 0xEC -> SUBSTITUTE
u'\x1a' # 0xED -> SUBSTITUTE
u'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xEF -> NOT SIGN
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xa9' # 0xFB -> COPYRIGHT SIGN
u'\x1a' # 0xFC -> SUBSTITUTE
u'\x1a' # 0xFD -> SUBSTITUTE
u'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
# Inverse mapping (unicode char -> cp875 byte) built from the decoding table.
encoding_table=codecs.charmap_build(decoding_table)
|
ArcherSys/ArcherSys | refs/heads/master | Lib/site-packages/IPython/lib/security.py | 19 | """
Password generation for the IPython notebook.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import getpass
import hashlib
import hmac
import random
import secrets
# Our own
from IPython.core.error import UsageError
from IPython.utils.py3compat import cast_bytes, str_to_bytes
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# Length of the salt in nr of hex chars, which implies salt_len * 4
# bits of randomness.
salt_len = 12
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def passwd(passphrase=None, algorithm='sha1'):
    """Generate hashed password and salt for use in notebook configuration.

    In the notebook configuration, set `c.NotebookApp.password` to
    the generated string.

    Parameters
    ----------
    passphrase : str
        Password to hash.  If unspecified, the user is asked to input
        and verify a password.
    algorithm : str
        Hashing algorithm to use (e.g, 'sha1' or any argument supported
        by :func:`hashlib.new`).

    Returns
    -------
    hashed_passphrase : str
        Hashed password, in the format 'hash_algorithm:salt:passphrase_hash'.

    Examples
    --------
    >>> passwd('mypassword')
    'sha1:7cf3:b7d6da294ea9592a9480c8f52e63cd42cfb9dd12'
    """
    if passphrase is None:
        # Prompt interactively, giving the user three attempts to type the
        # same password twice.
        for i in range(3):
            p0 = getpass.getpass('Enter password: ')
            p1 = getpass.getpass('Verify password: ')
            if p0 == p1:
                passphrase = p0
                break
            else:
                print('Passwords do not match.')
        else:
            raise UsageError('No matching passwords found. Giving up.')

    h = hashlib.new(algorithm)
    # Draw the salt from the OS CSPRNG: `random.getrandbits` is not suitable
    # for security-sensitive values. Same format/length as before.
    salt = ('%0' + str(salt_len) + 'x') % secrets.randbits(4 * salt_len)
    # Accept both str and bytes passphrases (stdlib replacement for
    # cast_bytes/str_to_bytes).
    if not isinstance(passphrase, bytes):
        passphrase = passphrase.encode('utf-8')
    h.update(passphrase + salt.encode('ascii'))

    return ':'.join((algorithm, salt, h.hexdigest()))
def passwd_check(hashed_passphrase, passphrase):
    """Verify that a given passphrase matches its hashed version.

    Parameters
    ----------
    hashed_passphrase : str
        Hashed password, in the format returned by `passwd`.
    passphrase : str
        Passphrase to validate.

    Returns
    -------
    valid : bool
        True if the passphrase matches the hash.

    Examples
    --------
    >>> from IPython.lib.security import passwd_check
    >>> passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a',
    ...              'mypassword')
    True

    >>> passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a',
    ...              'anotherpassword')
    False
    """
    try:
        algorithm, salt, pw_digest = hashed_passphrase.split(':', 2)
    except (ValueError, TypeError):
        # Malformed hash string (wrong number of fields or wrong type).
        return False

    try:
        h = hashlib.new(algorithm)
    except ValueError:
        # Unknown hash algorithm name.
        return False

    if len(pw_digest) == 0:
        return False

    # Accept both str and bytes inputs (stdlib replacement for cast_bytes).
    if not isinstance(passphrase, bytes):
        passphrase = passphrase.encode('utf-8')
    if not isinstance(salt, bytes):
        salt = salt.encode('ascii')
    h.update(passphrase + salt)

    # Constant-time comparison to avoid leaking digest prefixes via timing.
    return hmac.compare_digest(h.hexdigest(), pw_digest)
|
kiriappeee/lemur | refs/heads/master | lemur/default.conf.py | 9 | # This is just Python which means you can inherit and tweak settings
import os

# Absolute path of the directory containing this config file (available
# for building other paths relative to the install location).
_basedir = os.path.abspath(os.path.dirname(__file__))

# E-mail addresses of site administrators.
ADMINS = frozenset([''])

THREADS_PER_PAGE = 8

# General
# These will need to be set to `True` if you are developing locally
CORS = False
# NOTE(review): Flask only honors UPPERCASE config keys (`DEBUG`); confirm
# this lowercase `debug` is actually read somewhere, or it is dead config.
debug = False

# Logging
LOG_LEVEL = "DEBUG"
LOG_FILE = "lemur.log"
|
sathnaga/avocado-vt | refs/heads/master | virttest/qemu_io.py | 3 | import re
import aexpect
from avocado.utils import process
from virttest import utils_misc
from virttest import error_context
class QemuIOParamError(Exception):
    """Raised when qemu-io command-line options are invalid or missing."""
    pass
class QemuIO(object):
    """
    Base class for building and executing qemu-io commands.
    """

    def __init__(self, test, params, image_name, blkdebug_cfg="",
                 prompt=r"qemu-io>\s*$", log_filename=None, io_options="",
                 log_func=None):
        """
        :param test: test object (kept for interface compatibility).
        :param params: test parameters used to locate the qemu-io binary.
        :param image_name: image the qemu-io command will operate on.
        :param blkdebug_cfg: optional blkdebug configuration file.
        :param prompt: regex matching the interactive qemu-io prompt.
        :param log_filename: if set, session output is logged to this file.
        :param io_options: comma-separated qemu-io options (e.g. "c,cmd").
        :param log_func: function used for error-context logging.
        """
        self.type = ""
        if log_filename:
            # Tag the log file with a random suffix so parallel runs do not
            # clobber each other's output.
            log_filename += "-" + utils_misc.generate_random_string(4)
            self.output_func = utils_misc.log_line
            self.output_params = (log_filename,)
        else:
            self.output_func = None
            self.output_params = ()
        self.output_prefix = ""
        self.prompt = prompt
        self.blkdebug_cfg = blkdebug_cfg
        self.qemu_io_cmd = utils_misc.get_qemu_io_binary(params)
        self.io_options = io_options
        self.run_command = False
        self.image_name = image_name
        self.log_func = log_func

    def get_cmd_line(self, ignore_option=(), essential_option=(),
                     forbid_option=()):
        """
        Generate the command line for qemu-io from the parameters.

        :param ignore_option: options to silently drop from the command
        :param essential_option: at least one of these options must appear
        :param forbid_option: options that must not appear
        :return: qemu-io command line string
        :raise QemuIOParamError: if a forbidden option is present, or
                                 ``essential_option`` is non-empty and none
                                 of its options were found
        """
        # Immutable empty defaults (tuples) avoid the shared-mutable-default
        # pitfall; they are only membership-tested, so any iterable works.
        essential_flag = False
        qemu_io_cmd = self.qemu_io_cmd
        if self.io_options:
            for io_option in self.io_options.split(","):
                if io_option in ignore_option:
                    pass
                elif io_option in forbid_option:
                    raise QemuIOParamError
                else:
                    if not essential_flag and io_option in essential_option:
                        essential_flag = True
                    # Single-character options take one dash, long ones two.
                    if len(io_option) == 1:
                        qemu_io_cmd += " -%s" % io_option
                    else:
                        qemu_io_cmd += " --%s" % io_option
        if essential_option and not essential_flag:
            raise QemuIOParamError
        if self.image_name:
            qemu_io_cmd += " "
            if self.blkdebug_cfg:
                # Wrap the image in a blkdebug protocol prefix.
                qemu_io_cmd += "blkdebug:%s:" % self.blkdebug_cfg
            qemu_io_cmd += self.image_name
        return qemu_io_cmd

    def cmd_output(self, command):
        """
        Run a command in qemu-io (implemented by subclasses).
        """
        pass

    def close(self):
        """
        Clean up (implemented by subclasses).
        """
        pass
class QemuIOShellSession(QemuIO):
    """
    Use a persistent interactive shell session to execute qemu-io commands.
    """
    def __init__(self, test, params, image_name, blkdebug_cfg="",
                 prompt=r"qemu+-io>\s*$", log_filename=None, io_options="",
                 log_func=None):
        QemuIO.__init__(self, test, params, image_name, blkdebug_cfg, prompt,
                        log_filename, io_options, log_func)
        self.type = "shell"
        # Options that make qemu-io print-and-exit (help/version) or run a
        # single command are incompatible with an interactive session.
        forbid_option = ["h", "help", "V", "version", "c", "cmd"]
        self.qemu_io_cmd = self.get_cmd_line(forbid_option=forbid_option)
        # The interactive session is created lazily by cmd_output().
        self.create_session = True
        self.session = None

    @error_context.context_aware
    def cmd_output(self, command, timeout=60):
        """
        Get output from shell session. If the create flag is True, init the
        shell session and set the create flag to False.

        :param command: command to execute in qemu-io
        :param timeout: timeout for execute the command
        """
        qemu_io_cmd = self.qemu_io_cmd
        prompt = self.prompt
        output_func = self.output_func
        output_params = self.output_params
        output_prefix = self.output_prefix
        if self.create_session:
            error_context.context(
                "Running command: %s" % qemu_io_cmd, self.log_func)
            self.session = aexpect.ShellSession(qemu_io_cmd, echo=True,
                                                prompt=prompt,
                                                output_func=output_func,
                                                output_params=output_params,
                                                output_prefix=output_prefix)
            # Record the command line in log file
            if self.output_func:
                params = self.output_params + (qemu_io_cmd, )
                self.output_func(*params)
            self.create_session = False
            # Get the reaction from session
            self.session.cmd_output("\n")
        error_context.context("Executing command: %s" % command, self.log_func)
        return self.session.cmd_output(command, timeout=timeout)

    def close(self):
        """
        Close the shell session for qemu-io
        """
        # Only close if a session was actually created.
        if not self.create_session:
            self.session.close()
class QemuIOSystem(QemuIO):
    """
    Run qemu-io with a command line which will return immediately
    """
    def __init__(self, test, params, image_name, blkdebug_cfg="",
                 prompt=r"qemu-io>\s*$", log_filename=None, io_options="",
                 log_func=None):
        QemuIO.__init__(self, test, params, image_name, blkdebug_cfg, prompt,
                        log_filename, io_options, log_func)
        # '-c' is appended per call by cmd_output(), so drop it here; one of
        # the listed options must be present for a valid one-shot invocation.
        ignore_option = ["c", "cmd"]
        essential_option = ["h", "help", "V", "version", "c", "cmd"]
        self.qemu_io_cmd = self.get_cmd_line(ignore_option=ignore_option,
                                             essential_option=essential_option)

    @error_context.context_aware
    def cmd_output(self, command, timeout=60):
        """
        Get output from system_output. Add the command to the qemu-io command
        line with -c and record the output in the log file.

        :param command: command to execute in qemu-io
        :param timeout: timeout for execute the command
        """
        qemu_io_cmd = self.qemu_io_cmd
        if command:
            qemu_io_cmd += " -c '%s'" % command
        error_context.context(
            "Running command: %s" % qemu_io_cmd, self.log_func)
        output = process.run(qemu_io_cmd, timeout=timeout).stdout_text
        # Record command line in log file
        if self.output_func:
            params = self.output_params + (qemu_io_cmd,)
            self.output_func(*params)
            params = self.output_params + (output,)
            self.output_func(*params)
        return output

    def close(self):
        """
        To keep the same interface with QemuIOShellSession
        """
        pass
|
masakaya/fxref | refs/heads/master | test/Framework/gtest/test/gtest_shuffle_test.py | 3023 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

# Filter used when testing the interaction of shuffling and filtering.
TEST_FILTER = 'A*.A:A*.B:C*'

# Lazily-populated caches of the test lists observed under various flag
# combinations; filled once by CalculateTestLists() and shared by all
# test methods.
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Returns the flag that makes Google Test run disabled tests too."""
  return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
  """Returns a --gtest_filter flag selecting *test_filter*."""
  return '--gtest_filter={0}'.format(test_filter)
def RepeatFlag(n):
  """Returns a --gtest_repeat flag requesting *n* iterations."""
  return '--gtest_repeat=' + str(n)
def ShuffleFlag():
  """Returns the flag that turns on test shuffling."""
  return '--gtest_shuffle'
def RandomSeedFlag(n):
  """Returns a --gtest_random_seed flag fixing the shuffle seed to *n*."""
  return '--gtest_random_seed=' + str(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the test program and returns its output.

  Args:
    extra_env: a map of extra environment variables to set for the run.
    args: command line flags to pass to gtest_shuffle_test_.
  """
  # Start from a copy so the test runner's own environment is not mutated.
  environ_copy = os.environ.copy()
  environ_copy.update(extra_env)
  return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """
  test_iterations = []
  for line in RunAndReturnOutput(extra_env, args).split('\n'):
    if line.startswith('----'):
      # A '----' separator line starts a new iteration's test list.
      tests = []
      test_iterations.append(tests)
    elif line.strip():
      # NOTE(review): assumes a '----' line precedes the first test name;
      # otherwise 'tests' would be unbound here -- confirm against the
      # program's output format.
      tests.append(line.strip())  # 'TestCaseName.TestName'
  return test_iterations
def GetTestCases(tests):
  """Returns a list of test cases in the given full test names.

  Args:
    tests: a list of full test names

  Returns:
    A list of test cases from 'tests', in their original order.
    Consecutive duplicates are removed.
  """
  seen = set()
  ordered_cases = []
  for full_name in tests:
    case_name = full_name.split('.')[0]
    if case_name not in seen:
      seen.add(case_name)
      ordered_cases.append(case_name)
  return ordered_cases
def CalculateTestLists():
  """Calculates the list of tests run under different flags.

  Populates the module-level caches on first call; each list is only
  filled while empty, so repeated calls (one per test method) are cheap.
  """
  if not ALL_TESTS:
    ALL_TESTS.extend(
        GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(
        GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
  if not SHARDED_TESTS:
    SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [])[0])
  if not SHUFFLED_ALL_TESTS:
    # Seed is fixed at 1 so the shuffled orders are reproducible.
    SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling.

  Relies on the module-level test lists (ALL_TESTS, ACTIVE_TESTS, ...)
  populated lazily by CalculateTestLists().

  Note: the deprecated unittest alias assert_() has been replaced with
  assertTrue() throughout; they are behaviorally identical.
  """

  def setUp(self):
    # Populate the module-level test lists on first use.
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    self.assertTrue(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assertTrue(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS,
                    SHUFFLED_ACTIVE_TESTS)
    self.assertTrue(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                    SHUFFLED_FILTERED_TESTS)
    self.assertTrue(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                    SHUFFLED_SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    self.assertTrue(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                    GetTestCases(SHUFFLED_ALL_TESTS))
    self.assertTrue(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assertTrue(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assertTrue(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assertTrue(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertTrue(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertTrue(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertTrue(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    for test in ALL_TESTS:
      self.assertTrue(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assertTrue(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assertTrue(test in SHUFFLED_FILTERED_TESTS,
                      '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assertTrue(test in SHUFFLED_SHARDED_TESTS,
                      '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    # Death tests must all run before any non-death test, even shuffled.
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assertTrue(not non_death_test_found,
                        '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    # Helper: asserts that all tests of each test case are contiguous in
    # the given ordered list of 'Case.Test' names.
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively.  Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers.  This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    self.assertTrue(tests_in_iteration1 != tests_in_iteration2,
                    tests_in_iteration1)
    self.assertTrue(tests_in_iteration1 != tests_in_iteration3,
                    tests_in_iteration1)
    self.assertTrue(tests_in_iteration2 != tests_in_iteration3,
                    tests_in_iteration2)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)
# Standard googletest python test-script entry point.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
endocode/contrib | refs/heads/master | flannel-server/Godeps/_workspace/src/github.com/ugorji/go/codec/test.py | 1139 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
    """Return the serialization test corpus.

    The result is: every primitive value, followed by the primitive list
    itself (as a nested element), followed by a few container combinations.
    """
    primitives = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464646464.0,
        False,
        True,
        None,
        u"someday",
        u"",
        u"bytestring",
        1328176922000002000,
        -2206187877999998000,
        270,
        -2013855847999995777,
        #-6795364578871345152,
    ]
    containers = [
        {"true": True,
         "false": False},
        {"true": "True",
         "false": False,
         "uint16(1616)": 1616},
        {"list": [1616, 32323232, True, -3232.0, {"TRUE": True, "FALSE": False}, [True, False]],
         "int32": 32323232, "bool": True,
         "LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
         "SHORT STRING": "1234567890"},
        {True: "true", 8: False, "false": 0},
    ]
    return primitives + [primitives] + containers
def build_test_data(destdir):
    """Serialize every test value with msgpack and cbor into *destdir*.

    For each value index i, writes '<i>.msgpack.golden' and
    '<i>.cbor.golden'.
    """
    l = get_test_data_list()
    for i in range(len(l)):
        # packer = msgpack.Packer()
        # Context managers guarantee the files are closed even if a
        # serializer or write raises (the original leaked handles on error).
        with open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') as f:
            f.write(msgpack.dumps(l[i]))
        with open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') as f:
            f.write(cbor.dumps(l[i]))
def doRpcServer(port, stopTimeSec):
    """Run a msgpack-rpc echo server on localhost:port.

    If stopTimeSec > 0, a timer thread stops the server after that many
    seconds; otherwise the server runs until interrupted.
    """
    class EchoHandler(object):
        def Echo123(self, msg1, msg2, msg3):
            return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
        def EchoStruct(self, msg):
            return ("%s" % msg)

    addr = msgpackrpc.Address('localhost', port)
    server = msgpackrpc.Server(EchoHandler())
    server.listen(addr)
    # run thread to stop it after stopTimeSec seconds if > 0
    if stopTimeSec > 0:
        def myStopRpcServer():
            server.stop()
        t = threading.Timer(stopTimeSec, myStopRpcServer)
        t.start()
    # Blocks until the server is stopped.
    server.start()
def doRpcClientToPythonSvc(port):
    """Call the Python echo server (see doRpcServer) and print the replies.

    NOTE: uses Python 2 print statements; this script is Python 2 only.
    """
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("Echo123", "A1", "B2", "C3")
    print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
    """Call the Go rpc service (TestRpcInt methods) and print the replies.

    NOTE: uses Python 2 print statements; this script is Python 2 only.
    """
    # print ">>>> port: ", port, " <<<<<"
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
    print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
    """Dispatch a command-line invocation to the matching action.

    Prints a usage line when the command or argument count does not match.
    """
    cmd = args[0] if args else None
    if cmd == "testdata" and len(args) == 2:
        build_test_data(args[1])
    elif cmd == "rpc-server" and len(args) == 3:
        doRpcServer(int(args[1]), int(args[2]))
    elif cmd == "rpc-client-python-service" and len(args) == 2:
        doRpcClientToPythonSvc(int(args[1]))
    elif cmd == "rpc-client-go-service" and len(args) == 2:
        doRpcClientToGoSvc(int(args[1]))
    else:
        print("Usage: test.py " +
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
    # Dispatch on the command-line arguments (see doMain for commands).
    doMain(sys.argv[1:])
|
emonty/ansible-modules-core | refs/heads/devel | packaging/os/apt_key.py | 10 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Jayson Vantuyl <jayson@aggressive.ly>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apt_key
author: "Jayson Vantuyl & others (@jvantuyl)"
version_added: "1.0"
short_description: Add or remove an apt key
description:
- Add or remove an I(apt) key, optionally downloading it
notes:
- doesn't download the key unless it really needs it
- as a sanity check, downloaded key id must match the one specified
- best practice is to specify the key id and the url
options:
id:
required: false
default: none
description:
- identifier of key. Including this allows check mode to correctly report the changed state.
- "If specifying a subkey's id be aware that apt-key does not understand how to remove keys via a subkey id. Specify the primary key's id instead."
data:
required: false
default: none
description:
- keyfile contents to add to the keyring
file:
required: false
default: none
description:
- path to a keyfile on the remote server to add to the keyring
keyring:
required: false
default: none
description:
- path to specific keyring file in /etc/apt/trusted.gpg.d
version_added: "1.3"
url:
required: false
default: none
description:
- url to retrieve key from.
keyserver:
version_added: "1.6"
required: false
default: none
description:
- keyserver to retrieve key from.
state:
required: false
choices: [ absent, present ]
default: present
description:
- used to specify if key is being added or revoked
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Add an apt key by id from a keyserver
- apt_key:
keyserver: keyserver.ubuntu.com
id: 36A1D7869245C8950F966E92D8576A8BA88D21E9
# Add an Apt signing key, uses whichever key is at the URL
- apt_key:
url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
state: present
# Add an Apt signing key, will not download if present
- apt_key:
id: 473041FA
url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
state: present
# Remove an Apt signing key, uses whichever key is at the URL
- apt_key:
url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
state: absent
# Remove a Apt specific signing key, leading 0x is valid
- apt_key:
id: 0x473041FA
state: absent
# Add a key from a file on the Ansible server
- apt_key:
data: "{{ lookup('file', 'apt.gpg') }}"
state: present
# Add an Apt signing key to a specific keyring file
- apt_key:
id: 473041FA
url: "https://ftp-master.debian.org/keys/archive-key-6.0.asc"
keyring: /etc/apt/trusted.gpg.d/debian.gpg
# Add Apt signing key on remote server to keyring
- apt_key:
id: 473041FA
file: /tmp/apt.gpg
state: present
'''
# FIXME: standardize into module_common
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
# Absolute path to the apt-key executable; resolved once by
# find_needed_binaries() and used by all command builders below.
apt_key_bin = None


def find_needed_binaries(module):
    """Locate required external binaries, failing the module if any is missing."""
    global apt_key_bin

    apt_key_bin = module.get_bin_path('apt-key', required=True)

    ### FIXME: Is there a reason that gpg and grep are checked?  Is it just
    # cruft or does the apt .deb package not require them (and if they're not
    # installed, /usr/bin/apt-key fails?)
    module.get_bin_path('gpg', required=True)
    module.get_bin_path('grep', required=True)
def parse_key_id(key_id):
    """Validate *key_id* and break it into segments.

    :arg key_id: The key_id as supplied by the user.  A valid key_id is
        8, 16, or more hexadecimal chars with an optional leading ``0x``.
    :returns: A tuple ``(short_key_id, fingerprint, key_id)``: the portion
        suitable for ``apt-key del`` (last 8 chars), the portion suitable for
        comparisons with ``--list-public-keys`` (last 16 chars), and the
        portion usable with ``--recv-key`` (the whole id).  For short ids
        some of these values coincide.
    :raises ValueError: if key_id is not hexadecimal or has a bad length.
    """
    # Raises ValueError unless the whole string is hexadecimal.
    int(key_id, 16)

    key_id = key_id.upper()
    if key_id.startswith('0X'):
        key_id = key_id[2:]

    if len(key_id) not in (8, 16) and len(key_id) <= 16:
        raise ValueError('key_id must be 8, 16, or 16+ hexadecimal characters in length')

    short_key_id = key_id[-8:]
    fingerprint = key_id[-16:] if len(key_id) > 16 else key_id
    return short_key_id, fingerprint, key_id
def all_keys(module, keyring, short_format):
    """Return the key ids currently present in the apt keyring.

    When *keyring* is set, only that keyring file is listed.  When
    *short_format* is true, ids are reduced to their last 8 characters.
    """
    if keyring:
        cmd = "%s --keyring %s adv --list-public-keys --keyid-format=long" % (apt_key_bin, keyring)
    else:
        cmd = "%s adv --list-public-keys --keyid-format=long" % apt_key_bin
    (rc, out, err) = module.run_command(cmd)

    results = []
    for line in to_native(out).split('\n'):
        # Only 'pub' (primary key) and 'sub' (subkey) lines carry key ids.
        if not line.startswith(("pub", "sub")):
            continue
        code = line.split()[1]
        # 'code' looks like '<len_type>/<key id>'; keep the id part only.
        (len_type, real_code) = code.split("/")
        results.append(real_code)

    if short_format:
        results = shorten_key_ids(results)
    return results
def shorten_key_ids(key_id_list):
    """Convert key ids to the 'short' format: their last 8 characters."""
    return [key[-8:] for key in key_id_list]
def download_key(module, url):
    """Fetch a key file from *url* and return its raw contents.

    Fails the module (never returns) on a missing URL, a non-200 response,
    or any fetch error.
    """
    # FIXME: move get_url code to common, allow for in-memory D/L, support proxies
    # and reuse here
    if url is None:
        module.fail_json(msg="needed a URL but was not specified")

    try:
        response, info = fetch_url(module, url)
        status = info['status']
        if status != 200:
            module.fail_json(msg="Failed to download key at %s: %s" % (url, info['msg']))
        return response.read()
    except Exception:
        module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
def import_key(module, keyring, keyserver, key_id):
    """Fetch *key_id* from *keyserver* into the apt keyring.

    Retries the apt-key invocation up to 5 times; fails the module with a
    specific message when the key is not found on the keyserver, or a
    generic one (including rc/stdout/stderr) for other errors.
    Returns True on success.
    """
    if keyring:
        cmd = "%s --keyring %s adv --keyserver %s --recv %s" % (apt_key_bin, keyring, keyserver, key_id)
    else:
        cmd = "%s adv --keyserver %s --recv %s" % (apt_key_bin, keyserver, key_id)
    # Force a C locale so gpg/apt-key output is parseable regardless of the
    # host locale.  (Hoisted out of the retry loop: it is loop-invariant.)
    lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
    for retry in range(5):
        (rc, out, err) = module.run_command(cmd, environ_update=lang_env)
        if rc == 0:
            break
    else:
        # Out of retries
        if rc == 2 and 'not found on keyserver' in out:
            msg = 'Key %s not found on keyserver %s' % (key_id, keyserver)
            module.fail_json(cmd=cmd, msg=msg)
        else:
            msg = "Error fetching key %s from keyserver: %s" % (key_id, keyserver)
            module.fail_json(cmd=cmd, msg=msg, rc=rc, stdout=out, stderr=err)
    return True
def add_key(module, keyfile, keyring, data=None):
    """Add a key to the apt keyring.

    Raw *data* (when given) is piped to ``apt-key add -``; otherwise the
    key is read from *keyfile*.  Always returns True (run_command is
    invoked with check_rc=True, so failures abort the module).
    """
    if data is not None:
        if keyring:
            cmd = "%s --keyring %s add -" % (apt_key_bin, keyring)
        else:
            cmd = "%s add -" % apt_key_bin
        module.run_command(cmd, data=data, check_rc=True, binary_data=True)
    else:
        if keyring:
            cmd = "%s --keyring %s add %s" % (apt_key_bin, keyring, keyfile)
        else:
            cmd = "%s add %s" % (apt_key_bin, keyfile)
        module.run_command(cmd, check_rc=True)
    return True
def remove_key(module, key_id, keyring):
    """Delete *key_id* from the apt keyring (or a specific *keyring* file).

    Always returns True; a non-zero apt-key exit aborts the module via
    check_rc=True.
    """
    # FIXME: use module.run_command, fail at point of error and don't discard useful stdin/stdout
    if keyring:
        cmd = '%s --keyring %s del %s' % (apt_key_bin, keyring, key_id)
    else:
        cmd = '%s del %s' % (apt_key_bin, key_id)
    module.run_command(cmd, check_rc=True)
    return True
def main():
    """Module entry point: add or remove an apt key per the parameters."""
    module = AnsibleModule(
        argument_spec=dict(
            id=dict(required=False, default=None),
            url=dict(required=False),
            data=dict(required=False),
            file=dict(required=False, type='path'),
            # NOTE(review): 'key' is accepted but never read below; kept for
            # backward compatibility with existing playbooks.
            key=dict(required=False),
            keyring=dict(required=False, type='path'),
            validate_certs=dict(default='yes', type='bool'),
            keyserver=dict(required=False),
            state=dict(required=False, choices=['present', 'absent'], default='present')
        ),
        supports_check_mode=True,
        # BUG FIX: the option is named 'file', not 'filename'; the previous
        # ('filename', ...) entry referenced a non-existent option, so the
        # exclusivity check never fired.
        mutually_exclusive=(('file', 'keyserver', 'data', 'url'),),
    )

    key_id = module.params['id']
    url = module.params['url']
    data = module.params['data']
    filename = module.params['file']
    keyring = module.params['keyring']
    state = module.params['state']
    keyserver = module.params['keyserver']
    changed = False

    fingerprint = short_key_id = key_id
    short_format = False
    if key_id:
        try:
            short_key_id, fingerprint, key_id = parse_key_id(key_id)
        except ValueError:
            module.fail_json(msg='Invalid key_id', id=key_id)
        if len(fingerprint) == 8:
            # Only a short id was given; compare against shortened key lists.
            short_format = True

    find_needed_binaries(module)

    keys = all_keys(module, keyring, short_format)
    return_values = {}

    if state == 'present':
        if fingerprint and fingerprint in keys:
            # Key already installed: nothing to do.
            module.exit_json(changed=False)
        elif fingerprint and fingerprint not in keys and module.check_mode:
            ### TODO: Someday we could go further -- write keys out to
            # a temporary file and then extract the key id from there via gpg
            # to decide if the key is installed or not.
            module.exit_json(changed=True)
        else:
            if not filename and not data and not keyserver:
                data = download_key(module, url)

            if filename:
                add_key(module, filename, keyring)
            elif keyserver:
                import_key(module, keyring, keyserver, key_id)
            else:
                add_key(module, "-", keyring, data)

            # Detect whether anything actually changed by re-listing keys.
            keys2 = all_keys(module, keyring, short_format)
            if len(keys) != len(keys2):
                changed = True
            if fingerprint and fingerprint not in keys2:
                module.fail_json(msg="key does not seem to have been added", id=key_id)
            module.exit_json(changed=changed)
    elif state == 'absent':
        if not key_id:
            module.fail_json(msg="key is required")
        if fingerprint in keys:
            if module.check_mode:
                module.exit_json(changed=True)

            # we use the "short" id: key_id[-8:], short_format=True
            # it's a workaround for https://bugs.launchpad.net/ubuntu/+source/apt/+bug/1481871
            if remove_key(module, short_key_id, keyring):
                keys = all_keys(module, keyring, short_format)
                if fingerprint in keys:
                    module.fail_json(msg="apt-key del did not return an error but the key was not removed (check that the id is correct and *not* a subkey)", id=key_id)
                changed = True
            else:
                # FIXME: module.fail_json or exit-json immediately at point of failure
                module.fail_json(msg="error removing key_id", **return_values)

    module.exit_json(changed=changed, **return_values)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
gordonb3/domoticz | refs/heads/development | plugins/examples/Pinger.py | 14 | # ICMP Plugin
#
# Author: Dnpwwo, 2017 - 2018
#
"""
<plugin key="ICMP" name="Pinger (ICMP)" author="dnpwwo" version="3.1.1">
<description>
ICMP Pinger Plugin.<br/><br/>
Specify comma delimted addresses (IP or DNS names) of devices that are to be pinged.<br/>
When remote devices are found a matching Domoticz device is created in the Devices tab.
</description>
<params>
<param field="Address" label="Address(es) comma separated" width="300px" required="true" default="127.0.0.1"/>
<param field="Mode1" label="Ping Frequency" width="40px">
<options>
<option label="2" value="2"/>
<option label="3" value="3"/>
<option label="4" value="4"/>
<option label="5" value="5"/>
<option label="6" value="6"/>
<option label="8" value="8"/>
<option label="10" value="10" default="true" />
<option label="12" value="12"/>
<option label="14" value="14"/>
<option label="16" value="16"/>
<option label="18" value="18"/>
<option label="20" value="20"/>
</options>
</param>
<param field="Mode5" label="Time Out Lost Devices" width="75px">
<options>
<option label="True" value="True" default="true"/>
<option label="False" value="False" />
</options>
</param>
<param field="Mode6" label="Debug" width="150px">
<options>
<option label="None" value="0" default="true" />
<option label="Python Only" value="2"/>
<option label="Basic Debugging" value="62"/>
<option label="Basic+Messages" value="126"/>
<option label="Connections Only" value="16"/>
<option label="Connections+Python" value="18"/>
<option label="Connections+Queue" value="144"/>
<option label="All" value="-1"/>
</options>
</param>
</params>
</plugin>
"""
import Domoticz
from datetime import datetime
class IcmpDevice:
    """Wraps a single ICMP (ping) connection to one network address."""

    # Target address (IP or DNS name) and the active Domoticz connection.
    Address = ""
    icmpConn = None

    def __init__(self, destination):
        self.Address = destination
        self.Open()

    def __str__(self):
        conn = self.icmpConn
        return str(conn) if (conn != None) else "None"

    def Open(self):
        # Recreate the listening connection from scratch.
        if (self.icmpConn != None):
            self.Close()
        self.icmpConn = Domoticz.Connection(Name=self.Address, Transport="ICMP/IP",
                                            Protocol="ICMP", Address=self.Address)
        self.icmpConn.Listen()

    def Send(self):
        # (Re)open if needed, otherwise fire a ping payload.
        if (self.icmpConn == None):
            self.Open()
        else:
            self.icmpConn.Send("Domoticz")

    def Close(self):
        self.icmpConn = None
class BasePlugin:
    # Domoticz plugin object: each heartbeat it pings the next configured
    # address (round-robin) and reflects reachability in Domoticz devices.
    # Relies on the Domoticz-injected globals Parameters and Devices.

    # Connection of the ping currently in flight (None when idle).
    icmpConn = None
    # Addresses parsed from the plugin's Address parameter.
    icmpList = []
    # Index into icmpList of the next address to ping.
    nextDev = 0

    def onStart(self):
        # Enable debugging output when Mode6 is not "0".
        if Parameters["Mode6"] != "0":
            DumpConfigToLog()
            Domoticz.Debugging(int(Parameters["Mode6"]))
        # Mode1 is the ping frequency in seconds.
        Domoticz.Heartbeat(int(Parameters["Mode1"]))

        # Find devices that already exist, create those that don't
        self.icmpList = Parameters["Address"].replace(" ", "").split(",")
        for destination in self.icmpList:
            Domoticz.Debug("Endpoint '"+destination+"' found.")
            deviceFound = False
            for Device in Devices:
                if (("Name" in Devices[Device].Options) and (Devices[Device].Options["Name"] == destination)): deviceFound = True
            if (deviceFound == False):
                # Create a custom sensor for the round-trip time and a switch
                # for reachability; "Related" links the switch to the sensor
                # by unit number.
                Domoticz.Device(Name=destination, Unit=len(Devices)+1, Type=243, Subtype=31, Image=17, Options={"Custom":"1;ms"}).Create()
                Domoticz.Device(Name=destination, Unit=len(Devices)+1, Type=17, Subtype=0, Image=17, Options={"Name":destination,"Related":str(len(Devices))}).Create()

        # Mark all devices as connection lost if requested
        deviceLost = 0
        if Parameters["Mode5"] == "True":
            deviceLost = 1
        for Device in Devices:
            UpdateDevice(Device, Devices[Device].nValue, Devices[Device].sValue, deviceLost)

    def onConnect(self, Connection, Status, Description):
        if (Status == 0):
            Domoticz.Log("Successful connect to: "+Connection.Address+" which is surprising because ICMP is connectionless.")
        else:
            Domoticz.Log("Failed to connect to: "+Connection.Address+", Description: "+Description)
        # Drop the in-flight connection either way.
        self.icmpConn = None

    def onMessage(self, Connection, Data):
        Domoticz.Debug("onMessage called for connection: '"+Connection.Name+"'")
        if Parameters["Mode6"] == "1":
            DumpICMPResponseToLog(Data)
        if isinstance(Data, dict) and (Data["Status"] == 0):
            # Successful ping: find the switch device matching this address.
            iUnit = -1
            for Device in Devices:
                if ("Name" in Devices[Device].Options):
                    Domoticz.Debug("Checking: '"+Connection.Name+"' against '"+Devices[Device].Options["Name"]+"'")
                    if (Devices[Device].Options["Name"] == Connection.Name):
                        iUnit = Device
                        break
            if (iUnit > 0):
                # Device found, set it to On and if elapsed time suplied update related device
                UpdateDevice(iUnit, 1, "On", 0)
                relatedDevice = int(Devices[iUnit].Options["Related"])
                if ("ElapsedMs" in Data):
                    UpdateDevice(relatedDevice, Data["ElapsedMs"], str(Data["ElapsedMs"]), 0)
        else:
            # Ping failed: switch the matching device(s) Off, optionally
            # flagging them as timed out (Mode5).
            Domoticz.Log("Device: '"+Connection.Name+"' returned '"+Data["Description"]+"'.")
            if Parameters["Mode6"] == "1":
                DumpICMPResponseToLog(Data)
            TimedOut = 0
            if Parameters["Mode5"] == "True": TimedOut = 1
            for Device in Devices:
                if (("Name" in Devices[Device].Options) and (Devices[Device].Options["Name"] == Connection.Name)):
                    UpdateDevice(Device, 0, "Off", TimedOut)
        self.icmpConn = None

    def onHeartbeat(self):
        Domoticz.Debug("Heartbeating...")

        # No response to previous heartbeat so mark as Off
        if (self.icmpConn != None):
            for Device in Devices:
                if (("Name" in Devices[Device].Options) and (Devices[Device].Options["Name"] == self.icmpConn.Name)):
                    Domoticz.Log("Device: '"+Devices[Device].Options["Name"]+"' address '"+self.icmpConn.Address+"' - No response.")
                    TimedOut = 0
                    if Parameters["Mode5"] == "True": TimedOut = 1
                    UpdateDevice(Device, 0, "Off", TimedOut)
                    break
            self.icmpConn = None

        # Ping the next address in the round-robin list.
        Domoticz.Debug("Heartbeating '"+self.icmpList[self.nextDev]+"'")
        self.icmpConn = IcmpDevice(self.icmpList[self.nextDev])
        self.nextDev += 1
        if (self.nextDev >= len(self.icmpList)):
            self.nextDev = 0
# Single plugin instance; Domoticz invokes the module-level hooks below,
# which simply forward to it.
global _plugin
_plugin = BasePlugin()

def onStart():
    global _plugin
    _plugin.onStart()

def onConnect(Connection, Status, Description):
    global _plugin
    _plugin.onConnect(Connection, Status, Description)

def onMessage(Connection, Data):
    global _plugin
    _plugin.onMessage(Connection, Data)

def onHeartbeat():
    global _plugin
    _plugin.onHeartbeat()
def UpdateDevice(Unit, nValue, sValue, TimedOut):
    """Push new values to a Domoticz device, skipping no-op updates."""
    # Make sure that the Domoticz device still exists (they can be deleted) before updating it
    if (Unit in Devices):
        device = Devices[Unit]
        changed = (device.nValue != nValue) or (device.sValue != sValue) or (device.TimedOut != TimedOut)
        if changed:
            device.Update(nValue=nValue, sValue=str(sValue), TimedOut=TimedOut)
            Domoticz.Log("Update "+str(nValue)+":'"+str(sValue)+"' ("+device.Name+")")
    return
def DumpConfigToLog():
    """Log every non-empty plugin parameter and details of all devices."""
    for key in Parameters:
        if Parameters[key] != "":
            Domoticz.Log("'" + key + "':'" + str(Parameters[key]) + "'")
    Domoticz.Log("Device count: " + str(len(Devices)))
    for unit in Devices:
        device = Devices[unit]
        Domoticz.Log("Device: " + str(unit) + " - " + str(device))
        Domoticz.Log("Device ID: '" + str(device.ID) + "'")
        Domoticz.Log("Device Name: '" + device.Name + "'")
        Domoticz.Log("Device nValue: " + str(device.nValue))
        Domoticz.Log("Device sValue: '" + device.sValue + "'")
        Domoticz.Log("Device LastLevel: " + str(device.LastLevel))
    return
def DumpICMPResponseToLog(icmpList):
    """Log an ICMP response, recursing one level into nested dictionaries.

    Non-dict payloads are assumed to be raw bytes and are decoded leniently.
    """
    if isinstance(icmpList, dict):
        Domoticz.Log("ICMP Details ("+str(len(icmpList))+"):")
        for x in icmpList:
            if isinstance(icmpList[x], dict):
                Domoticz.Log("--->'"+x+" ("+str(len(icmpList[x]))+"):")
                for y in icmpList[x]:
                    Domoticz.Log("------->'" + y + "':'" + str(icmpList[x][y]) + "'")
            else:
                Domoticz.Log("--->'" + x + "':'" + str(icmpList[x]) + "'")
    else:
        # BUG FIX: this branch referenced an undefined name 'Data', raising
        # NameError whenever a non-dict payload was logged; the parameter
        # is 'icmpList'.
        Domoticz.Log(icmpList.decode("utf-8", "ignore"))
|
ruud-v-a/rhythmbox | refs/heads/album-artist | plugins/artsearch/artsearch.py | 1 | # -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
#
# Copyright (C) 2011 Jonathan Matthew
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# The Rhythmbox authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and Rhythmbox. This permission is above and beyond the permissions granted
# by the GPL license by which Rhythmbox is covered. If you modify this code
# you may extend this exception to your version of the code, but you are not
# obligated to do so. If you do not wish to do so, delete this exception
# statement from your version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
from gi.repository import GObject, Peas, RB, GdkPixbuf
import gettext
gettext.install('rhythmbox', RB.locale_dir())
from songinfo import AlbumArtPage
import oldcache
from lastfm import LastFMSearch
from local import LocalSearch
from musicbrainz import MusicBrainzSearch
from embedded import EmbeddedSearch
class Search(object):
    """Runs a chain of album-art searches until one succeeds or all fail."""

    def __init__(self, store, key, last_time, searches):
        self.store = store
        self.key = key.copy()
        self.last_time = last_time
        self.searches = searches

    def next_search(self):
        """Start the next pending search; return False when none remain.

        When the chain is exhausted, a negative (NONE) result is stored so
        the same album is not searched again immediately.
        """
        if not self.searches:
            key = RB.ExtDBKey.create_storage("album", self.key.get_field("album"))
            key.add_field("artist", self.key.get_field("artist"))
            self.store.store(key, RB.ExtDBSourceType.NONE, None)
            return False

        search = self.searches.pop(0)
        search.search(self.key, self.last_time, self.store, self.search_done, None)
        return True

    def search_done(self, args):
        # Callback invoked by each search backend; advance the chain.
        self.next_search()
class ArtSearchPlugin (GObject.GObject, Peas.Activatable):
    """Peas plugin that services album-art requests from several back ends."""
    __gtype_name__ = 'ArtSearchPlugin'
    # 'object' is the Peas.Activatable property holding the RB shell;
    # the name is mandated by the Peas interface.
    object = GObject.property(type=GObject.GObject)

    def __init__(self):
        GObject.GObject.__init__(self)

    def do_activate(self):
        """Hook the album-art store's request signal and song-info creation."""
        self.art_store = RB.ExtDB(name="album-art")
        self.req_id = self.art_store.connect("request", self.album_art_requested)
        shell = self.object
        self.csi_id = shell.connect("create_song_info", self.create_song_info)

    def do_deactivate(self):
        """Disconnect signal handlers and drop the store reference."""
        self.art_store.disconnect(self.req_id)
        self.req_id = 0
        self.art_store = None
        shell = self.object
        shell.disconnect(self.csi_id)
        self.csi_id = 0

    def album_art_requested(self, store, key, last_time):
        """Start the back-end search chain for a requested album key."""
        searches = []
        if oldcache.USEFUL:
            searches.append(oldcache.OldCacheSearch())
        searches.append(EmbeddedSearch())
        searches.append(LocalSearch())
        searches.append(MusicBrainzSearch())
        searches.append(LastFMSearch())

        s = Search(store, key, last_time, searches)
        return s.next_search()

    def create_song_info(self, shell, song_info, is_multiple):
        """Add the album art page to single-entry song info windows."""
        if is_multiple is False:
            # The page hooks itself into song_info on construction; the
            # previous unused local binding ('x = ...') has been removed.
            AlbumArtPage(shell, song_info)
|
yvaucher/vertical-ngo | refs/heads/8.0 | logistic_requisition/wizard/logistic_requisition_cancel.py | 5 | # -*- coding: utf-8 -*-
#
#
# Author: Guewen Baconnier
# Copyright 2013-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api
class LogisticsRequisitionCancel(models.TransientModel):
    """ Ask a reason for the logistic requisition cancellation."""
    _name = 'logistic.requisition.cancel'
    # NOTE: _description is derived from the class docstring above.
    _description = __doc__

    # Cancellation reason chosen by the user; required before confirming.
    reason_id = fields.Many2one('logistic.requisition.cancel.reason',
                                string='Reason',
                                required=True)

    @api.multi
    def confirm_cancel(self):
        """Cancel the requisitions given by 'active_ids' in the context,
        recording the selected reason.

        Always returns an act_window_close action (also when the context
        carries no active_ids).
        """
        self.ensure_one()
        act_close = {'type': 'ir.actions.act_window_close'}
        requisition_ids = self.env.context.get('active_ids')
        if requisition_ids is None:
            return act_close
        reqs = self.env['logistic.requisition'].browse(requisition_ids)
        reqs._do_cancel(self.reason_id.id)
        return act_close
|
BeegorMif/HTPC-Manager | refs/heads/master | tornado/test/websocket_test.py | 2 | from __future__ import absolute_import, division, print_function, with_statement
import traceback
from tornado.concurrent import Future
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.test.util import unittest
from tornado.web import Application, RequestHandler
try:
import tornado.websocket
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError
try:
from tornado import speedups
except ImportError:
speedups = None
class TestWebSocketHandler(WebSocketHandler):
    """Base class for testing handlers that exposes the on_close event.

    This allows for deterministic cleanup of the associated socket.
    """
    def initialize(self, close_future):
        # Future resolved on close so tests can wait for teardown.
        self.close_future = close_future

    def on_close(self):
        # Publish the close code/reason for assertions in the tests.
        self.close_future.set_result((self.close_code, self.close_reason))
class EchoHandler(TestWebSocketHandler):
    def on_message(self, message):
        # Echo the message back; the second argument marks the frame as
        # binary when the incoming message was bytes.
        self.write_message(message, isinstance(message, bytes))
class HeaderHandler(TestWebSocketHandler):
    def open(self):
        try:
            # In a websocket context, many RequestHandler methods
            # raise RuntimeErrors.
            self.set_status(503)
            raise Exception("did not get expected exception")
        except RuntimeError:
            pass
        # Report the value of the X-Test request header back to the client.
        self.write_message(self.request.headers.get('X-Test', ''))
class NonWebSocketHandler(RequestHandler):
    # Plain HTTP endpoint, used to verify that websocket_connect fails
    # cleanly against a non-websocket resource.
    def get(self):
        self.write('ok')
class CloseReasonHandler(TestWebSocketHandler):
    def open(self):
        # Close immediately with an explicit code/reason so the client side
        # can assert on close_code / close_reason.
        self.close(1001, "goodbye")
class WebSocketTest(AsyncHTTPTestCase):
    """End-to-end tests of the websocket client against the handlers above
    (echo, header echo, plain HTTP endpoint, explicit close)."""
    def get_app(self):
        # close_future resolves when a handler's connection is torn down,
        # giving tests a deterministic point to wait on.
        self.close_future = Future()
        return Application([
            ('/echo', EchoHandler, dict(close_future=self.close_future)),
            ('/non_ws', NonWebSocketHandler),
            ('/header', HeaderHandler, dict(close_future=self.close_future)),
            ('/close_reason', CloseReasonHandler,
             dict(close_future=self.close_future)),
        ])
    def test_http_request(self):
        # WS server, HTTP client.
        response = self.fetch('/echo')
        self.assertEqual(response.code, 400)
    @gen_test
    def test_websocket_gen(self):
        ws = yield websocket_connect(
            'ws://localhost:%d/echo' % self.get_http_port(),
            io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        ws.close()
        yield self.close_future
    def test_websocket_callbacks(self):
        # Same flow as test_websocket_gen, but via explicit callbacks
        # and self.stop/self.wait instead of coroutines.
        websocket_connect(
            'ws://localhost:%d/echo' % self.get_http_port(),
            io_loop=self.io_loop, callback=self.stop)
        ws = self.wait().result()
        ws.write_message('hello')
        ws.read_message(self.stop)
        response = self.wait().result()
        self.assertEqual(response, 'hello')
        self.close_future.add_done_callback(lambda f: self.stop())
        ws.close()
        self.wait()
    @gen_test
    def test_websocket_http_fail(self):
        # Connecting to an unknown path surfaces the 404 as an HTTPError.
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(
                'ws://localhost:%d/notfound' % self.get_http_port(),
                io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 404)
    @gen_test
    def test_websocket_http_success(self):
        # A successful plain-HTTP response is a websocket protocol error.
        with self.assertRaises(WebSocketError):
            yield websocket_connect(
                'ws://localhost:%d/non_ws' % self.get_http_port(),
                io_loop=self.io_loop)
    @gen_test
    def test_websocket_network_fail(self):
        # Bind then close a socket to get a port that is known to be closed.
        sock, port = bind_unused_port()
        sock.close()
        with self.assertRaises(IOError):
            with ExpectLog(gen_log, ".*"):
                yield websocket_connect(
                    'ws://localhost:%d/' % port,
                    io_loop=self.io_loop,
                    connect_timeout=3600)
    @gen_test
    def test_websocket_close_buffered_data(self):
        ws = yield websocket_connect(
            'ws://localhost:%d/echo' % self.get_http_port())
        ws.write_message('hello')
        ws.write_message('world')
        # Close the underlying stream abruptly while writes may still be
        # buffered; the server side should still see a clean on_close.
        ws.stream.close()
        yield self.close_future
    @gen_test
    def test_websocket_headers(self):
        # Ensure that arbitrary headers can be passed through websocket_connect.
        ws = yield websocket_connect(
            HTTPRequest('ws://localhost:%d/header' % self.get_http_port(),
                        headers={'X-Test': 'hello'}))
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        ws.close()
        yield self.close_future
    @gen_test
    def test_server_close_reason(self):
        ws = yield websocket_connect(
            'ws://localhost:%d/close_reason' % self.get_http_port())
        msg = yield ws.read_message()
        # A message of None means the other side closed the connection.
        self.assertIs(msg, None)
        self.assertEqual(ws.close_code, 1001)
        self.assertEqual(ws.close_reason, "goodbye")
    @gen_test
    def test_client_close_reason(self):
        ws = yield websocket_connect(
            'ws://localhost:%d/echo' % self.get_http_port())
        ws.close(1001, 'goodbye')
        # close_future carries the (code, reason) the server observed.
        code, reason = yield self.close_future
        self.assertEqual(code, 1001)
        self.assertEqual(reason, 'goodbye')
    @gen_test
    def test_check_origin_valid_no_path(self):
        port = self.get_http_port()
        url = 'ws://localhost:%d/echo' % port
        headers = {'Origin': 'http://localhost:%d' % port}
        ws = yield websocket_connect(HTTPRequest(url, headers=headers),
                                     io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        ws.close()
        yield self.close_future
    @gen_test
    def test_check_origin_valid_with_path(self):
        port = self.get_http_port()
        url = 'ws://localhost:%d/echo' % port
        headers = {'Origin': 'http://localhost:%d/something' % port}
        ws = yield websocket_connect(HTTPRequest(url, headers=headers),
                                     io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        ws.close()
        yield self.close_future
    @gen_test
    def test_check_origin_invalid_partial_url(self):
        port = self.get_http_port()
        url = 'ws://localhost:%d/echo' % port
        # Scheme-less Origin values must be rejected.
        headers = {'Origin': 'localhost:%d' % port}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
    @gen_test
    def test_check_origin_invalid(self):
        port = self.get_http_port()
        url = 'ws://localhost:%d/echo' % port
        # Host is localhost, which should not be accessible from some other
        # domain
        headers = {'Origin': 'http://somewhereelse.com'}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
    @gen_test
    def test_check_origin_invalid_subdomains(self):
        port = self.get_http_port()
        url = 'ws://localhost:%d/echo' % port
        # Subdomains should be disallowed by default. If we could pass a
        # resolver to websocket_connect we could test sibling domains as well.
        headers = {'Origin': 'http://subtenant.localhost'}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
class MaskFunctionMixin(object):
    # Subclasses should define self.mask(mask, data)
    def test_mask(self):
        # Fixed vectors: each output byte is data[i] XOR mask[i % 4]
        # (e.g. b'abcd' vs b'b' -> 'a' ^ 'b' == 0x03).
        self.assertEqual(self.mask(b'abcd', b''), b'')
        self.assertEqual(self.mask(b'abcd', b'b'), b'\x03')
        self.assertEqual(self.mask(b'abcd', b'54321'), b'TVPVP')
        self.assertEqual(self.mask(b'ZXCV', b'98765432'), b'c`t`olpd')
        # Include test cases with \x00 bytes (to ensure that the C
        # extension isn't depending on null-terminated strings) and
        # bytes with the high bit set (to smoke out signedness issues).
        self.assertEqual(self.mask(b'\x00\x01\x02\x03',
                                   b'\xff\xfb\xfd\xfc\xfe\xfa'),
                         b'\xff\xfa\xff\xff\xfe\xfb')
        self.assertEqual(self.mask(b'\xff\xfb\xfd\xfc',
                                   b'\x00\x01\x02\x03\x04\x05'),
                         b'\xff\xfa\xff\xff\xfb\xfe')
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
    # Runs the shared vectors against the pure-python implementation.
    def mask(self, mask, data):
        return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
    # Runs the shared vectors against the compiled speedups module.
    def mask(self, mask, data):
        return speedups.websocket_mask(mask, data)
|
Manojkumar91/odoo_inresto | refs/heads/master | addons/subscription/__openerp__.py | 7 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the "Recurring Documents" addon.
{
    'name': 'Recurring Documents',
    'version': '1.0',
    'category': 'Tools',
    'description': """
Create recurring documents.
===========================
This module allows to create new documents and add subscriptions on that document.
e.g. To have an invoice generated automatically periodically:
-------------------------------------------------------------
* Define a document type based on Invoice object
* Define a subscription whose source document is the document defined as
above. Specify the interval information and partner to be invoice.
""",
    'author': 'OpenERP SA',
    # Only the base module is required.
    'depends': ['base'],
    # Security rules, access rights and views loaded at install time.
    'data': ['security/subcription_security.xml', 'security/ir.model.access.csv', 'subscription_view.xml'],
    'demo': ['subscription_demo.xml',],
    'installable': True,
    'auto_install': False,
}
|
forte916/hello_world | refs/heads/master | python/xor1.py | 1 | #!/usr/bin/env python
# coding: UTF-8
'''
XOR calculation samples.
'''
def Enc(plain, key):
    """Return *plain* with every character XOR-ed against the integer *key*.

    XOR is an involution, so applying the same key twice restores the
    original text -- which is why ``Dec`` below is just an alias for ``Enc``.
    """
    # PEP 8 (E731): use a def, not a lambda assigned to a name.
    return ''.join(chr(ord(ch) ^ key) for ch in plain)

# Decryption is the very same operation for an XOR cipher.
Dec = Enc

print(Dec(Enc("abcd", 0x4f), 0x4f))  # => "abcd"
|
srene/ndnSIM-inrpp | refs/heads/master | waf-tools/clang_compilation_database.py | 99 | #!/usr/bin/env python
# encoding: utf-8
# Christoph Koke, 2013
"""
Writes the c and cpp compile commands into build/compile_commands.json
see http://clang.llvm.org/docs/JSONCompilationDatabase.html
Usage:
def configure(conf):
conf.load('compiler_cxx')
...
conf.load('clang_compilation_database')
"""
import sys, os, json, shlex, pipes
from waflib import Logs, TaskGen
from waflib.Tools import c, cxx
# shlex.quote only exists on Python >= 3.3 (hexversion 0x3030000); older
# interpreters fall back to the equivalent (deprecated) pipes.quote.
if sys.hexversion >= 0x3030000:
	quote = shlex.quote
else:
	quote = pipes.quote
@TaskGen.feature('*')
@TaskGen.after_method('process_use')
def collect_compilation_db_tasks(self):
	"Add a compilation database entry for compiled tasks"
	try:
		clang_db = self.bld.clang_compilation_database_tasks
	except AttributeError:
		# First task generator seen: create the shared task list and make
		# sure the database is written out once the build finishes.
		clang_db = self.bld.clang_compilation_database_tasks = []
		self.bld.add_post_fun(write_compilation_database)
	# Only C and C++ compile tasks belong in the database.
	for task in getattr(self, 'compiled_tasks', []):
		if isinstance(task, (c.c, cxx.cxx)):
			clang_db.append(task)
def write_compilation_database(ctx):
	"Write the clang compilation database as JSON"
	database_file = ctx.bldnode.make_node('compile_commands.json')
	Logs.info("Build commands will be stored in %s" % database_file.path_from(ctx.path))
	try:
		# Merge into an existing database so entries for files not rebuilt
		# in this run are preserved.
		root = json.load(database_file)
	except IOError:
		root = []
	# Index existing entries by file name for de-duplication.
	clang_db = dict((x["file"], x) for x in root)
	for task in getattr(ctx, 'clang_compilation_database_tasks', []):
		try:
			cmd = task.last_cmd
		except AttributeError:
			# Task was not executed this run; keep any previous entry.
			continue
		directory = getattr(task, 'cwd', ctx.variant_dir)
		f_node = task.inputs[0]
		filename = os.path.relpath(f_node.abspath(), directory)
		cmd = " ".join(map(quote, cmd))
		entry = {
			"directory": directory,
			"command": cmd,
			"file": filename,
		}
		clang_db[filename] = entry
	root = list(clang_db.values())
	database_file.write(json.dumps(root, indent=2))
|
ulope/django | refs/heads/master | tests/i18n/patterns/urls/path_unused.py | 47 | from django.conf.urls import url
from django.views.generic import TemplateView
# Simple template view used as the target of the test URL below.
view = TemplateView.as_view(template_name='dummy.html')
urlpatterns = [
    # Not wrapped in i18n_patterns, so the 'nl/' prefix is a literal path
    # segment rather than a language prefix.
    url(r'^nl/foo/', view, name='not-translated'),
]
|
AutorestCI/azure-sdk-for-python | refs/heads/master | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/application_gateway_path_rule.py | 1 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayPathRule(SubResource):
    """Path rule of URL path map of an application gateway.
    :param id: Resource ID.
    :type id: str
    :param paths: Path rules of URL path map.
    :type paths: list[str]
    :param backend_address_pool: Backend address pool resource of URL path
     map.
    :type backend_address_pool:
     ~azure.mgmt.network.v2017_03_01.models.SubResource
    :param backend_http_settings: Backend http settings resource of URL path
     map.
    :type backend_http_settings:
     ~azure.mgmt.network.v2017_03_01.models.SubResource
    :param provisioning_state: Path rule of URL path map resource. Possible
     values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: Name of the resource that is unique within a resource group.
     This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """
    # msrest serialization map: attribute name -> JSON key path and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'paths': {'key': 'properties.paths', 'type': '[str]'},
        'backend_address_pool': {'key': 'properties.backendAddressPool', 'type': 'SubResource'},
        'backend_http_settings': {'key': 'properties.backendHttpSettings', 'type': 'SubResource'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, id=None, paths=None, backend_address_pool=None, backend_http_settings=None, provisioning_state=None, name=None, etag=None):
        # SubResource stores the resource id; all other fields are local.
        super(ApplicationGatewayPathRule, self).__init__(id=id)
        self.paths = paths
        self.backend_address_pool = backend_address_pool
        self.backend_http_settings = backend_http_settings
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
|
RaviTezu/yowsup | refs/heads/master | yowsup/demos/echoclient/stack.py | 27 | from yowsup.stacks import YowStack
from .layer import EchoLayer
from yowsup.layers import YowLayerEvent
from yowsup.layers.auth import YowCryptLayer, YowAuthenticationProtocolLayer, AuthError
from yowsup.layers.coder import YowCoderLayer
from yowsup.layers.network import YowNetworkLayer
from yowsup.layers.protocol_messages import YowMessagesProtocolLayer
from yowsup.layers.protocol_media import YowMediaProtocolLayer
from yowsup.layers.stanzaregulator import YowStanzaRegulator
from yowsup.layers.protocol_receipts import YowReceiptProtocolLayer
from yowsup.layers.protocol_acks import YowAckProtocolLayer
from yowsup.layers.logger import YowLoggerLayer
from yowsup.layers.protocol_iq import YowIqProtocolLayer
from yowsup.layers.protocol_calls import YowCallsProtocolLayer
from yowsup.common import YowConstants
from yowsup import env
class YowsupEchoStack(object):
    """Demo stack wiring the EchoLayer on top of the yowsup transport and
    protocol layers, optionally with the axolotl encryption layer."""
    def __init__(self, credentials, encryptionEnabled = False):
        """Build and configure the layer stack.

        :param credentials: credentials tuple passed to YowStack.
        :param encryptionEnabled: when True, insert the axolotl
            (end-to-end encryption) layer into the stack.
        """
        # The original code duplicated the whole layer tuple for the
        # encrypted and non-encrypted variants; they differ only by the
        # optional YowAxolotlLayer, so build the tuple once instead.
        protocol_layers = (
            YowAuthenticationProtocolLayer, YowMessagesProtocolLayer,
            YowReceiptProtocolLayer, YowAckProtocolLayer,
            YowMediaProtocolLayer, YowIqProtocolLayer,
            YowCallsProtocolLayer)
        layers = [EchoLayer, protocol_layers]
        if encryptionEnabled:
            # Imported lazily: the axolotl dependency is optional.
            from yowsup.layers.axolotl import YowAxolotlLayer
            layers.append(YowAxolotlLayer)
        layers.extend([
            YowLoggerLayer,
            YowCoderLayer,
            YowCryptLayer,
            YowStanzaRegulator,
            YowNetworkLayer,
        ])
        self.stack = YowStack(tuple(layers))
        self.stack.setCredentials(credentials)
    def start(self):
        """Connect to the network and run the stack's event loop."""
        self.stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
        try:
            self.stack.loop()
        except AuthError as e:
            print("Authentication Error: %s" % e.message)
|
agry/NGECore2 | refs/heads/master | scripts/mobiles/dathomir/cavern_spider_broodling.py | 2 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Register the 'cavern_spider_broodling' mobile template with the
    core spawn service (Jython spawn script)."""
    mobileTemplate = MobileTemplate()
    mobileTemplate.setCreatureName('cave_gaping_spider_broodling')
    mobileTemplate.setLevel(77)
    mobileTemplate.setDifficulty(Difficulty.ELITE)
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(True)
    mobileTemplate.setScale(1)
    mobileTemplate.setMeatType("Insect Meat")
    mobileTemplate.setMeatAmount(35)
    mobileTemplate.setSocialGroup("spider nightsister")
    mobileTemplate.setAssistRange(12)
    mobileTemplate.setStalker(True)
    mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
    # Visual appearance template(s) for this mobile.
    templates = Vector()
    templates.add('object/mobile/shared_gaping_spider_broodling.iff')
    mobileTemplate.setTemplates(templates)
    # Default (unarmed melee, kinetic) weapon.
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)
    # Special attacks available to this mobile.
    attacks = Vector()
    attacks.add('bm_damage_poison_5')
    attacks.add('bm_defensive_5')
    attacks.add('bm_puncture_3')
    mobileTemplate.setDefaultAttack('creatureMeleeAttack')
    mobileTemplate.setAttacks(attacks)
    core.spawnService.addMobileTemplate('cavern_spider_broodling', mobileTemplate)
    return
isyippee/ceilometer | refs/heads/master | ceilometer/tests/unit/compute/virt/vmware/test_vsphere_operations.py | 12 | # Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_vmware import api
from oslotest import base
from ceilometer.compute.virt.vmware import vsphere_operations
class VsphereOperationsTest(base.BaseTestCase):
    """Unit tests for VsphereOperations with a fully mocked vim session."""
    def setUp(self):
        api_session = api.VMwareAPISession("test_server", "test_user",
                                           "test_password", 0, None,
                                           create_session=False)
        # Stub out the vim binding so no real vCenter is contacted.
        api_session._vim = mock.MagicMock()
        self._vsphere_ops = vsphere_operations.VsphereOperations(api_session,
                                                                 1000)
        super(VsphereOperationsTest, self).setUp()
    def test_get_vm_moid(self):
        vm1_moid = "vm-1"
        vm2_moid = "vm-2"
        vm1_instance = "0a651a71-142c-4813-aaa6-42e5d5c80d85"
        vm2_instance = "db1d2533-6bef-4cb2-aef3-920e109f5693"
        def construct_mock_vm_object(vm_moid, vm_instance):
            # Build an object shaped like a RetrievePropertiesEx result item.
            vm_object = mock.MagicMock()
            vm_object.obj.value = vm_moid
            vm_object.propSet[0].val = vm_instance
            return vm_object
        def retrieve_props_side_effect(pc, specSet, options):
            # assert inputs
            self.assertEqual(self._vsphere_ops._max_objects,
                             options.maxObjects)
            self.assertEqual(vsphere_operations.VM_INSTANCE_ID_PROPERTY,
                             specSet[0].pathSet[0])
            # mock return result
            vm1 = construct_mock_vm_object(vm1_moid, vm1_instance)
            vm2 = construct_mock_vm_object(vm2_moid, vm2_instance)
            result = mock.MagicMock()
            result.objects.__iter__.return_value = [vm1, vm2]
            return result
        vim_mock = self._vsphere_ops._api_session._vim
        vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect
        vim_mock.ContinueRetrievePropertiesEx.return_value = None
        # Both instance ids should be resolved from the single mocked batch.
        vm_moid = self._vsphere_ops.get_vm_moid(vm1_instance)
        self.assertEqual(vm1_moid, vm_moid)
        vm_moid = self._vsphere_ops.get_vm_moid(vm2_instance)
        self.assertEqual(vm2_moid, vm_moid)
    def test_query_vm_property(self):
        vm_moid = "vm-21"
        vm_property_name = "runtime.powerState"
        vm_property_val = "poweredON"
        def retrieve_props_side_effect(pc, specSet, options):
            # assert inputs
            self.assertEqual(vm_moid, specSet[0].obj.value)
            self.assertEqual(vm_property_name, specSet[0].pathSet[0])
            # mock return result
            result = mock.MagicMock()
            result.objects[0].propSet[0].val = vm_property_val
            return result
        vim_mock = self._vsphere_ops._api_session._vim
        vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect
        actual_val = self._vsphere_ops.query_vm_property(vm_moid,
                                                         vm_property_name)
        self.assertEqual(vm_property_val, actual_val)
    def test_get_perf_counter_id(self):
        def construct_mock_counter_info(group_name, counter_name, rollup_type,
                                        counter_id):
            counter_info = mock.MagicMock()
            counter_info.groupInfo.key = group_name
            counter_info.nameInfo.key = counter_name
            counter_info.rollupType = rollup_type
            counter_info.key = counter_id
            return counter_info
        def retrieve_props_side_effect(pc, specSet, options):
            # assert inputs
            self.assertEqual(vsphere_operations.PERF_COUNTER_PROPERTY,
                             specSet[0].pathSet[0])
            # mock return result
            counter_info1 = construct_mock_counter_info("a", "b", "c", 1)
            counter_info2 = construct_mock_counter_info("x", "y", "z", 2)
            result = mock.MagicMock()
            (result.objects[0].propSet[0].val.PerfCounterInfo.__iter__.
             return_value) = [counter_info1, counter_info2]
            return result
        vim_mock = self._vsphere_ops._api_session._vim
        vim_mock.RetrievePropertiesEx.side_effect = retrieve_props_side_effect
        # Counter names are looked up in "group:name:rollup" form.
        counter_id = self._vsphere_ops.get_perf_counter_id("a:b:c")
        self.assertEqual(1, counter_id)
        counter_id = self._vsphere_ops.get_perf_counter_id("x:y:z")
        self.assertEqual(2, counter_id)
    def test_query_vm_stats(self):
        vm_moid = "vm-21"
        device1 = "device-1"
        device2 = "device-2"
        device3 = "device-3"
        counter_id = 5
        def construct_mock_metric_series(device_name, stat_values):
            metric_series = mock.MagicMock()
            metric_series.value = stat_values
            metric_series.id.instance = device_name
            return metric_series
        def vim_query_perf_side_effect(perf_manager, querySpec):
            # assert inputs
            self.assertEqual(vm_moid, querySpec[0].entity.value)
            self.assertEqual(counter_id, querySpec[0].metricId[0].counterId)
            self.assertEqual(vsphere_operations.VC_REAL_TIME_SAMPLING_INTERVAL,
                             querySpec[0].intervalId)
            # mock return result
            perf_stats = mock.MagicMock()
            perf_stats[0].sampleInfo = ["s1", "s2", "s3"]
            perf_stats[0].value.__iter__.return_value = [
                construct_mock_metric_series(None, [111, 222, 333]),
                construct_mock_metric_series(device1, [100, 200, 300]),
                construct_mock_metric_series(device2, [10, 20, 30]),
                construct_mock_metric_series(device3, [1, 2, 3])
            ]
            return perf_stats
        vim_mock = self._vsphere_ops._api_session._vim
        vim_mock.QueryPerf.side_effect = vim_query_perf_side_effect
        ops = self._vsphere_ops
        # test aggregate stat
        stat_val = ops.query_vm_aggregate_stats(vm_moid, counter_id, 60)
        self.assertEqual(222, stat_val)
        # test per-device(non-aggregate) stats
        expected_device_stats = {
            device1: 200,
            device2: 20,
            device3: 2
        }
        stats = ops.query_vm_device_stats(vm_moid, counter_id, 60)
        self.assertEqual(expected_device_stats, stats)
|
kschwank/paperwork | refs/heads/stable | src/paperwork/paperwork.py | 1 | #!/usr/bin/env python
# Paperwork - Using OCR to grep dead trees the easy way
# Copyright (C) 2012-2014 Jerome Flesch
#
# Paperwork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Paperwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paperwork. If not, see <http://www.gnu.org/licenses/>.
"""
Bootstrapping code
"""
import os
import gettext
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Poppler', '0.18')
gi.require_version('PangoCairo', '1.0')
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import GLib
import locale
import logging
import signal
from frontend.mainwindow import ActionRefreshIndex, MainWindow
from frontend.util.config import load_config
logger = logging.getLogger(__name__)
# Candidate (.mo file, locale directory) pairs probed in order by
# set_locale(); the first readable .mo file selects its locale directory.
LOCALE_PATHS = [
    # French
    ('locale/fr/LC_MESSAGES/paperwork.mo', 'locale'),
    ('/usr/local/share/locale/fr/LC_MESSAGES/paperwork.mo',
     '/usr/local/share/locale'),
    ('/usr/share/locale/fr/LC_MESSAGES/paperwork.mo', '/usr/share/locale'),
    # German
    ('locale/de/LC_MESSAGES/paperwork.mo', 'locale'),
    ('/usr/local/share/locale/de/LC_MESSAGES/paperwork.mo',
     '/usr/local/share/locale'),
    ('/usr/share/locale/de/LC_MESSAGES/paperwork.mo', '/usr/share/locale'),
]
def init_logging():
    """Attach a stream handler to the root logger and set its verbosity.

    The level comes from the PAPERWORK_VERBOSE environment variable
    (DEBUG / INFO / WARNING / ERROR), defaulting to INFO.
    """
    level_by_name = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
    }
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(
        '%(levelname)-6s %(name)-30s %(message)s'))
    root_logger = logging.getLogger()
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(level_by_name[os.getenv("PAPERWORK_VERBOSE", "INFO")])
def set_locale():
    """
    Enable locale support
    """
    locale.setlocale(locale.LC_ALL, '')
    got_locales = False
    locales_path = None
    # Probe each candidate .mo file; the first readable one decides which
    # locale directory gets bound below.
    for (fr_locale_path, locales_path) in LOCALE_PATHS:
        logger.info("Looking for locales in '%s' ..." % (fr_locale_path))
        if os.access(fr_locale_path, os.R_OK):
            logger.info("Will use locales from '%s'" % (locales_path))
            got_locales = True
            break
    if not got_locales:
        logger.warning("WARNING: Locales not found")
    else:
        # Bind the 'paperwork' gettext domain for both gettext and locale.
        for module in (gettext, locale):
            module.bindtextdomain('paperwork', locales_path)
            module.textdomain('paperwork')
def main():
    """
    Where everything start.
    """
    init_logging()
    set_locale()
    GObject.threads_init()
    # Quit the GTK main loop cleanly on SIGINT/SIGTERM when supported.
    if hasattr(GLib, "unix_signal_add"):
        GLib.unix_signal_add(GLib.PRIORITY_DEFAULT, signal.SIGINT,
                             Gtk.main_quit, None)
        GLib.unix_signal_add(GLib.PRIORITY_DEFAULT, signal.SIGTERM,
                             Gtk.main_quit, None)
    try:
        config = load_config()
        config.read()
        main_win = MainWindow(config)
        # Refresh the document index, then run the UI until quit.
        ActionRefreshIndex(main_win, config).do()
        Gtk.main()
        # Stop background workers before persisting the configuration.
        for scheduler in main_win.schedulers.values():
            scheduler.stop()
        config.write()
    finally:
        logger.info("Good bye")
|
wakatime/sublime-wakatime | refs/heads/master | packages/wakatime/session_cache.py | 3 | # -*- coding: utf-8 -*-
"""
wakatime.session_cache
~~~~~~~~~~~~~~~~~~~~~~
Persist requests.Session for multiprocess SSL handshake pooling.
:copyright: (c) 2015 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import pickle
import sys
try:
import sqlite3
HAS_SQL = True
except ImportError: # pragma: nocover
HAS_SQL = False
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'packages'))
from .packages import requests
log = logging.getLogger('WakaTime')
class SessionCache(object):
    # Single-row sqlite table holding the pickled requests.Session.
    db_file = '.wakatime.db'
    table_name = 'session'
    def connect(self):
        # Open the cache database (creating the table if needed) and
        # return a (connection, cursor) pair.
        conn = sqlite3.connect(self._get_db_file(), isolation_level=None)
        c = conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS {0} (
            value BLOB)
        '''.format(self.table_name))
        return (conn, c)
    def save(self, session):
        """Saves a requests.Session object for the next heartbeat process.
        """
        if not HAS_SQL: # pragma: nocover
            return
        try:
            conn, c = self.connect()
            # Keep at most one cached session: clear, then insert.
            c.execute('DELETE FROM {0}'.format(self.table_name))
            values = {
                'value': sqlite3.Binary(pickle.dumps(session, protocol=2)),
            }
            c.execute('INSERT INTO {0} VALUES (:value)'.format(self.table_name), values)
            conn.commit()
        except: # pragma: nocover
            # Best-effort cache: failures are logged, never raised.
            # NOTE(review): log.traceback is a non-standard logger method —
            # presumably added elsewhere in this package; confirm.
            log.traceback(logging.DEBUG)
        try:
            conn.close()
        except: # pragma: nocover
            pass
    def get(self):
        """Returns a requests.Session object.
        Gets Session from sqlite3 cache or creates a new Session.
        """
        if not HAS_SQL: # pragma: nocover
            return requests.session()
        try:
            conn, c = self.connect()
        except:
            log.traceback(logging.DEBUG)
            return requests.session()
        session = None
        try:
            # BEGIN IMMEDIATE takes a write lock, serializing concurrent
            # heartbeat processes around the cached row.
            c.execute('BEGIN IMMEDIATE')
            c.execute('SELECT value FROM {0} LIMIT 1'.format(self.table_name))
            row = c.fetchone()
            if row is not None:
                session = pickle.loads(row[0])
        except: # pragma: nocover
            log.traceback(logging.DEBUG)
        try:
            conn.close()
        except: # pragma: nocover
            pass
        # Fall back to a fresh Session when nothing usable was cached.
        return session if session is not None else requests.session()
    def delete(self):
        """Clears all cached Session objects.
        """
        if not HAS_SQL: # pragma: nocover
            return
        try:
            conn, c = self.connect()
            c.execute('DELETE FROM {0}'.format(self.table_name))
            conn.commit()
        except:
            log.traceback(logging.DEBUG)
        try:
            conn.close()
        except: # pragma: nocover
            pass
    def _get_db_file(self):
        # Resolve the db path under $WAKATIME_HOME when set, else ~.
        home = '~'
        if os.environ.get('WAKATIME_HOME'):
            home = os.environ.get('WAKATIME_HOME')
        return os.path.join(os.path.expanduser(home), '.wakatime.db')
|
wencongyang/qemu-colo | refs/heads/master | tests/image-fuzzer/qcow2/__init__.py | 95 | from layout import create_image
|
sauloal/pycluster | refs/heads/master | pypy-1.9_64/lib-python/2.7/test/test_smtplib.py | 20 | import asyncore
import email.utils
import socket
import smtpd
import smtplib
import StringIO
import sys
import time
import select
import unittest
from test import test_support
try:
import threading
except ImportError:
threading = None
HOST = test_support.HOST
def server(evt, buf, serv):
    # Minimal one-shot server: accept a single connection, push ``buf``
    # to it in writable chunks, then shut down.  ``evt`` is set once when
    # the server is listening and again when it has finished.
    serv.listen(5)
    evt.set()
    try:
        conn, addr = serv.accept()
    except socket.timeout:
        pass
    else:
        # Bound the loop at 500 iterations to guarantee termination.
        n = 500
        while buf and n > 0:
            r, w, e = select.select([], [conn], [])
            if w:
                sent = conn.send(buf)
                buf = buf[sent:]
            n -= 1
        conn.close()
    finally:
        serv.close()
        evt.set()
@unittest.skipUnless(threading, 'Threading required for this test.')
class GeneralTests(unittest.TestCase):
    """smtplib.SMTP connection/timeout tests against the toy ``server``."""
    def setUp(self):
        self._threads = test_support.threading_setup()
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(15)
        self.port = test_support.bind_port(self.sock)
        servargs = (self.evt, "220 Hola mundo\n", self.sock)
        self.thread = threading.Thread(target=server, args=servargs)
        self.thread.start()
        # Wait until the server thread is actually listening.
        self.evt.wait()
        self.evt.clear()
    def tearDown(self):
        self.evt.wait()
        self.thread.join()
        test_support.threading_cleanup(*self._threads)
    def testBasic1(self):
        # connects
        smtp = smtplib.SMTP(HOST, self.port)
        smtp.close()
    def testBasic2(self):
        # connects, include port in host name
        smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
        smtp.close()
    def testLocalHostName(self):
        # check that supplied local_hostname is used
        smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
        self.assertEqual(smtp.local_hostname, "testhost")
        smtp.close()
    def testTimeoutDefault(self):
        # With no explicit timeout, the socket default applies.
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            smtp = smtplib.SMTP(HOST, self.port)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()
    def testTimeoutNone(self):
        # timeout=None overrides a non-None socket default.
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            smtp = smtplib.SMTP(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertTrue(smtp.sock.gettimeout() is None)
        smtp.close()
    def testTimeoutValue(self):
        smtp = smtplib.SMTP(HOST, self.port, timeout=30)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
    # Pump the asyncore loop until the client signals completion (or a
    # bounded number of iterations elapses), then tear everything down.
    serv_evt.set()
    try:
        if hasattr(select, 'poll'):
            poll_fun = asyncore.poll2
        else:
            poll_fun = asyncore.poll
        n = 1000
        while asyncore.socket_map and n > 0:
            poll_fun(0.01, asyncore.socket_map)
            # when the client conversation is finished, it will
            # set client_evt, and it's then ok to kill the server
            if client_evt.is_set():
                serv.close()
                break
            n -= 1
    except socket.timeout:
        pass
    finally:
        if not client_evt.is_set():
            # allow some time for the client to read the result
            time.sleep(0.5)
        serv.close()
        asyncore.close_all()
        serv_evt.set()
# Markers emitted by smtpd.DebuggingServer around each received message.
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
    """Client-side checks against a live smtpd.DebuggingServer thread.

    DebuggingServer only implements the bare SMTP command set and prints
    received messages to stdout, which setUp captures.
    """

    def setUp(self):
        # temporarily replace sys.stdout to capture DebuggingServer output
        self.old_stdout = sys.stdout
        self.output = StringIO.StringIO()
        sys.stdout = self.output
        self._threads = test_support.threading_setup()
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1))
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()
        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()
        test_support.threading_cleanup(*self._threads)
        # restore sys.stdout
        sys.stdout = self.old_stdout

    def testBasic(self):
        # connect
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.quit()

    def testNOOP(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (250, 'Ok')
        self.assertEqual(smtp.noop(), expected)
        smtp.quit()

    def testRSET(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (250, 'Ok')
        self.assertEqual(smtp.rset(), expected)
        smtp.quit()

    def testNotImplemented(self):
        # EHLO isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (502, 'Error: command "EHLO" not implemented')
        self.assertEqual(smtp.ehlo(), expected)
        smtp.quit()

    def testVRFY(self):
        # VRFY isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (502, 'Error: command "VRFY" not implemented')
        self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
        # verify() is the long-name alias of vrfy()
        self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
        smtp.quit()

    def testSecondHELO(self):
        # check that a second HELO returns a message that it's a duplicate
        # (this behavior is specific to smtpd.SMTPChannel)
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.helo()
        expected = (503, 'Duplicate HELO/EHLO')
        self.assertEqual(smtp.helo(), expected)
        smtp.quit()

    def testHELP(self):
        # help() returns only the message text, not a (code, msg) tuple
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        self.assertEqual(smtp.help(), 'Error: command "HELP" not implemented')
        smtp.quit()

    def testSend(self):
        # connect and send mail
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
        # in asyncore. This sleep might help, but should really be fixed
        # properly by using an Event variable.
        time.sleep(0.01)
        smtp.quit()
        # Shut the server down, then compare its captured stdout with the
        # message wrapped in the MSG_BEGIN/MSG_END markers.
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
class NonConnectingTests(unittest.TestCase):
    """Tests that need no live server: the SMTP object is never connected."""

    def testNotConnected(self):
        # Operations on an unconnected SMTP object must raise
        # SMTPServerDisconnected (at present the attempt in SMTP.send to
        # reference the nonexistent 'sock' attribute surfaces this way).
        client = smtplib.SMTP()
        for attempt in (client.ehlo,
                        lambda: client.send('test msg')):
            self.assertRaises(smtplib.SMTPServerDisconnected, attempt)

    def testNonnumericPort(self):
        # A non-numeric port must raise socket.error, whether it is passed
        # as a separate argument or embedded in the host string.
        for ctor_args in (("localhost", "bogus"), ("localhost:bogus",)):
            self.assertRaises(socket.error, smtplib.SMTP, *ctor_args)
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
    """Check the client's reaction to a non-successful HELO reply."""

    def setUp(self):
        # Capture stdout for the duration of the test.
        self.old_stdout = sys.stdout
        self.output = StringIO.StringIO()
        sys.stdout = self.output
        self._threads = test_support.threading_setup()
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(15)
        self.port = test_support.bind_port(self.sock)
        # 'server' is a helper defined earlier in this file (outside this
        # chunk); presumably it answers each connection with the canned line
        # below -- 199 is not a success code, so HELO fails.
        servargs = (self.evt, "199 no hello for you!\n", self.sock)
        self.thread = threading.Thread(target=server, args=servargs)
        self.thread.start()
        self.evt.wait()
        self.evt.clear()

    def tearDown(self):
        self.evt.wait()
        self.thread.join()
        test_support.threading_cleanup(*self._threads)
        sys.stdout = self.old_stdout

    def testFailingHELO(self):
        # The SMTP constructor connects and greets immediately; the bad
        # reply must surface as SMTPConnectError.
        self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
                          HOST, self.port, 'localhost', 3)
# Fixture data for the simulated server below.
# Users known to SimSMTPChannel: email address -> full name.
sim_users = {'Mr.A@somewhere.com':'John A',
             'Ms.B@somewhere.com':'Sally B',
             'Mrs.C@somewhereesle.com':'Ruth C',
             }

# (username, password) pair the AUTH tests log in with.
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
# Static challenge smtp_AUTH issues for CRAM-MD5 (base64-encoded).
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
                          'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
# Encoded credentials smtp_AUTH accepts, keyed by mechanism name.
sim_auth_credentials = {
    'login': 'TXIuQUBzb21ld2hlcmUuY29t',
    'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
    'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
                 'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
    }
# Encoded password expected inside the LOGIN auth error message
# (uppercased, see the comment above testAUTH_LOGIN).
sim_auth_login_password = 'C29TZXBHC3N3B3JK'
# Mailing lists known to smtp_EXPN: list name -> member addresses.
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
             'list-2':['Ms.B@somewhere.com',],
             }
# Simulated SMTP channel & server
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
    """SMTP channel with canned EHLO/VRFY/EXPN/AUTH responses for testing."""

    def __init__(self, extra_features, *args, **kw):
        # Extra EHLO feature lines ("250-FEATURE\r\n"), advertised on top
        # of the fixed set hard-coded in smtp_EHLO below.
        self._extrafeatures = ''.join(
            [ "250-{0}\r\n".format(x) for x in extra_features ])
        smtpd.SMTPChannel.__init__(self, *args, **kw)

    def smtp_EHLO(self, arg):
        # Fixed feature set, then any extras, then the terminating 250 line.
        resp = ('250-testhost\r\n'
                '250-EXPN\r\n'
                '250-SIZE 20000000\r\n'
                '250-STARTTLS\r\n'
                '250-DELIVERBY\r\n')
        resp = resp + self._extrafeatures + '250 HELP'
        self.push(resp)

    def smtp_VRFY(self, arg):
        # Answer 250 with "<name> <quoted addr>" for known users, 550 for
        # unknown ones (echoing the raw argument back).
        raw_addr = email.utils.parseaddr(arg)[1]
        quoted_addr = smtplib.quoteaddr(arg)
        if raw_addr in sim_users:
            self.push('250 %s %s' % (sim_users[raw_addr], quoted_addr))
        else:
            self.push('550 No such user: %s' % arg)

    def smtp_EXPN(self, arg):
        list_name = email.utils.parseaddr(arg)[1].lower()
        if list_name in sim_lists:
            user_list = sim_lists[list_name]
            # Multi-line reply: '250-' continuation lines, '250 ' on the last.
            for n, user_email in enumerate(user_list):
                quoted_addr = smtplib.quoteaddr(user_email)
                if n < len(user_list) - 1:
                    self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
                else:
                    self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
        else:
            self.push('550 No access for you!')

    def smtp_AUTH(self, arg):
        # Bare 'AUTH CRAM-MD5' -> issue the static challenge and wait.
        if arg.strip().lower()=='cram-md5':
            self.push('334 {0}'.format(sim_cram_md5_challenge))
            return
        # Otherwise arg is '<mechanism> <credential>'.
        mech, auth = arg.split()
        mech = mech.lower()
        if mech not in sim_auth_credentials:
            self.push('504 auth type unimplemented')
            return
        if mech == 'plain' and auth==sim_auth_credentials['plain']:
            self.push('235 plain auth ok')
        elif mech=='login' and auth==sim_auth_credentials['login']:
            self.push('334 Password:')
        else:
            self.push('550 No access for you!')

    def handle_error(self):
        # Propagate errors so test failures are visible instead of being
        # swallowed by asyncore's default logging.
        raise
class SimSMTPServer(smtpd.SMTPServer):
    """SMTP server that hands each incoming connection to SimSMTPChannel."""

    def __init__(self, *args, **kw):
        # Feature lines advertised by the channel in its EHLO response.
        self._extra_features = []
        smtpd.SMTPServer.__init__(self, *args, **kw)

    def handle_accept(self):
        connection, peer_address = self.accept()
        self._SMTPchannel = SimSMTPChannel(
            self._extra_features, self, connection, peer_address)

    def process_message(self, peer, mailfrom, rcpttos, data):
        # Delivery is irrelevant for these tests; drop the message.
        pass

    def add_feature(self, feature):
        """Advertise an additional ESMTP feature (e.g. 'AUTH PLAIN')."""
        self._extra_features.append(feature)

    def handle_error(self):
        # Propagate instead of logging, so tests fail loudly.
        raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
    """Exercise ESMTP client features against the SimSMTPServer fixture."""

    def setUp(self):
        self._threads = test_support.threading_setup()
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1))
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()
        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()
        test_support.threading_cleanup(*self._threads)

    def testBasic(self):
        # smoke test: connect and quit
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.quit()

    def testEHLO(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        # no features should be present before the EHLO
        self.assertEqual(smtp.esmtp_features, {})
        # features expected from the test server (see SimSMTPChannel.smtp_EHLO)
        expected_features = {'expn':'',
                             'size': '20000000',
                             'starttls': '',
                             'deliverby': '',
                             'help': '',
                             }
        smtp.ehlo()
        self.assertEqual(smtp.esmtp_features, expected_features)
        for k in expected_features:
            self.assertTrue(smtp.has_extn(k))
        self.assertFalse(smtp.has_extn('unsupported-feature'))
        smtp.quit()

    def testVRFY(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        for email, name in sim_users.items():
            expected_known = (250, '%s %s' % (name, smtplib.quoteaddr(email)))
            self.assertEqual(smtp.vrfy(email), expected_known)
        u = 'nobody@nowhere.com'
        expected_unknown = (550, 'No such user: %s' % smtplib.quoteaddr(u))
        self.assertEqual(smtp.vrfy(u), expected_unknown)
        smtp.quit()

    def testEXPN(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        for listname, members in sim_lists.items():
            users = []
            for m in members:
                users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
            expected_known = (250, '\n'.join(users))
            self.assertEqual(smtp.expn(listname), expected_known)
        u = 'PSU-Members-List'
        expected_unknown = (550, 'No access for you!')
        self.assertEqual(smtp.expn(u), expected_unknown)
        smtp.quit()

    def testAUTH_PLAIN(self):
        self.serv.add_feature("AUTH PLAIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        expected_auth_ok = (235, b'plain auth ok')
        self.assertEqual(smtp.login(sim_auth[0], sim_auth[1]), expected_auth_ok)

    # SimSMTPChannel doesn't fully support LOGIN or CRAM-MD5 auth because they
    # require a synchronous read to obtain the credentials...so instead smtpd
    # sees the credential sent by smtplib's login method as an unknown command,
    # which results in smtplib raising an auth error. Fortunately the error
    # message contains the encoded credential, so we can partially check that it
    # was generated correctly (partially, because the 'word' is uppercased in
    # the error message).
    def testAUTH_LOGIN(self):
        self.serv.add_feature("AUTH LOGIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        try:
            smtp.login(sim_auth[0], sim_auth[1])
        except smtplib.SMTPAuthenticationError as err:
            if sim_auth_login_password not in str(err):
                # BUG FIX: this was `raise "expected ..."`; raising a string
                # is a TypeError on Python >= 2.6, masking the real failure.
                self.fail("expected encoded password not found in error message")

    def testAUTH_CRAM_MD5(self):
        self.serv.add_feature("AUTH CRAM-MD5")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        try:
            smtp.login(sim_auth[0], sim_auth[1])
        except smtplib.SMTPAuthenticationError as err:
            if sim_auth_credentials['cram-md5'] not in str(err):
                # BUG FIX: string raise replaced -- see testAUTH_LOGIN.
                self.fail("expected encoded credentials not found in error message")

    #TODO: add tests for correct AUTH method fallback now that the
    #test infrastructure can support it.
def test_main(verbose=None):
    """Run all the test classes defined in this module."""
    all_tests = (GeneralTests, DebuggingServerTests, NonConnectingTests,
                 BadHELOServerTests, SMTPSimTests)
    test_support.run_unittest(*all_tests)


if __name__ == '__main__':
    test_main()
|
fjorba/invenio | refs/heads/master | modules/webcomment/lib/webcomment_regression_tests.py | 1 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebComment Regression Test Suite."""
__revision__ = "$Id$"
import unittest
import shutil
from mechanize import Browser, LinkNotFoundError, HTTPError
from invenio.config import \
CFG_SITE_URL, \
CFG_WEBDIR, \
CFG_TMPDIR, \
CFG_SITE_RECORD
from invenio.testutils import make_test_suite, run_test_suite, \
test_web_page_content, merge_error_messages
from invenio.dbquery import run_sql
from invenio.webcomment import query_add_comment_or_remark
from invenio.webcommentadminlib import query_delete_comment_auth
from invenio.webcomment_washer import EmailWasher
def prepare_attachments():
    """
    We copy necessary files to temporary directory. Every time we will
    attach files to a comment, these files get moved, so this function
    must be called again.
    """
    for image_name in ('journal_water_dog.gif', 'invenio.css'):
        shutil.copy(CFG_WEBDIR + '/img/' + image_name, CFG_TMPDIR)
class WebCommentWebPagesAvailabilityTest(unittest.TestCase):
    """Check WebComment web pages whether they are up or not."""

    def test_your_baskets_pages_availability(self):
        """webcomment - availability of comments pages"""
        # NOTE: the method name says "baskets" but this checks the comments
        # pages; kept as-is so existing test selections keep working.
        baseurl = CFG_SITE_URL + '/%s/10/comments/' % CFG_SITE_RECORD
        _exports = ['', 'display', 'add', 'vote', 'report']
        error_messages = []
        for url in [baseurl + page for page in _exports]:
            error_messages.extend(test_web_page_content(url))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return

    def test_webcomment_admin_interface_availability(self):
        """webcomment - availability of WebComment Admin interface pages"""
        baseurl = CFG_SITE_URL + '/admin/webcomment/webcommentadmin.py/'
        _exports = ['', 'comments', 'delete', 'users']
        error_messages = []
        for url in [baseurl + page for page in _exports]:
            # first try as guest:
            error_messages.extend(test_web_page_content(url,
                                                        username='guest',
                                                        expected_text=
                                                        'Authorization failure'))
            # then try as admin:
            error_messages.extend(test_web_page_content(url,
                                                        username='admin'))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return

    def test_webcomment_admin_guide_availability(self):
        """webcomment - availability of WebComment Admin Guide"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/help/admin/webcomment-admin-guide',
                                               expected_text="WebComment Admin Guide"))
        return

    def test_webcomment_mini_review_availability(self):
        """webcomment - availability of mini-review panel on detailed record page"""
        url = CFG_SITE_URL + '/%s/12' % CFG_SITE_RECORD
        error_messages = test_web_page_content(url,
                                               expected_text="(Not yet reviewed)")
        # BUG FIX: error_messages was computed but never checked, so this
        # test could never fail; assert like the other availability tests.
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return
class WebCommentRestrictionsTest(unittest.TestCase):
"""Check WebComment restrictions"""
    def setUp(self):
        """Insert some comments in some records"""
        # Comments have access restrictions when:
        # - the comment is in a restricted collection ('viewrestrcoll' action)
        # - the comment is in a restricted discussion page ('viewcomment' action)
        # - the comment itself is restricted ('viewrestrcomment'
        #   action), either because of the markup of the record, or
        #   because it is a reply to a restricted comment.
        # Record and user ids below are fixtures from the Invenio demo site.
        self.public_record = 5
        self.public_record_restr_comment = 6
        self.restr_record = 42
        self.restr_record_restr_comment = 41
        self.restricted_discussion = 76
        self.romeo_uid = 5
        self.jekyll_uid = 2
        self.attached_files = {'file1': CFG_TMPDIR + '/journal_water_dog.gif',
                               'file2': CFG_TMPDIR + '/invenio.css'}
        # Load content of textual file2 so tests can compare downloads
        # against it byte-for-byte.
        prepare_attachments()
        fp = file(self.attached_files['file2'])
        self.attached_file2_content = fp.read()
        fp.close()
        # Insert a public comment in a public record (public collection).
        # prepare_attachments() must be called before every insertion
        # because attaching files consumes (moves) them.
        self.msg1 = "A test comment 1"
        self.public_comid = query_add_comment_or_remark(reviews=0, recID=self.public_record,
                                                        uid=self.romeo_uid, msg=self.msg1,
                                                        editor_type='textarea',
                                                        attached_files=self.attached_files)
        # Insert a public comment in a restricted record (restricted collection)
        self.msg2 = "A test comment 2"
        prepare_attachments()
        self.restr_comid_1 = \
            query_add_comment_or_remark(reviews=0, recID=self.restr_record,
                                        uid=self.jekyll_uid, msg=self.msg2,
                                        editor_type='textarea',
                                        attached_files=self.attached_files)
        # Insert a restricted comment in a public collection
        self.msg3 = "A test comment 3"
        prepare_attachments()
        self.restr_comid_2 = \
            query_add_comment_or_remark(reviews=0, recID=self.public_record_restr_comment,
                                        uid=self.jekyll_uid, msg=self.msg3,
                                        editor_type='textarea',
                                        attached_files=self.attached_files)
        # Insert a restricted comment, in a restricted collection
        self.msg5 = "A test comment 5"
        prepare_attachments()
        self.restr_comid_4 = \
            query_add_comment_or_remark(reviews=0, recID=self.restr_record_restr_comment,
                                        uid=self.romeo_uid, msg=self.msg5,
                                        editor_type='textarea',
                                        attached_files=self.attached_files)
        # Insert a public comment in a restricted discussion
        self.msg6 = "A test comment 6"
        prepare_attachments()
        self.restr_comid_5 = \
            query_add_comment_or_remark(reviews=0, recID=self.restricted_discussion,
                                        uid=self.romeo_uid, msg=self.msg6,
                                        editor_type='textarea',
                                        attached_files=self.attached_files)
        # restr_comid_3 is created only by test_comment_replies_inherit_restrictions
        self.restr_comid_3 = None
        # Insert a public, deleted comment in a public record (public collection)
        self.msg7 = "A test comment 7"
        prepare_attachments()
        self.deleted_comid = query_add_comment_or_remark(reviews=0, recID=self.public_record,
                                                         uid=self.romeo_uid, msg=self.msg7,
                                                         editor_type='textarea',
                                                         attached_files=self.attached_files)
        query_delete_comment_auth(self.deleted_comid)
def tearDown(self):
"""Remove inserted comments"""
run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.public_comid,))
run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.restr_comid_1,))
run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.restr_comid_2,))
if self.restr_comid_3:
run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.restr_comid_3,))
run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.restr_comid_4,))
run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.restr_comid_5,))
run_sql("""DELETE FROM cmtRECORDCOMMENT WHERE id=%s""", (self.deleted_comid,))
pass
    def test_access_public_record_public_discussion_public_comment(self):
        """webcomment - accessing "public" comment in a "public" discussion of a restricted record"""
        # NOTE(review): despite the method name ("public record"), this body
        # checks self.restr_record and duplicates the guest-access half of
        # the next test -- confirm the intended target record.
        # Guest user should not be able to access it
        self.assertNotEqual([],
                            test_web_page_content("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record),
                                                  expected_text=self.msg2))
        # Accessing a non existing file for a restricted comment should also ask to login
        self.assertEqual([],
                         test_web_page_content("%s/%s/%i/comments/attachments/get/%i/not_existing_file" % \
                                               (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1),
                                               expected_text='You can use your nickname or your email address to login'))
        # Check accessing file of a restricted comment
        self.assertEqual([],
                         test_web_page_content("%s/%s/%i/comments/attachments/get/%i/file2" % \
                                               (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1),
                                               expected_text='You can use your nickname or your email address to login'))
    def test_access_restricted_record_public_discussion_public_comment(self):
        """webcomment - accessing "public" comment in a "public" discussion of a restricted record"""
        # Access matrix checked below: guest -> login prompt,
        # juliet -> denied, jekyll -> allowed (comment and attachment).
        # Guest user should not be able to access it
        self.assertNotEqual([],
                            test_web_page_content("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record),
                                                  expected_text=self.msg2))
        # Accessing a non existing file for a restricted comment should also ask to login
        self.assertEqual([],
                         test_web_page_content("%s/%s/%i/comments/attachments/get/%i/not_existing_file" % \
                                               (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1),
                                               expected_text='You can use your nickname or your email address to login'))
        # Check accessing file of a restricted comment
        self.assertEqual([],
                         test_web_page_content("%s/%s/%i/comments/attachments/get/%i/file2" % \
                                               (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1),
                                               expected_text='You can use your nickname or your email address to login'))
        # Juliet should not be able to access the comment
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'juliet'
        br['p_pw'] = 'j123uliet'
        br.submit()
        br.open("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record))
        response = br.response().read()
        if not self.msg2 in response:
            pass
        else:
            self.fail("Oops, this user should not have access to this comment")
        # Juliet should not be able to access the attached files
        br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1))
        response = br.response().read()
        if "You are not authorized" in response:
            pass
        else:
            self.fail("Oops, this user should not have access to this comment attachment")
        # Jekyll should be able to access the comment
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'jekyll'
        br['p_pw'] = 'j123ekyll'
        br.submit()
        br.open("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record))
        response = br.response().read()
        if not self.msg2 in response:
            self.fail("Oops, this user should have access to this comment")
        # Jekyll should be able to access the attached files
        br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1))
        response = br.response().read()
        self.assertEqual(self.attached_file2_content, response)
    def test_access_public_record_restricted_discussion_public_comment(self):
        """webcomment - accessing "public" comment in a restricted discussion of a public record"""
        # Access matrix checked below: guest -> login prompt,
        # juliet -> denied, romeo -> allowed (comment and attachment).
        # Guest user should not be able to access it
        self.assertNotEqual([],
                            test_web_page_content("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion),
                                                  expected_text=self.msg2))
        # Accessing a non existing file for a restricted comment should also ask to login
        self.assertEqual([],
                         test_web_page_content("%s/%s/%i/comments/attachments/get/%i/not_existing_file" % \
                                               (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5),
                                               expected_text='You can use your nickname or your email address to login'))
        # Check accessing file of a restricted comment
        self.assertEqual([],
                         test_web_page_content("%s/%s/%i/comments/attachments/get/%i/file2" % \
                                               (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5),
                                               expected_text='You can use your nickname or your email address to login'))
        # Juliet should not be able to access the comment
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'juliet'
        br['p_pw'] = 'j123uliet'
        br.submit()
        br.open("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion))
        response = br.response().read()
        if not self.msg6 in response:
            pass
        else:
            self.fail("Oops, this user should not have access to this comment")
        # Juliet should not be able to access the attached files
        br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5))
        response = br.response().read()
        if "You are not authorized" in response:
            pass
        else:
            self.fail("Oops, this user should not have access to this comment attachment")
        # Romeo should be able to access the comment
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'romeo'
        br['p_pw'] = 'r123omeo'
        br.submit()
        br.open("%s/%s/%i/comments/" % (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion))
        response = br.response().read()
        if not self.msg6 in response:
            self.fail("Oops, this user should have access to this comment")
        # Romeo should be able to access the attached files
        br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5))
        response = br.response().read()
        self.assertEqual(self.attached_file2_content, response)
def test_comment_replies_inherit_restrictions(self):
"""webcomment - a reply to a comment inherits restrictions"""
# In this test we reply to a restricted comment, and check if
# the restriction is inherited. However, in order to make sure
# that the comment restriction is inherited, and not the
# record restriction, we temporary change the restriction of
# the parent.
self.public_record_restr_comment
original_restriction = run_sql("SELECT restriction FROM cmtRECORDCOMMENT WHERE id=%s",
(self.restr_comid_2,))[0][0]
restriction_to_inherit = 'juliet_only'
run_sql("UPDATE cmtRECORDCOMMENT SET restriction=%s WHERE id=%s",
(restriction_to_inherit, self.restr_comid_2))
# Reply to a restricted comment
self.msg4 = "A test comment 4"
prepare_attachments()
self.restr_comid_3 = \
query_add_comment_or_remark(reviews=0, recID=self.public_record_restr_comment,
uid=self.jekyll_uid, msg=self.msg4,
editor_type='textarea',
attached_files=self.attached_files,
reply_to=self.restr_comid_2)
inherited_restriction = run_sql("SELECT restriction FROM cmtRECORDCOMMENT WHERE id=%s",
(self.restr_comid_3,))[0][0]
self.assertEqual(restriction_to_inherit, inherited_restriction)
# Restore original restriction
run_sql("UPDATE cmtRECORDCOMMENT SET restriction=%s WHERE id=%s",
(original_restriction, self.restr_comid_2))
    def test_comment_reply_with_wrong_record(self):
        """webcomment - replying to comment using mismatching recid"""
        # The comid belongs to self.restr_record; using self.public_record in
        # the URL must be rejected for both users, authorized or not.
        # Juliet should not be able to reply to the comment, even through a public record
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'juliet'
        br['p_pw'] = 'j123uliet'
        br.submit()
        br.open("%s/%s/%i/comments/add?action=REPLY&comid=%s&ln=en" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1))
        response = br.response().read()
        if not self.msg2 in response and \
               "Authorization failure" in response:
            pass
        else:
            self.fail("Oops, users should not be able to reply to comment using mismatching recid")
        # Jekyll should also not be able to reply the comment using the wrong recid
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'jekyll'
        br['p_pw'] = 'j123ekyll'
        br.submit()
        br.open("%s/%s/%i/comments/add?action=REPLY&comid=%s&ln=en" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1))
        response = br.response().read()
        if not self.msg2 in response and \
               "Authorization failure" in response:
            pass
        else:
            self.fail("Oops, users should not be able to reply to comment using mismatching recid")
def test_comment_access_attachment_with_wrong_record(self):
"""webcomment - accessing attachments using mismatching recid"""
# Juliet should not be able to access these files, especially with wrong recid
br = Browser()
br.open(CFG_SITE_URL + '/youraccount/login')
br.select_form(nr=0)
br['p_un'] = 'juliet'
br['p_pw'] = 'j123uliet'
br.submit()
try:
br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \
(CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1))
response = br.response().read()
except HTTPError:
pass
else:
self.fail("Oops, users should not be able to access comment attachment using mismatching recid")
# Jekyll should also not be able to access these files when using wrong recid
br = Browser()
br.open(CFG_SITE_URL + '/youraccount/login')
br.select_form(nr=0)
br['p_un'] = 'jekyll'
br['p_pw'] = 'j123ekyll'
br.submit()
try:
br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \
(CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1))
response = br.response().read()
response = br.response().read()
except HTTPError:
pass
else:
self.fail("Oops, users should not be able to access comment attachment using mismatching recid")
    def test_comment_reply_to_deleted_comment(self):
        """webcomment - replying to a deleted comment"""
        # Replying to a deleted comment is not an authorization failure; the
        # reply page simply must not quote the deleted comment's body (msg7).
        # Juliet should not be able to reply to the deleted comment
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'juliet'
        br['p_pw'] = 'j123uliet'
        br.submit()
        br.open("%s/%s/%i/comments/add?action=REPLY&comid=%s&ln=en" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid))
        response = br.response().read()
        if not self.msg7 in response:
            # There should be no authorization failure, in case the
            # comment was deleted in between. We'll simply go on but
            # the original comment will not be included
            pass
        else:
            self.fail("Oops, users should not be able to reply to a deleted comment")
        # Jekyll should also not be able to reply the deleted comment
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'jekyll'
        br['p_pw'] = 'j123ekyll'
        br.submit()
        br.open("%s/%s/%i/comments/add?action=REPLY&comid=%s&ln=en" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid))
        response = br.response().read()
        if not self.msg7 in response:
            # There should be no authorization failure, in case the
            # comment was deleted in between. We'll simply go on but
            # the original comment will not be included
            pass
        else:
            self.fail("Oops, users should not be able to reply to a deleted comment")
    def test_comment_access_files_deleted_comment(self):
        """webcomment - access files of a deleted comment"""
        # NOTE(review): the two users are checked against different error
        # texts ("You cannot access files of a deleted comment" vs
        # "Authorization failure") -- presumably intentional, but verify.
        # Juliet should not be able to access the files
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'juliet'
        br['p_pw'] = 'j123uliet'
        br.submit()
        br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid))
        response = br.response().read()
        if "You cannot access files of a deleted comment" in response:
            pass
        else:
            self.fail("Oops, users should not have access to this deleted comment attachment")
        # Jekyll should also not be able to access the files
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'jekyll'
        br['p_pw'] = 'j123ekyll'
        br.submit()
        br.open("%s/%s/%i/comments/attachments/get/%i/file2" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid))
        response = br.response().read()
        if "Authorization failure" in response:
            pass
        else:
            self.fail("Oops, users should not have access to this deleted comment attachment")
    def test_comment_report_deleted_comment(self):
        """webcomment - report a deleted comment"""
        # Juliet should not be able to report the deleted comment
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'juliet'
        br['p_pw'] = 'j123uliet'
        br.submit()
        br.open("%s/%s/%i/comments/report?comid=%s&ln=en" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid))
        response = br.response().read()
        if not "Authorization failure" in response:
            self.fail("Oops, users should not be able to report a deleted comment")

    def test_comment_vote_deleted_comment(self):
        """webcomment - vote for a deleted comment"""
        # (docstring fixed: it was a copy-paste of the 'report' test above)
        # Juliet should not be able to vote for the deleted comment/review
        br = Browser()
        br.open(CFG_SITE_URL + '/youraccount/login')
        br.select_form(nr=0)
        br['p_un'] = 'juliet'
        br['p_pw'] = 'j123uliet'
        br.submit()
        br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \
                (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.deleted_comid))
        response = br.response().read()
        if not "Authorization failure" in response:
            self.fail("Oops, users should not be able to vote for a deleted comment")
def test_comment_report_with_wrong_record(self):
"""webcomment - report a comment using mismatching recid"""
# Juliet should not be able to report a comment she cannot access, even through public recid
br = Browser()
br.open(CFG_SITE_URL + '/youraccount/login')
br.select_form(nr=0)
br['p_un'] = 'juliet'
br['p_pw'] = 'j123uliet'
br.submit()
br.open("%s/%s/%i/comments/report?comid=%s&ln=en" % \
(CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1))
response = br.response().read()
if not "Authorization failure" in response:
self.fail("Oops, users should not be able to report using mismatching recid")
# Jekyll should also not be able to report the comment using the wrong recid
br = Browser()
br.open(CFG_SITE_URL + '/youraccount/login')
br.select_form(nr=0)
br['p_un'] = 'jekyll'
br['p_pw'] = 'j123ekyll'
br.submit()
br.open("%s/%s/%i/comments/report?comid=%s&ln=en" % \
(CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1))
response = br.response().read()
if not "Authorization failure" in response:
self.fail("Oops, users should not be able to report using mismatching recid")
def test_comment_vote_with_wrong_record(self):
    """webcomment - vote for a comment using mismatching recid"""
    # Juliet should not be able to vote for a comment she cannot access,
    # especially through the public recid.
    br = Browser()
    br.open(CFG_SITE_URL + '/youraccount/login')
    br.select_form(nr=0)
    br['p_un'] = 'juliet'
    br['p_pw'] = 'j123uliet'
    br.submit()
    br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \
            (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1))
    response = br.response().read()
    if not "Authorization failure" in response:
        # Fixed copy-pasted failure message (it talked about reporting a
        # deleted comment; this test votes with a mismatching recid).
        self.fail("Oops, users should not be able to vote using mismatching recid")
    # Jekyll should also not be able to vote for the comment using the wrong recid
    br = Browser()
    br.open(CFG_SITE_URL + '/youraccount/login')
    br.select_form(nr=0)
    br['p_un'] = 'jekyll'
    br['p_pw'] = 'j123ekyll'
    br.submit()
    br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \
            (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record, self.restr_comid_1))
    response = br.response().read()
    if not "Authorization failure" in response:
        # Fixed copy-pasted failure message (said "report" instead of "vote").
        self.fail("Oops, users should not be able to vote using mismatching recid")
def test_report_restricted_record_public_discussion_public_comment(self):
    """webcomment - report a comment restricted by 'viewrestrcoll'"""
    # The record itself is restricted, so Juliet must be denied.
    browser = Browser()
    browser.open(CFG_SITE_URL + '/youraccount/login')
    browser.select_form(nr=0)
    browser['p_un'] = 'juliet'
    browser['p_pw'] = 'j123uliet'
    browser.submit()
    browser.open("%s/%s/%i/comments/report?comid=%s&ln=en" %
                 (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1))
    page = browser.response().read()
    if "Authorization failure" not in page:
        self.fail("Oops, this user should not be able to report this comment")
def test_report_public_record_restricted_discussion_public_comment(self):
    """webcomment - report a comment restricted by 'viewcomment'"""
    # The discussion is restricted, so Juliet must be denied.
    browser = Browser()
    browser.open(CFG_SITE_URL + '/youraccount/login')
    browser.select_form(nr=0)
    browser['p_un'] = 'juliet'
    browser['p_pw'] = 'j123uliet'
    browser.submit()
    browser.open("%s/%s/%i/comments/report?comid=%s&ln=en" %
                 (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5))
    page = browser.response().read()
    if "Authorization failure" not in page:
        self.fail("Oops, this user should not be able to report this comment")
def test_report_public_record_public_discussion_restricted_comment(self):
    """webcomment - report a comment restricted by 'viewrestrcomment'"""
    # Only the individual comment is restricted; Juliet must still be denied.
    browser = Browser()
    browser.open(CFG_SITE_URL + '/youraccount/login')
    browser.select_form(nr=0)
    browser['p_un'] = 'juliet'
    browser['p_pw'] = 'j123uliet'
    browser.submit()
    browser.open("%s/%s/%i/comments/report?comid=%s&ln=en" %
                 (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record_restr_comment, self.restr_comid_2))
    page = browser.response().read()
    if "Authorization failure" not in page:
        self.fail("Oops, this user should not be able to report this comment")
def test_vote_restricted_record_public_discussion_public_comment(self):
    """webcomment - vote for a comment restricted by 'viewrestrcoll'"""
    # Juliet should not be able to vote for the comment
    br = Browser()
    br.open(CFG_SITE_URL + '/youraccount/login')
    br.select_form(nr=0)
    br['p_un'] = 'juliet'
    br['p_pw'] = 'j123uliet'
    br.submit()
    br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \
            (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record, self.restr_comid_1))
    response = br.response().read()
    if not "Authorization failure" in response:
        # Fixed copy-pasted failure message: this test votes, it does not report.
        self.fail("Oops, this user should not be able to vote for this comment")
def test_vote_public_record_restricted_discussion_public_comment(self):
    """webcomment - vote for a comment restricted by 'viewcomment'"""
    # Juliet should not be able to vote for the comment
    br = Browser()
    br.open(CFG_SITE_URL + '/youraccount/login')
    br.select_form(nr=0)
    br['p_un'] = 'juliet'
    br['p_pw'] = 'j123uliet'
    br.submit()
    br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \
            (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion, self.restr_comid_5))
    response = br.response().read()
    if not "Authorization failure" in response:
        # Fixed copy-pasted failure message: this test votes, it does not report.
        self.fail("Oops, this user should not be able to vote for this comment")
def test_vote_public_record_public_discussion_restricted_comment(self):
    """webcomment - vote for a comment restricted by 'viewrestrcomment'"""
    # Juliet should not be able to vote for the comment
    br = Browser()
    br.open(CFG_SITE_URL + '/youraccount/login')
    br.select_form(nr=0)
    br['p_un'] = 'juliet'
    br['p_pw'] = 'j123uliet'
    br.submit()
    br.open("%s/%s/%i/comments/vote?comid=%s&com_value=1&ln=en" % \
            (CFG_SITE_URL, CFG_SITE_RECORD, self.public_record_restr_comment, self.restr_comid_2))
    response = br.response().read()
    if not "Authorization failure" in response:
        # Fixed copy-pasted failure message: this test votes, it does not report.
        self.fail("Oops, this user should not be able to vote for this comment")
def test_comment_subscribe_restricted_record_public_discussion(self):
    """webcomment - subscribe to a discussion restricted with 'viewrestrcoll'"""
    # Juliet has no access to the record, so subscribing must fail.
    browser = Browser()
    browser.open(CFG_SITE_URL + '/youraccount/login')
    browser.select_form(nr=0)
    browser['p_un'] = 'juliet'
    browser['p_pw'] = 'j123uliet'
    browser.submit()
    browser.open("%s/%s/%i/comments/subscribe?ln=en" %
                 (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record))
    page = browser.response().read()
    if "Authorization failure" not in page:
        self.fail("Oops, this user should not be able to subscribe to this discussion")
    # Jekyll can read the record, so his subscription must succeed.
    browser = Browser()
    browser.open(CFG_SITE_URL + '/youraccount/login')
    browser.select_form(nr=0)
    browser['p_un'] = 'jekyll'
    browser['p_pw'] = 'j123ekyll'
    browser.submit()
    browser.open("%s/%s/%i/comments/subscribe?ln=en" %
                 (CFG_SITE_URL, CFG_SITE_RECORD, self.restr_record))
    page = browser.response().read()
    if ("You have been subscribed" not in page
            or "Authorization failure" in page):
        self.fail("Oops, this user should be able to subscribe to this discussion")
def test_comment_subscribe_public_record_restricted_discussion(self):
    """webcomment - subscribe to a discussion restricted with 'viewcomment'"""
    # Juliet should not be able to subscribe to the discussion
    br = Browser()
    br.open(CFG_SITE_URL + '/youraccount/login')
    br.select_form(nr=0)
    br['p_un'] = 'juliet'
    br['p_pw'] = 'j123uliet'
    br.submit()
    br.open("%s/%s/%i/comments/subscribe?ln=en" % \
            (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion))
    response = br.response().read()
    if not "Authorization failure" in response:
        self.fail("Oops, this user should not be able to subscribe to this discussion")
    # Romeo should be able to subscribe
    br = Browser()
    br.open(CFG_SITE_URL + '/youraccount/login')
    br.select_form(nr=0)
    br['p_un'] = 'romeo'
    br['p_pw'] = 'r123omeo'
    br.submit()
    br.open("%s/%s/%i/comments/subscribe?ln=en" % \
            (CFG_SITE_URL, CFG_SITE_RECORD, self.restricted_discussion))
    response = br.response().read()
    if not "You have been subscribed" in response or \
            "Authorization failure" in response:
        # A leftover debugging "print response" statement was removed here;
        # it spammed the test output (and is Python-2-only syntax).
        self.fail("Oops, this user should be able to subscribe to this discussion")
class WebCommentTransformationHTMLMarkupTest(unittest.TestCase):
    """ Test functions related to transforming HTML markup."""
    # Each test feeds HTML to EmailWasher().wash() and compares against the
    # expected plain-text rendering. The expected strings are whitespace
    # sensitive, so they must not be reflowed or re-indented.

    def test_unordered_lists_markup_transformation(self):
        """webcomment - unordered lists markup transformation """
        washer = EmailWasher()
        # <ul>/<li> markup becomes "*" bullet lines.
        body_input = """<ul>
<li>foo</li>
<li>bar</li>
</ul>"""
        body_expected = """
* foo
* bar
"""
        self.assertEqual(washer.wash(body_input),
                         body_expected)
        # Without spaces and EOL
        body_input = '<ul><li>foo</li><li>bar</li></ul>'
        self.assertEqual(washer.wash(body_input),
                         body_expected)

    def test_ordered_lists_markup_transformation(self):
        """ webcomment - ordered lists markup transformation """
        washer = EmailWasher()
        # <ol>/<li> markup becomes numbered "1." / "2." lines.
        body_input = """<ol>
<li>foo</li>
<li>bar</li>
</ol>"""
        body_expected = """
1. foo
2. bar
"""
        self.assertEqual(washer.wash(body_input),
                         body_expected)
        # Without spaces and EOL
        body_input = '<ol><li>foo</li><li>bar</li></ol>'
        self.assertEqual(washer.wash(body_input),
                         body_expected)

    def test_nested_lists_markup_transformation(self):
        """ webcomment - nested lists markup transformation """
        washer = EmailWasher()
        # Inner lists restart their numbering independently of the outer one.
        body_input = """<ol>
<li>foo
<ol>
<li>nested foo</li>
</ol>
</li>
<li>bar</li>
</ol>"""
        body_expected = """
1. foo
1. nested foo
2. bar
"""
        self.assertEqual(washer.wash(body_input),
                         body_expected)
        # Without spaces and EOL
        body_input = '<ol><li>foo<ol><li>nested foo</li></ol></li><li>bar</li></ol>'
        self.assertEqual(washer.wash(body_input),
                         body_expected)

    def test_links_markup_transformation(self):
        """ webcomment - links markup transformation """
        washer = EmailWasher()
        # Bare URLs pass through untouched.
        body_input = 'text http://foo.com some more text'
        body_expected = 'text http://foo.com some more text'
        self.assertEqual(washer.wash(body_input),
                         body_expected)
        washer = EmailWasher()
        # Anchors with a distinct label render as <url>(label).
        body_input = '<a href="https://cds.cern.ch/">CDS</a>'
        body_expected = '<https://cds.cern.ch/>(CDS)'
        self.assertEqual(washer.wash(body_input),
                         body_expected)
        washer = EmailWasher()
        # Anchors whose label equals the URL render as just <url>.
        body_input = '<a href="https://cds.cern.ch/">https://cds.cern.ch/</a>'
        body_expected = '<https://cds.cern.ch/>'
        self.assertEqual(washer.wash(body_input),
                         body_expected)

    def test_global_markup_transformation(self):
        """ webcomment - global transformation """
        washer = EmailWasher()
        # Links plus deeply nested ordered/unordered lists in one document.
        body_input = """<a href="http://foo.com">http://foo.com</a>
<ol>
<li>Main Ordered List item</li>
<li>Below is an example of HTML nested unordered list
<ul>
<li>nested list item 1</li>
<li>nested list item 2</li>
<li>Sub nested ordered list
<ol>
<li>sub nested list item A</li>
<li>sub nested list item B</li>
</ol>
</li>
</ul>
</li>
<li>The last line in the main ordered list</li>
</ol> <a href="http://foo.com">bar</a>"""
        body_expected = """<http://foo.com>
1. Main Ordered List item
2. Below is an example of HTML nested unordered list
* nested list item 1
* nested list item 2
* Sub nested ordered list
1. sub nested list item A
2. sub nested list item B
3. The last line in the main ordered list
<http://foo.com>(bar)"""
        self.assertEqual(washer.wash(body_input),
                         body_expected)
        # Without spaces and EOL
        body_input = '<a href="http://foo.com">http://foo.com</a><ol><li>Main Ordered List item</li><li>Below is an example of HTML nested unordered list<ul><li>nested list item 1</li><li>nested list item 2</li><li>Sub nested ordered list<ol><li>sub nested list item A</li><li>sub nested list item B</li></ol></li></ul></li><li>The last line in the main ordered list</li></ol> <a href="http://foo.com">bar</a>'
        self.assertEqual(washer.wash(body_input),
                         body_expected)
# Regression test suite. The availability and restriction suites are
# currently disabled (commented out); only the markup-transformation
# tests run by default.
TEST_SUITE = make_test_suite(#WebCommentWebPagesAvailabilityTest,
                             #WebCommentRestrictionsTest,
                             WebCommentTransformationHTMLMarkupTest)

if __name__ == "__main__":
    # warn_user=True: these tests may touch the live demo database.
    run_test_suite(TEST_SUITE, warn_user=True)
|
Tesora/tesora-tempest | refs/heads/master | tempest/lib/services/compute/certificates_client.py | 12 | # Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import certificates as schema
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
class CertificatesClient(base_compute_client.BaseComputeClient):
    """Client for the Nova ``os-certificates`` API."""

    def show_certificate(self, certificate_id):
        """Fetch a single certificate by its identifier."""
        resp, raw = self.get("os-certificates/%s" % certificate_id)
        parsed = json.loads(raw)
        self.validate_response(schema.get_certificate, resp, parsed)
        return rest_client.ResponseBody(resp, parsed)

    def create_certificate(self):
        """Create a certificate."""
        # The POST body is intentionally empty; the server generates the cert.
        resp, raw = self.post("os-certificates", None)
        parsed = json.loads(raw)
        self.validate_response(schema.create_certificate, resp, parsed)
        return rest_client.ResponseBody(resp, parsed)
|
zhaobin19918183/zhaobinCode | refs/heads/master | BlogDjango/BlogDjango/wsgi.py | 1 | """
WSGI config for BlogDjango project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the handler.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BlogDjango.settings")

# Module-level WSGI entry point picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
|
dufresnedavid/canada | refs/heads/7.0-ddufresne-employer-contributions | l10n_ca_hr_payroll_leave_type/__init__.py | 1 | # -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 - 2014 Odoo Canada. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import (
hr_holiday_status,
)
|
davido/buck | refs/heads/master | src/com/facebook/buck/json/buck_parser/buck_test.py | 7 | from .buck import (
BuildFileContext,
LazyBuildEnvPartial,
flatten_dicts,
get_mismatched_args,
subdir_glob,
)
from .glob_mercurial import _load_manifest_trie, glob_mercurial_manifest
from .glob_watchman import format_watchman_query_params
from .glob_internal import path_component_contains_dot, glob_internal
from pathlib import Path, PurePosixPath, PureWindowsPath
import itertools
import os
import shutil
import tempfile
import unittest
class FakePathMixin(object):
    """Mixin faking the pathlib glob()/is_file() surface with canned results."""

    def glob(self, pattern):
        """Return the canned result list for *pattern*, or None if unknown."""
        # Python glob accepts unix-style separators on Windows too, so the
        # canned results are keyed on the forward-slash form only.
        return self.glob_results.get(pattern.replace('\\', '/'))

    def is_file(self):
        # Every fake path pretends to be an existing regular file.
        return True
class FakePosixPath(FakePathMixin, PurePosixPath):
    # PurePosixPath semantics plus the mixin's canned glob results.
    pass
class FakeWindowsPath(FakePathMixin, PureWindowsPath):
    # PureWindowsPath semantics plus the mixin's canned glob results.
    pass
def fake_path(fake_path_class, path, glob_results=None):
    """Build a fake path object pre-loaded with canned glob results.

    Path does magic in __new__ with its args; it's hard to add more without
    changing that class. So we use a wrapper function to diddle with
    FakePath's members.

    glob_results maps glob patterns to lists of relative path strings; each
    entry is re-rooted under the created path object.
    """
    # Use None as the default instead of a shared mutable {} literal
    # (classic mutable-default-argument pitfall).
    if glob_results is None:
        glob_results = {}
    result = fake_path_class(path)
    result.glob_results = {}
    # .items() works on both Python 2 and 3; the original iteritems() is
    # Python-2-only.
    for pattern, paths in glob_results.items():
        result.glob_results[pattern] = [result / fake_path_class(p)
                                        for p in paths]
    return result
class TestBuckPlatform(unittest.TestCase):
    """Tests for LazyBuildEnvPartial's deferred build_env injection."""

    def test_lazy_build_env_partial(self):
        # The wrapped callable simply echoes its arguments so we can observe
        # exactly what invoke() passed through.
        def cobol_binary(name, deps=[], build_env=None):
            return (name, deps, build_env)

        lazy_partial = LazyBuildEnvPartial(cobol_binary)
        # The build_env attached at invoke time must be forwarded as-is.
        lazy_partial.build_env = {}
        self.assertEqual(
            ('HAL', [1, 2, 3], {}),
            lazy_partial.invoke(name='HAL', deps=[1, 2, 3]))
        # Re-binding build_env between calls must be reflected immediately.
        lazy_partial.build_env = {'abc': 789}
        self.assertEqual(
            ('HAL', [1, 2, 3], {'abc': 789}),
            lazy_partial.invoke(name='HAL', deps=[1, 2, 3]))
class TestBuckGlobMixin(object):
    """Shared glob() test cases, parameterised over the path flavour.

    Concrete subclasses provide fake_path() (which path class to fake) and
    assertGlobMatches() (how to normalise separators before comparing), and
    may override do_glob() to exercise a different glob backend.
    """

    def do_glob(self, *args, **kwargs):
        # subclasses can override this to test a different glob implementation
        return glob_internal(*args, **kwargs)

    def test_glob_includes_simple(self):
        # A single include pattern returns exactly its canned matches.
        search_base = self.fake_path(
            'foo',
            glob_results={'*.java': ['A.java', 'B.java']})
        self.assertGlobMatches(
            ['A.java', 'B.java'],
            self.do_glob(
                includes=['*.java'],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))

    def test_glob_includes_sort(self):
        # Results come back sorted regardless of the canned ordering.
        search_base = self.fake_path(
            'foo',
            glob_results={'*.java': ['A.java', 'E.java', 'D.java', 'C.java', 'B.java']})
        self.assertGlobMatches(
            ['A.java', 'B.java', 'C.java', 'D.java', 'E.java'],
            self.do_glob(
                includes=['*.java'],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))

    def test_glob_includes_multi(self):
        # Multiple include patterns are unioned.
        search_base = self.fake_path(
            'foo',
            glob_results={
                'bar/*.java': ['bar/A.java', 'bar/B.java'],
                'baz/*.java': ['baz/C.java', 'baz/D.java'],
            })
        self.assertGlobMatches(
            ['bar/A.java', 'bar/B.java', 'baz/C.java', 'baz/D.java'],
            self.do_glob(
                includes=['bar/*.java', 'baz/*.java'],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))

    def test_glob_excludes_double_star(self):
        # '**' exclude patterns filter matches from the include set.
        search_base = self.fake_path(
            'foo',
            glob_results={
                '**/*.java': ['A.java', 'B.java', 'Test.java'],
            })
        self.assertGlobMatches(
            ['A.java', 'B.java'],
            self.do_glob(
                includes=['**/*.java'],
                excludes=['**/*Test.java'],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))

    def test_glob_excludes_multi(self):
        # Character-class excludes apply across all include patterns.
        search_base = self.fake_path(
            'foo',
            glob_results={
                'bar/*.java': ['bar/A.java', 'bar/B.java'],
                'baz/*.java': ['baz/C.java', 'baz/D.java'],
            })
        self.assertGlobMatches(
            ['bar/B.java', 'baz/D.java'],
            self.do_glob(
                includes=['bar/*.java', 'baz/*.java'],
                excludes=['*/[AC].java'],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))

    def test_glob_excludes_relative(self):
        # Excludes are interpreted relative to the search base, so 'bar/*'
        # removes the top-level bar/ entry but keeps foo/bar/.
        search_base = self.fake_path(
            'foo',
            glob_results={
                '**/*.java': ['foo/A.java', 'foo/bar/B.java', 'bar/C.java'],
            })
        self.assertGlobMatches(
            ['foo/A.java', 'foo/bar/B.java'],
            self.do_glob(
                includes=['**/*.java'],
                excludes=['bar/*.java'],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))

    def test_glob_project_root_relative_excludes_relative(self):
        # project_root_relative_excludes are anchored at the project root
        # (which includes the 'foo' search base as a prefix here).
        search_base = self.fake_path(
            'foo',
            glob_results={
                '**/*.java': ['foo/A.java', 'foo/bar/B.java', 'bar/C.java'],
            })
        self.assertGlobMatches(
            ['bar/C.java'],
            self.do_glob(
                includes=['**/*.java'],
                excludes=[],
                project_root_relative_excludes=['foo/foo/**'],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))

    def test_glob_includes_skips_dotfiles(self):
        # Dotfiles are dropped by default.
        search_base = self.fake_path(
            'foo',
            glob_results={'*.java': ['A.java', '.B.java']})
        self.assertGlobMatches(
            ['A.java'],
            self.do_glob(
                includes=['*.java'],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))

    def test_glob_includes_skips_dot_directories(self):
        # Any dot *component* (not just the basename) disqualifies a match.
        search_base = self.fake_path(
            'foo',
            glob_results={'*.java': ['A.java', '.test/B.java']})
        self.assertGlobMatches(
            ['A.java'],
            self.do_glob(
                includes=['*.java'],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))

    def test_glob_includes_does_not_skip_dotfiles_if_include_dotfiles(self):
        # include_dotfiles=True keeps the dotfiles in the (sorted) result.
        search_base = self.fake_path(
            'foo',
            glob_results={'*.java': ['A.java', '.B.java']})
        self.assertGlobMatches(
            ['.B.java', 'A.java'],
            self.do_glob(
                includes=['*.java'],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=True,
                search_base=search_base,
                project_root='.'))

    def test_explicit_exclude_with_file_separator_excludes(self):
        # An exclude naming an exact path (with separators) removes it.
        search_base = self.fake_path(
            'foo',
            glob_results={'java/**/*.java': ['java/Include.java', 'java/Exclude.java']})
        self.assertGlobMatches(
            ['java/Include.java'],
            self.do_glob(
                includes=['java/**/*.java'],
                excludes=['java/Exclude.java'],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))
class TestBuckSubdirGlobMixin(object):
    """Shared subdir_glob() test cases; subclasses supply fake_path() and
    assertGlobMatches() just as for TestBuckGlobMixin."""

    def do_subdir_glob(self, *args, **kwargs):
        # subclasses can override this to test a different glob implementation
        return subdir_glob(*args, **kwargs)

    def test_subdir_glob(self):
        # NOTE(review): BuildFileContext is built with 13 positional
        # arguments; this is brittle against signature changes upstream.
        build_env = BuildFileContext(
            self.fake_path(''), None, None, None, None, [], None, None, None, None, False,
            False, False)
        search_base = self.fake_path(
            'foo',
            glob_results={
                'lib/bar/*.h': ['lib/bar/A.h', 'lib/bar/B.h'],
                'lib/baz/*.h': ['lib/baz/C.h', 'lib/baz/D.h'],
            })
        # Keys are paths relative to the 'lib' subdir; values are the
        # original relative paths.
        self.assertGlobMatches(
            {
                'bar/B.h': 'lib/bar/B.h',
                'bar/A.h': 'lib/bar/A.h',
                'baz/D.h': 'lib/baz/D.h',
                'baz/C.h': 'lib/baz/C.h',
            },
            self.do_subdir_glob([
                ('lib', 'bar/*.h'),
                ('lib', 'baz/*.h')],
                build_env=build_env,
                search_base=search_base))

    def test_subdir_glob_with_prefix(self):
        build_env = BuildFileContext(
            self.fake_path(''), None, None, None, None, [], None, None, None, None, False,
            False, False)
        search_base = self.fake_path(
            'foo',
            glob_results={
                'lib/bar/*.h': ['lib/bar/A.h', 'lib/bar/B.h'],
            })
        # A prefix is prepended to every result key.
        self.assertGlobMatches(
            {
                'Prefix/bar/B.h': 'lib/bar/B.h',
                'Prefix/bar/A.h': 'lib/bar/A.h',
            },
            self.do_subdir_glob([('lib', 'bar/*.h')],
                                prefix='Prefix',
                                build_env=build_env,
                                search_base=search_base))
class TestBuckPosix(TestBuckGlobMixin, TestBuckSubdirGlobMixin, unittest.TestCase):
    """Runs the shared glob/subdir_glob suites over POSIX-flavoured fakes."""

    @staticmethod
    def fake_path(*path_args, **path_kwargs):
        # Delegate to the module-level factory with the POSIX fake class.
        return fake_path(FakePosixPath, *path_args, **path_kwargs)

    def assertGlobMatches(self, expected, actual):
        # POSIX paths need no separator fix-up; compare directly.
        self.assertEqual(expected, actual)
class TestBuckWindows(TestBuckGlobMixin, TestBuckSubdirGlobMixin, unittest.TestCase):
    """Runs the shared glob/subdir_glob suites over Windows-flavoured fakes."""

    @staticmethod
    def fake_path(*path_args, **path_kwargs):
        # Delegate to the module-level factory with the Windows fake class.
        return fake_path(FakeWindowsPath, *path_args, **path_kwargs)

    def assertGlobMatches(self, expected, actual):
        # The shared test data uses '/' separators; rewrite the expectation
        # to backslashes so tests read naturally for both platforms.
        if isinstance(expected, list):
            fixed_expected = [entry.replace('/', '\\') for entry in expected]
        else:
            fixed_expected = {
                key.replace('/', '\\'): value.replace('/', '\\')
                for key, value in expected.items()
            }
        self.assertEqual(fixed_expected, actual)
# Mercurial manifest / status globbing tests
class FakeStatus(object):
    """Minimal stand-in for a mercurial status result.

    Each category holds a list of file paths; a falsy/None argument becomes
    a fresh empty list, while a caller-supplied list is kept as-is.
    """

    def __init__(self, removed=None, deleted=None, added=None, unknown=None):
        self.removed = removed if removed else []
        self.deleted = deleted if deleted else []
        self.added = added if added else []
        self.unknown = unknown if unknown else []
class TestMercurialManifestGlob(TestBuckGlobMixin, unittest.TestCase):
    """Glob tests driven through the mercurial-manifest backend.

    fake_path() doubles as a manifest generator: the canned glob results are
    flattened into self.fake_manifest, and status() temporarily overlays a
    fake `hg status` result for the duration of a with-block.
    """
    # Class-level defaults mean "no status overlay" / "no manifest yet".
    fake_status = None
    fake_manifest = None

    def setUp(self):
        # clear the memoization cache for _load_manifest_trie
        _load_manifest_trie._cache.clear()

    def status(self, removed=None, deleted=None, added=None, unknown=None):
        # Returns a context manager that installs a FakeStatus built from the
        # arguments (captured by closure) and restores the previous status on
        # exit. The memo cache is cleared so the overlay takes effect.
        class StatusContext(object):
            def __init__(self, test):
                self.test = test

            def __enter__(self):
                _load_manifest_trie._cache.clear()
                self.orig_status = self.test.fake_status
                self.test.fake_status = FakeStatus(
                    removed, deleted, added, unknown)

            def __exit__(self, *exc):
                self.test.fake_status = self.orig_status
        return StatusContext(self)

    def fake_path(self, *args, **kwargs):
        fp = fake_path(FakePosixPath, *args, **kwargs)
        # produce a mercurial manifest from the test data
        self.fake_manifest = [
            './' + str(p) for paths in fp.glob_results.values() for p in paths]
        return fp

    def assertGlobMatches(self, expected, actual):
        self.assertEqual(expected, actual)

    def do_glob(self, includes, excludes, project_root_relative_excludes,
                include_dotfiles, search_base, project_root):
        # Route the shared mixin tests through the manifest-based glob,
        # feeding it the manifest/status assembled by fake_path()/status().
        repo_info = (
            project_root,
            self.fake_manifest or [],
            self.fake_status or FakeStatus()
        )
        return glob_mercurial_manifest(
            includes, excludes, project_root_relative_excludes,
            include_dotfiles, search_base, project_root, repo_info)

    def test_status_added(self):
        # Files reported as added by `hg status` appear in glob results.
        search_base = self.fake_path(
            'foo',
            glob_results={'*.java': ['A.java', 'B.java']})
        self.assertGlobMatches(
            ['A.java', 'B.java'],
            self.do_glob(
                includes=['*.java'],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))
        with self.status(added=['./foo/C.java']):
            self.assertGlobMatches(
                ['A.java', 'B.java', 'C.java'],
                self.do_glob(
                    includes=['*.java'],
                    excludes=[],
                    project_root_relative_excludes=[],
                    include_dotfiles=False,
                    search_base=search_base,
                    project_root='.'))

    def test_status_deleted(self):
        # Deleted files disappear from results even though still in manifest.
        search_base = self.fake_path(
            'foo',
            glob_results={'*.java': ['A.java', 'B.java']})
        self.assertGlobMatches(
            ['A.java', 'B.java'],
            self.do_glob(
                includes=['*.java'],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))
        with self.status(deleted=['./foo/A.java']):
            self.assertGlobMatches(
                ['B.java'],
                self.do_glob(
                    includes=['*.java'],
                    excludes=[],
                    project_root_relative_excludes=[],
                    include_dotfiles=False,
                    search_base=search_base,
                    project_root='.'))

    def test_status_unknown(self):
        # Untracked ("unknown") files are still globbable.
        search_base = self.fake_path(
            'foo',
            glob_results={'*.java': ['A.java', 'B.java']})
        self.assertGlobMatches(
            ['A.java', 'B.java'],
            self.do_glob(
                includes=['*.java'],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))
        with self.status(unknown=['./foo/C.java']):
            self.assertGlobMatches(
                ['A.java', 'B.java', 'C.java'],
                self.do_glob(
                    includes=['*.java'],
                    excludes=[],
                    project_root_relative_excludes=[],
                    include_dotfiles=False,
                    search_base=search_base,
                    project_root='.'))

    def test_status_removed(self):
        # `hg rm`-ed files are excluded from results.
        search_base = self.fake_path(
            'foo',
            glob_results={'*.java': ['A.java', 'B.java']})
        self.assertGlobMatches(
            ['A.java', 'B.java'],
            self.do_glob(
                includes=['*.java'],
                excludes=[],
                project_root_relative_excludes=[],
                include_dotfiles=False,
                search_base=search_base,
                project_root='.'))
        with self.status(removed=['./foo/A.java']):
            self.assertGlobMatches(
                ['B.java'],
                self.do_glob(
                    includes=['*.java'],
                    excludes=[],
                    project_root_relative_excludes=[],
                    include_dotfiles=False,
                    search_base=search_base,
                    project_root='.'))
class TestBuck(unittest.TestCase):
    """Integration-style tests against the real filesystem plus unit tests
    for watchman query construction, flatten_dicts and dot-detection."""

    def test_glob_double_star_integration(self):
        # End-to-end glob over a real temp directory: '**' recursion,
        # exclude patterns, dotfile skipping, and directory filtering.
        d = tempfile.mkdtemp()
        try:
            subdir = os.path.join(d, 'b', 'a', 'c', 'a')
            os.makedirs(subdir)
            f = open(os.path.join(subdir, 'A.java'), 'w')
            f.close()
            f = open(os.path.join(subdir, 'B.java'), 'w')
            f.close()
            f = open(os.path.join(subdir, 'Test.java'), 'w')
            f.close()
            f = open(os.path.join(subdir, '.tmp.java'), 'w')
            f.close()
            # A directory whose name looks like a file must not match.
            os.makedirs(os.path.join(subdir, 'NotAFile.java'))
            self.assertEquals(
                [
                    os.path.join('b', 'a', 'c', 'a', 'A.java'),
                    os.path.join('b', 'a', 'c', 'a', 'B.java'),
                ],
                glob_internal(
                    includes=['b/a/**/*.java'],
                    excludes=['**/*Test.java'],
                    project_root_relative_excludes=[],
                    include_dotfiles=False,
                    search_base=Path(d),
                    project_root=Path(d)))
        finally:
            shutil.rmtree(d)

    def test_case_preserved(self):
        # Matching must return the on-disk casing, not the pattern's.
        d = tempfile.mkdtemp()
        try:
            subdir = os.path.join(d, 'java')
            os.makedirs(subdir)
            open(os.path.join(subdir, 'Main.java'), 'w').close()
            self.assertEquals(
                [
                    os.path.join('java', 'Main.java'),
                ],
                glob_internal(
                    includes=['java/Main.java'],
                    excludes=[],
                    project_root_relative_excludes=[],
                    include_dotfiles=False,
                    search_base=Path(d),
                    project_root=Path(d)))
        finally:
            shutil.rmtree(d)

    def test_watchman_query_params_includes(self):
        # Includes only: the match terms go inside the 'expression' clause.
        query_params = format_watchman_query_params(
            ['**/*.java'],
            [],
            False,
            '/path/to/glob',
            False)
        self.assertEquals(
            {
                'relative_root': '/path/to/glob',
                'path': [''],
                'fields': ['name'],
                'expression': [
                    'allof',
                    ['anyof', ['type', 'f'], ['type', 'l']],
                    'exists',
                    ['anyof', ['match', '**/*.java', 'wholename', {}]],
                ]
            },
            query_params)

    def test_watchman_query_params_includes_and_excludes(self):
        # Excludes are wrapped in a 'not' term ahead of the includes.
        query_params = format_watchman_query_params(
            ['**/*.java'],
            ['**/*Test.java'],
            False,
            '/path/to/glob',
            False)
        self.assertEquals(
            {
                'relative_root': '/path/to/glob',
                'path': [''],
                'fields': ['name'],
                'expression': [
                    'allof',
                    ['anyof', ['type', 'f'], ['type', 'l']],
                    ['not', ['anyof', ['match', '**/*Test.java', 'wholename', {}]]],
                    'exists',
                    ['anyof', ['match', '**/*.java', 'wholename', {}]],
                ]
            },
            query_params)

    def test_watchman_query_params_glob_generator(self):
        # With the glob generator enabled, includes move into a top-level
        # 'glob' key and the path/exists terms disappear.
        query_params = format_watchman_query_params(
            ['**/*.java'],
            ['**/*Test.java'],
            False,
            '/path/to/glob',
            True)
        self.assertEquals(
            {
                'relative_root': '/path/to/glob',
                'glob': ['**/*.java'],
                'fields': ['name'],
                'expression': [
                    'allof',
                    ['anyof', ['type', 'f'], ['type', 'l']],
                    ['not', ['anyof', ['match', '**/*Test.java', 'wholename', {}]]],
                ]
            },
            query_params)

    def test_flatten_dicts_overrides_earlier_keys_with_later_ones(self):
        base = {
            'a': 'foo',
            'b': 'bar',
        }
        override = {
            'a': 'baz',
        }
        override2 = {
            'a': 42,
            'c': 'new',
        }
        self.assertEquals(
            {
                'a': 'baz',
                'b': 'bar',
            },
            flatten_dicts(base, override))
        self.assertEquals(
            {
                'a': 42,
                'b': 'bar',
                'c': 'new',
            },
            flatten_dicts(base, override, override2)
        )
        # assert none of the input dicts were changed:
        self.assertEquals(
            {
                'a': 'foo',
                'b': 'bar',
            },
            base
        )
        self.assertEquals(
            {
                'a': 'baz',
            },
            override
        )
        self.assertEquals(
            {
                'a': 42,
                'c': 'new',
            },
            override2
        )

    def test_path_component_contains_dot(self):
        # Only a leading dot in a *component* counts, anywhere in the path.
        self.assertFalse(path_component_contains_dot(Path('')))
        self.assertFalse(path_component_contains_dot(Path('foo')))
        self.assertFalse(path_component_contains_dot(Path('foo/bar')))
        self.assertTrue(path_component_contains_dot(Path('.foo/bar')))
        self.assertTrue(path_component_contains_dot(Path('foo/.bar')))
        self.assertTrue(path_component_contains_dot(Path('.foo/.bar')))
class TestMemoized(unittest.TestCase):
    """Tests for the memoized() decorator.

    NOTE(review): the last three tests (test_missing_foo and friends)
    exercise get_mismatched_args, not memoization; they appear to have been
    placed in this class by accident.
    """

    def _makeone(self, func, *args, **kwargs):
        from .util import memoized
        return memoized(*args, **kwargs)(func)

    def test_cache_none(self):
        # A cached None must be returned as-is, not treated as a cache miss
        # (the iterator default would yield 'foo' on a second real call).
        decorated = self._makeone(
            lambda _retval=iter([None, 'foo']): next(_retval))
        uncached = decorated()
        cached = decorated()
        self.assertEqual(uncached, cached)
        self.assertTrue(cached is None)

    def test_no_deepcopy(self):
        # deepcopy=False returns the identical cached object each time.
        decorated = self._makeone(
            lambda: [],
            deepcopy=False,
        )
        initial = decorated()
        cached = decorated()
        self.assertTrue(initial is cached)

    def test_deepcopy(self):
        # Default behaviour: each call gets an independent deep copy, so
        # mutating one result must not leak into the cache.
        decorated = self._makeone(
            lambda: [{}],
        )
        initial = decorated()
        cached = decorated()
        self.assertTrue(initial is not cached)
        initial[0]['foo'] = 'bar'
        self.assertTrue(cached[0] == {})

    def test_cachekey(self):
        # Keyword argument *order* must not affect the cache key, but the
        # values must (the counter default reveals real invocations).
        decorated = self._makeone(
            # note that in Python 2 without hash randomisation, 'bar' and 'baz' will collide in
            # a small dictionary, as their hash keys differ by 8.
            lambda foo, bar='baz', baz='bar', _retval=itertools.count(): next(_retval)
        )
        initial = decorated(42, baz='spam', bar='eggs')
        cached = decorated(42, bar='eggs', baz='spam')
        different_keyword_values = decorated(42, bar='eric', baz='idle')
        self.assertEqual(initial, cached)
        self.assertNotEqual(initial, different_keyword_values)

    def test_custom_cachekey(self):
        # A custom keyfunc can deliberately ignore some arguments.
        decorated = self._makeone(
            lambda foo, bar='baz', _retval=itertools.count(): next(_retval),
            keyfunc=lambda foo, **kwargs: foo,
        )
        initial = decorated(42, bar='spam')
        cached = decorated(42, bar='ignored')
        different_foo = decorated(81, bar='spam')
        self.assertEqual(initial, cached)
        self.assertNotEqual(initial, different_foo)

    def test_missing_foo(self):
        # get_mismatched_args: a required positional missing from the call.
        def fn(foo, bar=1, baz=None):
            pass
        missing, extra = get_mismatched_args(fn, [], {})
        self.assertEqual(missing, ['foo'])
        self.assertEqual(extra, [])

    def test_extra_kwargs(self):
        # get_mismatched_args: unexpected keyword arguments are reported.
        def fn(foo, bar=1, baz=None):
            pass
        missing, extra = get_mismatched_args(fn, [], {'parrot': 'dead', 'trout': 'slapped'})
        self.assertEqual(missing, ['foo'])
        self.assertEqual(extra, ['parrot', 'trout'])

    def test_foo_as_kwarg(self):
        # get_mismatched_args: a positional satisfied by keyword is fine.
        def fn(foo, bar=1, baz=None):
            pass
        missing, extra = get_mismatched_args(fn, [], {'foo': 'value'})
        self.assertEqual(missing, [])
        self.assertEqual(extra, [])
# Allow running this module directly, outside the buck test harness.
if __name__ == '__main__':
    unittest.main()
|
trungnt13/uef-summerschool2016 | refs/heads/master | logistic_regression.py | 1 | # make our code python2 and python3 compatible
# MNIST logistic-regression demo: load data from HDF5, train a softmax
# classifier with Theano mini-batch gradient descent, then report accuracy
# and plot diagnostics.
from __future__ import print_function, absolute_import, division
# use appropriate matplotlib backend for ipython notebook
import matplotlib
matplotlib.use('Agg')
from IPython.core.pylabtools import figsize
figsize(12, 4)
# figures and plot library
from matplotlib import pyplot as plt
# don't need to care about this
import os
import sys
os.environ['THEANO_FLAGS'] = "device=cpu,optimizer=fast_run"
DATA_DIR = os.path.join('/res', 'data')
# path to our libraries source code
sys.path.append(os.path.join('/res', 'src'))
import scipy.io as sio
import numpy as np
# computation libraries
import theano
from theano import tensor as T
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import h5py  # for loading data
# some utilities I write for you for easily plotting stuffs
from utils import plot_images, Progbar, plot_confusion_matrix, plot_weights

dataset = h5py.File(os.path.join(DATA_DIR, 'mnist.h5'), 'r')
# BUGFIX: .iteritems() is Python-2-only; .items() exists on both Python 2
# and 3, matching this file's stated 2/3-compatibility goal.
for key, value in dataset.items():
    print('Name:%s, Shape:%s, Dtype:%s' % (key, value.shape, value.dtype))
# Load the training data
X_train = dataset['X_train'].value
y_train = dataset['y_train'].value
# Load validation data
X_valid = dataset['X_valid'].value
y_valid = dataset['y_valid'].value
# Load test data
X_test = dataset['X_test'].value
y_test = dataset['y_test'].value

# pick randomly 16 images from training data
random_choices = np.random.choice(np.arange(X_train.shape[0]),
                                  size=16, replace=False)
X_sampled = X_train[random_choices]
y_samples = y_train[random_choices]
# start plotting
plt.figure()
_ = plot_images(X_sampled)
plt.show()
print(y_samples)

# Class-distribution histograms for the three splits.
plt.figure()
plt.subplot(1, 3, 1)
plt.title("Training set statistics")
plt.hist(y_train, bins=10)
plt.subplot(1, 3, 2)
plt.title("Validation set statistics")
plt.hist(y_valid, bins=10)
_ = plt.subplot(1, 3, 3)
plt.title("Test set statistics")
plt.hist(y_test, bins=10)
plt.show()

def glorot_uniform(shape, gain=1.0):
    """Glorot (Xavier) uniform initializer.

    Samples float32 values from U(-a, a) with
    a = gain * sqrt(6 / (fan_in + fan_out)).
    """
    if len(shape) < 2:
        shape = (1,) + tuple(shape)
    n1, n2 = shape[:2]
    receptive_field_size = np.prod(shape[2:])
    std = gain * np.sqrt(2.0 / ((n1 + n2) * receptive_field_size))
    a = 0.0 - np.sqrt(3) * std
    b = 0.0 + np.sqrt(3) * std
    return np.cast['float32'](
        np.random.uniform(low=a, high=b, size=shape))

# our features are stored in a tensor (nb_samples, nb_row, nb_col)
X = T.tensor3(name='X', dtype='float32')
y_true = T.ivector()  # our output is integer vector (i.e. the number 1, 2, 3, 4, 5, 6 ...)
# Our parameters
nb_features = np.prod(X_train.shape[1:])  # 784
nb_classes = 10  # 10 different digits
W_init = glorot_uniform((nb_features, nb_classes))
W = theano.shared(W_init, name='W')
b = theano.shared(np.zeros(shape=(nb_classes,), dtype='float32'), name='bias')
# activation just a linear combination of features and parameters (weights)
# Don't forget to add the bias
activation = T.dot(T.flatten(X, outdim=2), W) + b
# softmax function "smash" to activation to the probability value (confident value) for each digits
y_pred = T.nnet.softmax(activation)
# We use categorical_crossentropy as objective function
cost = T.mean(T.nnet.categorical_crossentropy(y_pred, y_true))
# gradient descent
W_gradient, b_gradient = T.grad(cost=cost, wrt=[W, b])
# we have to cast the update to float32 to make the type of weights consistent
learning_rate = theano.shared(np.cast['float32'](0.1), name='learning_rate')
update = [(W, W - W_gradient * learning_rate),
          (b, b - b_gradient * learning_rate)]
# create function for training and making prediction
f_train = theano.function(inputs=[X, y_true], outputs=cost,
                          updates=update,
                          allow_input_downcast=True)
f_predict = theano.function(inputs=[X], outputs=y_pred,
                            allow_input_downcast=True)

NB_EPOCH = 2
BATCH_SIZE = 128
LEARNING_RATE = 0.1
learning_rate.set_value(np.cast['float32'](LEARNING_RATE))
training_history = []
valid_history = []
for epoch in range(NB_EPOCH):
    prog = Progbar(target=X_train.shape[0])
    n = 0
    history = []
    while n < X_train.shape[0]:
        start = n
        end = min(n + BATCH_SIZE, X_train.shape[0])
        c = f_train(X_train[start:end], y_train[start:end])
        prog.title = 'Epoch: %.2d, Cost: %.4f' % (epoch + 1, c)
        prog.add(end - start)
        n += BATCH_SIZE
        history.append(c)
    # end of epoch, start validating
    y = np.argmax(f_predict(X_valid), axis=-1)
    accuracy = accuracy_score(y_valid, y)
    print('Validation accuracy:', accuracy)
    # save history
    training_history.append(np.mean(history))
    valid_history.append(accuracy)

# Final evaluation on the held-out test set.
y = np.argmax(f_predict(X_test), axis=-1)
accuracy = accuracy_score(y_test, y)
print('Test accuracy:', accuracy)
print('Classification report:')
print(classification_report(y_test, y))

plt.figure()
# NOTE(review): MNIST labels are the digits 0-9, so labels=range(1, 11)
# looks shifted by one -- confirm against plot_confusion_matrix's contract.
plot_confusion_matrix(confusion_matrix(y_test, y),
                      labels=range(1, 11))
plt.show()

plt.figure()
plt.plot(training_history, c='b', label="Training cost")
plt.plot(valid_history, c='r', label="Validation accuracy")
plt.legend()
plt.show()

# Compare initial vs. learned weights visually.
plt.figure()
plt.subplot(2, 1, 1)
plot_weights(W_init, keep_aspect=False)
plt.subplot(2, 1, 2)
plot_weights(W.get_value(), keep_aspect=False)
plt.show()
|
pathompongoo/ThGovJobApp | refs/heads/master | env/lib/python2.7/site-packages/dominate/util.py | 10 | '''
Utility classes for creating dynamic html documents
'''
__license__ = '''
This file is part of Dominate.
Dominate is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Dominate is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General
Public License along with Dominate. If not, see
<http://www.gnu.org/licenses/>.
'''
import re
from .dom_tag import dom_tag
# Python 2/3 compatibility: Python 3 has no `basestring`/`unichr`,
# so alias them to `str`/`chr` there.
try:
    basestring = basestring
except NameError:
    basestring = str
    unichr = chr
def include(f):
    '''
    Return the contents of the file named *f*, wrapped as a raw()
    (unescaped) node.
    '''
    # Use a context manager so the handle is closed even if read() raises
    # (the original leaked the handle on error).
    with open(f, 'r') as fl:
        data = fl.read()
    return raw(data)
def system(cmd, data=None):
    '''
    Run *cmd* through the shell, optionally feeding *data* to its stdin,
    and return its standard output decoded as utf8.
    '''
    import subprocess
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE,
                            stdin=subprocess.PIPE)
    stdout, _ = proc.communicate(data)
    return stdout.decode('utf8')
def escape(data, quote=True):  # stoled from std lib cgi
    '''
    Escape HTML special characters in *data*.

    "&" is replaced first (so already-produced entities are not mangled),
    then "<" and ">".  When *quote* is true the double-quote character is
    replaced as well.  Used for text appearing in the body of an HTML
    document.
    '''
    replacements = [("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")]
    if quote:
        replacements.append(('"', "&quot;"))
    for char, entity in replacements:
        data = data.replace(char, entity)
    return data
# Minimal named-entity table used by unescape(): entity name -> code point.
# NOTE(review): 'nbsp' maps to an ordinary space (32), not U+00A0 --
# presumably deliberate; confirm before "fixing".
_unescape = {
  'quot': 34,
  'amp':  38,
  'lt':   60,
  'gt':   62,
  'nbsp': 32,
  # more here
  # http://www.w3.org/TR/html4/sgml/entities.html
  'yuml': 255,
}
def unescape(data):
    '''
    Reverse of escape(): translate numeric character references (&#NNN;)
    and the small set of named entities in _unescape back to characters.
    Unknown named entities become '?'.
    '''
    pattern = re.compile(r'&(?:(?:#(\d+))|([^;]+));')

    def _decode(match):
        number, name = match.groups()
        if number:
            return unichr(int(number))
        return unichr(_unescape.get(name, ord('?')))

    return pattern.sub(_decode, data)
_reserved = ";/?:@&=+$, "
_replace_map = dict((c, '%%%2X' % ord(c)) for c in _reserved)
def url_escape(data):
return ''.join(_replace_map.get(c, c) for c in data)
def url_unescape(data):
    '''Decode %XX percent-escapes in *data* back into characters.'''
    def _decode(match):
        return unichr(int(match.group(1), 16))
    return re.sub('%([0-9a-fA-F]{2})', _decode, data)
class lazy(dom_tag):
  '''
  delays function execution until rendered
  '''
  def __new__(_cls, *args, **kwargs):
    '''
    Need to reset this special method or else
    dom_tag will think it's being used as a decorator.
    This means lazy() can't be used as a decorator, but
    thinking about when you might want that just confuses me.
    '''
    return object.__new__(_cls)

  def __init__(self, func, *args, **kwargs):
    # Store the callable and its arguments; the call happens in _render.
    super(lazy, self).__init__()
    self.func = func
    self.args = args
    self.kwargs = kwargs

  def _render(self, rendered, indent=1, inline=False):
    # Invoke the deferred callable now and append its str() form.
    r = self.func(*self.args, **self.kwargs)
    rendered.append(str(r))
# TODO rename this to raw?
class text(dom_tag):
  '''
  Just a string. useful for inside context managers
  '''
  # Plain text nodes are rendered without pretty-printing indentation.
  is_pretty = False

  def __init__(self, _text, escape=True):
    super(text, self).__init__()
    if escape:
      # globals() lookup avoids the `escape` parameter shadowing the
      # module-level escape() function.
      self.text = globals()['escape'](_text)
    else:
      self.text = _text

  def _render(self, rendered, indent, inline):
    rendered.append(self.text)
    return rendered
def raw(s):
    '''
    Wrap *s* as a text node with escaping disabled.  Unsafe for
    untrusted input.
    '''
    node = text(s, escape=False)
    return node
|
xuxiao19910803/edx-platform | refs/heads/master | common/djangoapps/heartbeat/urls.py | 154 | from django.conf.urls import url, patterns
# Map the root URL of this app to the heartbeat view (availability probe).
urlpatterns = patterns(
    '',
    url(r'^$', 'heartbeat.views.heartbeat', name='heartbeat'),
)
|
Donkyhotay/MoonPy | refs/heads/master | zope/documenttemplate/dt_insv.py | 1 | ##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Sequence variables support
$Id: dt_insv.py 38178 2005-08-30 21:50:19Z mj $
"""
from math import sqrt
from types import IntType, TupleType
mv = None # Missing value
class sequence_variables:
    """Namespace of lazily computed variables for <dtml-in>-style sequence
    rendering: numbering (letters, roman numerals), first/last change
    detection, per-column statistics and batch navigation.

    All lookups go through __getitem__, which computes values on demand
    and caches them in self.data.  (Python 2 code: uses has_key/long.)
    """

    def __init__(self, items=None, query_string='', start_name_re=None):
        self.items = items
        self.query_string = query_string
        self.start_name_re = start_name_re
        self.data = {
            'previous-sequence': 0,
            'next-sequence': 0,
            'sequence-start': 1,
            'sequence-end': 0,
            }

    def __len__(self):
        # Always truthy when rendered.
        return 1

    def number(self, index):
        return index + 1

    def even(self, index):
        return index % 2 == 0

    def odd(self, index):
        return index % 2

    def letter(self, index):
        return chr(ord('a') + index)

    def Letter(self, index):
        return chr(ord('A') + index)

    def key(self, index):
        return self.items[index][0]

    def item(self, index, tt=TupleType):
        # (key, value) pairs yield the value; anything else is returned as-is.
        i = self.items[index]
        if isinstance(i, tt) and len(i) == 2:
            return i[1]
        return i

    def roman(self, index):
        return self.Roman(index).lower()

    def Roman(self, num):
        """Return the (1-based) roman-numeral representation of *num*."""
        # Force number to be an integer value
        num = int(num) + 1
        # Initialize roman as an empty string
        roman = ''
        while num >= 1000:
            num = num - 1000
            roman = '%sM' % roman
        while num >= 500:
            num = num - 500
            roman = '%sD' % roman
        while num >= 100:
            num = num - 100
            roman = '%sC' % roman
        while num >= 50:
            num = num - 50
            roman = '%sL' % roman
        while num >= 10:
            num = num - 10
            roman = '%sX' % roman
        while num >= 5:
            num = num - 5
            roman = '%sV' % roman
        while num < 5 and num >= 1:
            num = num - 1
            roman = '%sI' % roman
        # Replaces special cases in Roman Numerals
        roman = sub('DCCCC', 'CM', roman)
        roman = sub('CCCC', 'CD', roman)
        roman = sub('LXXXX', 'XC', roman)
        roman = sub('XXXX', 'XL', roman)
        roman = sub('VIIII', 'IX', roman)
        roman = sub('IIII', 'IV', roman)
        return roman

    def value(self, index, name):
        """Return attribute or key *name* of the item at *index*."""
        data = self.data
        item = self.items[index]
        if isinstance(item, TupleType) and len(item) == 2:
            item = item[1]
        if data['mapping']:
            return item[name]
        return getattr(item, name)

    def first(self, name, key=''):
        # True when this item's *name* differs from the previous item's.
        data = self.data
        if data['sequence-start']:
            return 1
        index = data['sequence-index']
        return self.value(index, name) != self.value(index - 1, name)

    def last(self, name, key=''):
        # True when this item's *name* differs from the next item's.
        data = self.data
        if data['sequence-end']:
            return 1
        index = data['sequence-index']
        return self.value(index, name) != self.value(index + 1, name)

    def length(self, ignored):
        l = self.data['sequence-length'] = len(self.items)
        return l

    def query(self, *ignored):
        """Compute 'sequence-query': the request query string minus the
        batch-start variable, ready for appending next/previous links."""
        if self.start_name_re is None:
            raise KeyError('sequence-query')
        query_string = self.query_string
        while query_string and query_string[:1] in '?&':
            query_string = query_string[1:]
        while query_string[-1:] == '&':
            query_string = query_string[:-1]
        if query_string:
            query_string = '&%s&' % query_string
            re = self.start_name_re
            l = re.search_group(query_string, (0,))
            if l:
                v = l[1]
                l = l[0]
                query_string = (query_string[:l] +
                                query_string[l + len(v) - 1:])
            query_string = '?' + query_string[1:]
        else:
            query_string = '?'
        self.data['sequence-query'] = query_string
        return query_string

    statistic_names = (
        'total', 'count', 'min', 'max', 'median', 'mean',
        'variance', 'variance-n', 'standard-deviation', 'standard-deviation-n',
        )

    def statistics(self, name, key):
        """Compute every statistic for column *name*, cache them all in
        self.data, then return data[key]."""
        items = self.items
        data = self.data
        mapping = data['mapping']
        count = sum = sumsq = 0
        min = max = None
        scount = smin = smax = None
        values = []
        svalues = []
        for item in items:
            try:
                if mapping:
                    item = item[name]
                else:
                    item = getattr(item, name)
                try:
                    if item is mv:
                        item = None
                    if isinstance(item, IntType):
                        s = item * long(item)
                    else:
                        s = item * item
                    sum = sum + item
                    sumsq = sumsq + s
                    values.append(item)
                    if min is None:
                        min = max = item
                    else:
                        if item < min:
                            min = item
                        if item > max:
                            max = item
                except:
                    # Non-numeric value: track separately for min/max/median.
                    if item is not None and item is not mv:
                        if smin is None:
                            smin = smax = item
                        else:
                            if item < smin:
                                smin = item
                            if item > smax:
                                smax = item
                        svalues.append(item)
            except:
                pass
        # Initialize all stats to empty strings:
        for stat in self.statistic_names:
            data['%s-%s' % (stat, name)] = ''
        count = len(values)
        try:  # Numeric statistics
            n = float(count)
            mean = sum / n
            sumsq = sumsq / n - mean * mean
            data['mean-%s' % name] = mean
            data['total-%s' % name] = sum
            data['variance-n-%s' % name] = sumsq
            data['standard-deviation-n-%s' % name] = sqrt(sumsq)
            if count > 1:
                sumsq = sumsq * n / (n - 1)
                data['variance-%s' % name] = sumsq
                data['standard-deviation-%s' % name] = sqrt(sumsq)
            else:
                data['variance-%s' % name] = ''
                data['standard-deviation-%s' % name] = ''
        except:
            # No numeric stats possible; fall back to the string values.
            if min is None:
                min, max, values = smin, smax, svalues
            else:
                if smin < min:
                    min = smin
                if smax > max:
                    max = smax
                values = values + svalues
        count = len(values)
        data['count-%s' % name] = count
        # data['_values']=values
        if min is not None:
            data['min-%s' % name] = min
            data['max-%s' % name] = max
            values.sort()
            if count == 1:
                data['median-%s' % name] = min
            else:
                n = count + 1
                if n / 2 * 2 == n:
                    data['median-%s' % name] = values[n / 2 - 1]
                else:
                    n = n / 2
                    try:
                        data['median-%s' % name] = (values[n] + values[n - 1]) / 2
                    except:
                        try:
                            data['median-%s' % name] = (
                                "between %s and %s" % (values[n], values[n - 1]))
                        except:
                            pass
        return data[key]

    def next_batches(self, suffix='batches', key=''):
        """Return sequence_variables objects describing the batches that
        follow the current one."""
        if suffix != 'batches':
            raise KeyError(key)
        data = self.data
        sequence = self.items
        try:
            if not data['next-sequence']:
                return ()
            sz = data['sequence-step-size']
            start = data['sequence-step-start']
            end = data['sequence-step-end']
            l = len(sequence)
            orphan = data['sequence-step-orphan']
            overlap = data['sequence-step-overlap']
        except:
            # BUGFIX: this was the bare tuple expression
            # ``AttributeError, 'next-batches'`` (a no-op), which let
            # execution fall through to a NameError on ``end`` below.
            raise AttributeError('next-batches')
        r = []
        while end < l:
            start, end, spam = opt(end + 1 - overlap, 0, sz, orphan, sequence)
            v = sequence_variables(self.items,
                                   self.query_string, self.start_name_re)
            d = v.data
            d['batch-start-index'] = start - 1
            d['batch-end-index'] = end - 1
            d['batch-size'] = end + 1 - start
            d['mapping'] = data['mapping']
            r.append(v)
        data['next-batches'] = r
        return r

    def previous_batches(self, suffix='batches', key=''):
        """Return sequence_variables objects describing the batches that
        precede the current one."""
        if suffix != 'batches':
            raise KeyError(key)
        data = self.data
        sequence = self.items
        try:
            if not data['previous-sequence']:
                return ()
            sz = data['sequence-step-size']
            start = data['sequence-step-start']
            end = data['sequence-step-end']
            l = len(sequence)
            orphan = data['sequence-step-orphan']
            overlap = data['sequence-step-overlap']
        except:
            # BUGFIX: was the no-op expression ``AttributeError, '...'``;
            # the intent is clearly to raise.
            raise AttributeError('previous-batches')
        r = []
        while start > 1:
            start, end, spam = opt(0, start - 1 + overlap, sz, orphan, sequence)
            v = sequence_variables(self.items,
                                   self.query_string, self.start_name_re)
            d = v.data
            d['batch-start-index'] = start - 1
            d['batch-end-index'] = end - 1
            d['batch-size'] = end + 1 - start
            d['mapping'] = data['mapping']
            r.append(v)
        r.reverse()
        data['previous-batches'] = r
        return r

    special_prefixes = {
        'first': first,
        'last': last,
        'previous': previous_batches,
        'next': next_batches,
        # These two are for backward compatability with a missfeature:
        'sequence-index': lambda self, suffix, key: self['sequence-' + suffix],
        'sequence-index-is': lambda self, suffix, key: self['sequence-' + suffix],
        }
    # Every statistic name dispatches to the statistics() method.
    for n in statistic_names:
        special_prefixes[n] = statistics

    def __getitem__(self, key,
                    special_prefixes=special_prefixes,
                    special_prefix=special_prefixes.has_key
                    ):
        """Look up *key*, computing it on demand.  Keys have the form
        'PREFIX-SUFFIX' (e.g. 'sequence-number', 'mean-price')."""
        data = self.data
        if data.has_key(key):
            return data[key]
        l = key.rfind('-')
        if l < 0:
            raise KeyError(key)
        suffix = key[l + 1:]
        prefix = key[:l]
        if hasattr(self, suffix):
            try:
                v = data[prefix + '-index']
            except:
                pass
            else:
                return getattr(self, suffix)(v)
        if special_prefix(prefix):
            return special_prefixes[prefix](self, suffix, key)
        if prefix[-4:] == '-var':
            prefix = prefix[:-4]
            try:
                return self.value(data[prefix + '-index'], suffix)
            except:
                pass
        if key == 'sequence-query':
            return self.query()
        raise KeyError(key)
def sub(s1, s2, src):
    """Return *src* with every occurrence of *s1* replaced by *s2*."""
    pieces = src.split(s1)
    return s2.join(pieces)
def opt(start, end, size, orphan, sequence):
    # Normalize a batch window over *sequence* and return (start, end, size)
    # as 1-based inclusive indices.  *orphan* is the smallest tail that may
    # stand as its own batch.  The bare ``sequence[i]`` probes below rely on
    # an IndexError to detect running off the end of the sequence.
    if size < 1:
        # No explicit size: derive it from a fully-specified window,
        # otherwise fall back to the historical default of 7.
        if start > 0 and end > 0 and end >= start:
            size = end+1 - start
        else: size = 7
    if start > 0:
        # Clamp start to the sequence length if it points past the end.
        try:
            sequence[start-1]
        except:
            start = len(sequence)
        if end > 0:
            if end < start:
                end = start
        else:
            # Derive end from start+size, absorbing a too-small tail.
            end = start + size-1
            try:
                sequence[end+orphan-1]
            except:
                end = len(sequence)
    elif end > 0:
        # Only end given: clamp it, then walk back to find start.
        try:
            sequence[end-1]
        except:
            end=len(sequence)
        start = end+1 - size
        if start - 1 < orphan:
            start = 1
    else:
        # Neither bound given: first batch from the beginning.
        start = 1
        end = start + size-1
        try:
            sequence[end+orphan-1]
        except:
            end = len(sequence)
    return start, end, size
|
bigdatauniversity/edx-platform | refs/heads/master | lms/djangoapps/courseware/tests/test_discussion_module.py | 62 | # -*- coding: utf-8 -*-
"""Test for Discussion Xmodule functional logic."""
import ddt
from mock import Mock
from . import BaseTestXmodule
from courseware.module_render import get_module_for_descriptor_internal
from xmodule.discussion_module import DiscussionModule
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from student.tests.factories import UserFactory
@ddt.ddt
class DiscussionModuleTest(BaseTestXmodule):
    """Logic tests for Discussion Xmodule."""
    CATEGORY = "discussion"

    def test_html_with_user(self):
        """
        Render the discussion module for a plain (non-privileged) user and
        check the permission flags embedded in the HTML.
        """
        discussion = get_module_for_descriptor_internal(
            user=self.users[0],
            descriptor=self.item_descriptor,
            student_data=Mock(name='student_data'),
            course_id=self.course.id,
            track_function=Mock(name='track_function'),
            xqueue_callback_url_prefix=Mock(name='xqueue_callback_url_prefix'),
            request_token='request_token',
        )
        fragment = discussion.render('student_view')
        html = fragment.content
        self.assertIn('data-user-create-comment="false"', html)
        self.assertIn('data-user-create-subcomment="false"', html)

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_discussion_render_successfully_with_orphan_parent(self, default_store):
        """
        Test that discussion module render successfully
        if discussion module is child of an orphan.
        """
        user = UserFactory.create()
        store = modulestore()
        with store.default_store(default_store):
            course = store.create_course('testX', 'orphan', '123X', user.id)
            # Build an orphan sequential (not attached to the course tree)
            # and hang a vertical + discussion beneath it.
            orphan_sequential = store.create_item(self.user.id, course.id, 'sequential')
            vertical = store.create_child(
                user.id,
                orphan_sequential.location,
                'vertical',
                block_id=course.location.block_id
            )
            discussion = store.create_child(
                user.id,
                vertical.location,
                'discussion',
                block_id=course.location.block_id
            )
            discussion = store.get_item(discussion.location)
            root = self.get_root(discussion)
            # Assert that orphan sequential is root of the discussion module.
            self.assertEqual(orphan_sequential.location.block_type, root.location.block_type)
            self.assertEqual(orphan_sequential.location.block_id, root.location.block_id)
            # Get module system bound to a user and a descriptor.
            discussion_module = get_module_for_descriptor_internal(
                user=user,
                descriptor=discussion,
                student_data=Mock(name='student_data'),
                course_id=course.id,
                track_function=Mock(name='track_function'),
                xqueue_callback_url_prefix=Mock(name='xqueue_callback_url_prefix'),
                request_token='request_token',
            )
            fragment = discussion_module.render('student_view')
            html = fragment.content
            self.assertIsInstance(discussion_module._xmodule, DiscussionModule)  # pylint: disable=protected-access
            self.assertIn('data-user-create-comment="false"', html)
            self.assertIn('data-user-create-subcomment="false"', html)

    def get_root(self, block):
        """
        Return root of the block.
        """
        while block.parent:
            block = block.get_parent()
        return block
|
nathanial/lettuce | refs/heads/master | tests/integration/lib/Django-1.2.5/django/contrib/gis/geometry/backend/__init__.py | 388 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module

# Which geometry implementation to use; defaults to GEOS.  (Python 2
# except-clause syntax below.)
geom_backend = getattr(settings, 'GEOMETRY_BACKEND', 'geos')

try:
    # First try a backend shipped inside contrib.gis (e.g. 'geos').
    module = import_module('.%s' % geom_backend, 'django.contrib.gis.geometry.backend')
except ImportError, e:
    try:
        # Fall back to treating the setting as a full dotted module path.
        module = import_module(geom_backend)
    except ImportError, e_user:
        raise ImproperlyConfigured('Could not import user-defined GEOMETRY_BACKEND '
                                   '"%s".' % geom_backend)

# The chosen backend module must export these two names.
try:
    Geometry = module.Geometry
    GeometryException = module.GeometryException
except AttributeError:
    raise ImproperlyConfigured('Cannot import Geometry from the "%s" '
                               'geometry backend.' % geom_backend)
|
Echelon85/volatility | refs/heads/master | volatility/plugins/procdump.py | 44 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
#
# Additional Authors:
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import os
import struct
import volatility.plugins.taskmods as taskmods
import volatility.debug as debug
import volatility.obj as obj
import volatility.exceptions as exceptions
class ProcExeDump(taskmods.DllList):
    """Dump a process to an executable file sample"""

    def __init__(self, config, *args, **kwargs):
        taskmods.DllList.__init__(self, config, *args, **kwargs)
        config.add_option('DUMP-DIR', short_option = 'D', default = None,
                          cache_invalidator = False,
                          help = 'Directory in which to dump executable files')
        config.add_option("UNSAFE", short_option = "u", default = False, action = 'store_true',
                          help = 'Bypasses certain sanity checks when creating image')

    def dump_pe(self, space, base, dump_file):
        """
        Dump a PE from an AS into a file.

        @param space: an AS to use
        @param base: PE base address
        @param dump_file: dumped file name

        @returns a string status message
        """
        of = open(os.path.join(self._config.DUMP_DIR, dump_file), 'wb')
        try:
            # get_image yields (file offset, bytes) pairs; write each chunk
            # at its proper PE file offset.
            for offset, code in self.get_image(space, base):
                of.seek(offset)
                of.write(code)
            result = "OK: {0}".format(dump_file)
        except ValueError, ve:
            result = "Error: {0}".format(ve)
        except exceptions.SanityCheckException, ve:
            result = "Error: {0} Try -u/--unsafe".format(ve)
        finally:
            of.close()
        return result

    def render_text(self, outfd, data):
        """Renders the tasks to disk images, outputting progress as they go"""
        if self._config.DUMP_DIR == None:
            debug.error("Please specify a dump directory (--dump-dir)")
        if not os.path.isdir(self._config.DUMP_DIR):
            debug.error(self._config.DUMP_DIR + " is not a directory")
        self.table_header(outfd,
                          [("Process(V)", "[addrpad]"),
                           ("ImageBase", "[addrpad]"),
                           ("Name", "20"),
                           ("Result", "")])
        for task in data:
            task_space = task.get_process_address_space()
            if task_space == None:
                result = "Error: Cannot acquire process AS"
            elif task.Peb == None:
                # we must use m() here, because any other attempt to
                # reference task.Peb will try to instantiate the _PEB
                result = "Error: PEB at {0:#x} is paged".format(task.m('Peb'))
            elif task_space.vtop(task.Peb.ImageBaseAddress) == None:
                result = "Error: ImageBaseAddress at {0:#x} is paged".format(task.Peb.ImageBaseAddress)
            else:
                dump_file = "executable." + str(task.UniqueProcessId) + ".exe"
                result = self.dump_pe(task_space,
                                      task.Peb.ImageBaseAddress,
                                      dump_file)
            self.table_row(outfd,
                           task.obj_offset,
                           task.Peb.ImageBaseAddress,
                           task.ImageFileName,
                           result)

    def round(self, addr, align, up = False):
        """Rounds down an address based on an alignment"""
        # With up=True this instead rounds *up* to the next multiple.
        if addr % align == 0:
            return addr
        else:
            if up:
                return (addr + (align - (addr % align)))
            return (addr - (addr % align))

    def get_nt_header(self, addr_space, base_addr):
        """Returns the NT Header object for a task"""
        dos_header = obj.Object("_IMAGE_DOS_HEADER", offset = base_addr,
                                vm = addr_space)
        return dos_header.get_nt_header()

    def get_code(self, addr_space, data_start, data_size, offset):
        """Returns a single section of re-created data from a file image"""
        # Reads proceed a page (0x1000) at a time so unmapped pages can be
        # reported individually; zread() zero-fills unreadable ranges.
        first_block = 0x1000 - data_start % 0x1000
        full_blocks = ((data_size + (data_start % 0x1000)) / 0x1000) - 1
        left_over = (data_size + data_start) % 0x1000
        paddr = addr_space.vtop(data_start)
        code = ""
        # Deal with reads that are smaller than a block
        if data_size < first_block:
            data_read = addr_space.zread(data_start, data_size)
            if paddr == None:
                if self._config.verbose:
                    debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(data_start, offset, data_size))
            code += data_read
            return (offset, code)
        data_read = addr_space.zread(data_start, first_block)
        if paddr == None:
            if self._config.verbose:
                debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(data_start, offset, first_block))
        code += data_read
        # The middle part of the read
        new_vaddr = data_start + first_block
        for _i in range(0, full_blocks):
            data_read = addr_space.zread(new_vaddr, 0x1000)
            if addr_space.vtop(new_vaddr) == None:
                if self._config.verbose:
                    debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(new_vaddr, offset, 0x1000))
            code += data_read
            new_vaddr = new_vaddr + 0x1000
        # The last part of the read
        if left_over > 0:
            data_read = addr_space.zread(new_vaddr, left_over)
            if addr_space.vtop(new_vaddr) == None:
                if self._config.verbose:
                    debug.debug("Memory Not Accessible: Virtual Address: 0x{0:x} File Offset: 0x{1:x} Size: 0x{2:x}\n".format(new_vaddr, offset, left_over))
            code += data_read
        return (offset, code)

    def get_image(self, addr_space, base_addr):
        """Outputs an executable disk image of a process"""
        nt_header = self.get_nt_header(addr_space = addr_space,
                                       base_addr = base_addr)
        soh = nt_header.OptionalHeader.SizeOfHeaders
        header = addr_space.zread(base_addr, soh)
        yield (0, header)
        fa = nt_header.OptionalHeader.FileAlignment
        for sect in nt_header.get_sections(self._config.UNSAFE):
            foa = self.round(sect.PointerToRawData, fa)
            if foa != sect.PointerToRawData:
                debug.warning("Section start on disk not aligned to file alignment.\n")
                debug.warning("Adjusted section start from {0} to {1}.\n".format(sect.PointerToRawData, foa))
            yield self.get_code(addr_space,
                                sect.VirtualAddress + base_addr,
                                sect.SizeOfRawData, foa)
class ProcMemDump(ProcExeDump):
    """Dump a process to an executable memory sample"""

    def replace_header_field(self, sect, header, item, value):
        """Replaces a field in a sector header"""
        # Patch the packed bytes of *item* (a struct member of *sect*)
        # inside the raw *header* string with *value*.
        field_size = item.size()
        start = item.obj_offset - sect.obj_offset
        end = start + field_size
        newval = struct.pack(item.format_string, int(value))
        result = header[:start] + newval + header[end:]
        return result

    def get_image(self, addr_space, base_addr):
        """Outputs an executable memory image of a process"""
        # Unlike ProcExeDump.get_image, sections are dumped at their virtual
        # addresses, so section headers are rewritten to make raw
        # offsets/sizes match the in-memory layout.
        nt_header = self.get_nt_header(addr_space, base_addr)
        sa = nt_header.OptionalHeader.SectionAlignment
        shs = addr_space.profile.get_obj_size('_IMAGE_SECTION_HEADER')
        yield self.get_code(addr_space, base_addr, nt_header.OptionalHeader.SizeOfImage, 0)
        prevsect = None
        sect_sizes = []
        # Section i's in-memory size is the gap to section i+1; the last one
        # uses its VirtualSize rounded up to SectionAlignment.
        for sect in nt_header.get_sections(self._config.UNSAFE):
            if prevsect is not None:
                sect_sizes.append(sect.VirtualAddress - prevsect.VirtualAddress)
            prevsect = sect
        if prevsect is not None:
            sect_sizes.append(self.round(prevsect.Misc.VirtualSize, sa, up = True))
        counter = 0
        start_addr = nt_header.FileHeader.SizeOfOptionalHeader + (nt_header.OptionalHeader.obj_offset - base_addr)
        for sect in nt_header.get_sections(self._config.UNSAFE):
            sectheader = addr_space.read(sect.obj_offset, shs)
            # Change the PointerToRawData
            sectheader = self.replace_header_field(sect, sectheader, sect.PointerToRawData, sect.VirtualAddress)
            sectheader = self.replace_header_field(sect, sectheader, sect.SizeOfRawData, sect_sizes[counter])
            sectheader = self.replace_header_field(sect, sectheader, sect.Misc.VirtualSize, sect_sizes[counter])
            yield (start_addr + (counter * shs), sectheader)
            counter += 1
|
ihsanudin/odoo | refs/heads/8.0 | addons/account/wizard/account_open_closed_fiscalyear.py | 237 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_open_closed_fiscalyear(osv.osv_memory):
    # Transient wizard: pick a fiscal year and cancel its closing entries.
    _name = "account.open.closed.fiscalyear"
    _description = "Choose Fiscal Year"
    _columns = {
        'fyear_id': fields.many2one('account.fiscalyear', \
             'Fiscal Year', required=True, help='Select Fiscal Year which you want to remove entries for its End of year entries journal'),
    }

    def remove_entries(self, cr, uid, ids, context=None):
        # Cancel a fiscal year's closing: delete every account.move posted
        # in the year's "End of Year Entries Journal" journal/period pair.
        move_obj = self.pool.get('account.move')
        data = self.browse(cr, uid, ids, context=context)[0]
        period_journal = data.fyear_id.end_journal_period_id or False
        if not period_journal:
            raise osv.except_osv(_('Error!'), _("You have to set the 'End of Year Entries Journal' for this Fiscal Year which is set after generating opening entries from 'Generate Opening Entries'."))
        if period_journal.period_id.state == 'done':
            raise osv.except_osv(_('Error!'), _("You can not cancel closing entries if the 'End of Year Entries Journal' period is closed."))
        ids_move = move_obj.search(cr, uid, [('journal_id','=',period_journal.journal_id.id),('period_id','=',period_journal.period_id.id)])
        if ids_move:
            # Direct SQL delete bypasses the ORM, so flush the ORM cache.
            cr.execute('delete from account_move where id IN %s', (tuple(ids_move),))
            self.invalidate_cache(cr, uid, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
40123148/2015cdbg11_0420 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/logging/config.py | 739 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, socket, struct, traceback, re
import io
try:
import _thread as thread
import threading
except ImportError: #pragma: no cover
thread = None
from socketserver import ThreadingTCPServer, StreamRequestHandler
# TCP port on which listen() accepts new logging configurations by default.
DEFAULT_LOGGING_CONFIG_PORT = 9030
# Error code meaning "connection reset by peer"; platform-specific.
if sys.platform == "win32":
    RESET_ERROR = 10054 #WSAECONNRESET
else:
    RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
    """
    Read the logging configuration from a ConfigParser-format file.
    This can be called several times from an application, allowing an end user
    the ability to select from various pre-canned configurations (if the
    developer provides a mechanism to present the choices and load the chosen
    configuration).

    :param fname: a file name or an open file-like object with a
        ``readline`` method.
    :param defaults: passed straight to ``ConfigParser`` as its defaults.
    :param disable_existing_loggers: when True, loggers that existed before
        this call but are not named in the new configuration are disabled.
    """
    import configparser
    cp = configparser.ConfigParser(defaults)
    # Accept either an already-open stream or a path.
    if hasattr(fname, 'readline'):
        cp.read_file(fname)
    else:
        cp.read(fname)
    formatters = _create_formatters(cp)
    # critical section
    logging._acquireLock()
    try:
        # Wipe all previously registered handlers; new handlers re-register
        # themselves with the logging module as they are created.
        logging._handlers.clear()
        del logging._handlerList[:]
        # Handlers add themselves to logging._handlers
        handlers = _install_handlers(cp, formatters)
        _install_loggers(cp, handlers, disable_existing_loggers)
    finally:
        logging._releaseLock()
def _resolve(name):
    """Resolve a dotted name to a global object."""
    parts = name.split('.')
    path = parts[0]
    obj = __import__(path)
    for part in parts[1:]:
        path = '%s.%s' % (path, part)
        try:
            obj = getattr(obj, part)
        except AttributeError:
            # The attribute is a submodule that has not been imported yet;
            # import it and fetch the attribute again.
            __import__(path)
            obj = getattr(obj, part)
    return obj
def _strip_spaces(alist):
    """Yield each element of *alist* with surrounding whitespace removed."""
    return (item.strip() for item in alist)
def _create_formatters(cp):
    """Create and return formatters"""
    # The [formatters] section lists the formatter names in its "keys" option.
    flist = cp["formatters"]["keys"]
    if not len(flist):
        return {}
    flist = flist.split(",")
    flist = _strip_spaces(flist)
    formatters = {}
    for form in flist:
        sectname = "formatter_%s" % form
        # raw=True: format strings contain '%' placeholders that must not be
        # interpolated by ConfigParser.
        fs = cp.get(sectname, "format", raw=True, fallback=None)
        dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
        c = logging.Formatter
        class_name = cp[sectname].get("class")
        if class_name:
            # A custom formatter class was named; resolve its dotted path.
            c = _resolve(class_name)
        f = c(fs, dfs)
        formatters[form] = f
    return formatters
def _install_handlers(cp, formatters):
    """Install and return handlers"""
    hlist = cp["handlers"]["keys"]
    if not len(hlist):
        return {}
    hlist = hlist.split(",")
    hlist = _strip_spaces(hlist)
    handlers = {}
    fixups = [] #for inter-handler references
    for hand in hlist:
        section = cp["handler_%s" % hand]
        klass = section["class"]
        fmt = section.get("formatter", "")
        try:
            # NOTE(security): the class name and args below are eval'd from
            # the configuration file; only load config from trusted sources.
            klass = eval(klass, vars(logging))
        except (AttributeError, NameError):
            klass = _resolve(klass)
        args = section["args"]
        args = eval(args, vars(logging))
        h = klass(*args)
        if "level" in section:
            level = section["level"]
            h.setLevel(logging._levelNames[level])
        if len(fmt):
            h.setFormatter(formatters[fmt])
        if issubclass(klass, logging.handlers.MemoryHandler):
            target = section.get("target", "")
            if len(target): #the target handler may not be loaded yet, so keep for later...
                fixups.append((h, target))
        handlers[hand] = h
    #now all handlers are loaded, fixup inter-handler references...
    for h, t in fixups:
        h.setTarget(handlers[t])
    return handlers
def _handle_existing_loggers(existing, child_loggers, disable_existing):
    """
    When (re)configuring logging, handle loggers which were in the previous
    configuration but are not in the new configuration. There's no point
    deleting them as other threads may continue to hold references to them;
    and by disabling them, you stop them doing any logging.
    However, don't disable children of named loggers, as that's probably not
    what was intended by the user. Also, allow existing loggers to NOT be
    disabled if disable_existing is false.
    """
    logger_dict = logging.root.manager.loggerDict
    for name in existing:
        logger = logger_dict[name]
        if name in child_loggers:
            # Children of explicitly configured loggers are reset rather than
            # disabled, so they delegate to their configured ancestor.
            logger.level = logging.NOTSET
            logger.handlers = []
            logger.propagate = True
        else:
            logger.disabled = disable_existing
def _install_loggers(cp, handlers, disable_existing):
    """Create and install loggers"""
    # configure the root first
    llist = cp["loggers"]["keys"]
    llist = llist.split(",")
    llist = list(map(lambda x: x.strip(), llist))
    llist.remove("root")
    section = cp["logger_root"]
    root = logging.root
    log = root
    if "level" in section:
        level = section["level"]
        log.setLevel(logging._levelNames[level])
    # Drop whatever handlers the root logger had before this configuration.
    for h in root.handlers[:]:
        root.removeHandler(h)
    hlist = section["handlers"]
    if len(hlist):
        hlist = hlist.split(",")
        hlist = _strip_spaces(hlist)
        for hand in hlist:
            log.addHandler(handlers[hand])
    #and now the others...
    #we don't want to lose the existing loggers,
    #since other threads may have pointers to them.
    #existing is set to contain all existing loggers,
    #and as we go through the new configuration we
    #remove any which are configured. At the end,
    #what's left in existing is the set of loggers
    #which were in the previous configuration but
    #which are not in the new configuration.
    existing = list(root.manager.loggerDict.keys())
    #The list needs to be sorted so that we can
    #avoid disabling child loggers of explicitly
    #named loggers. With a sorted list it is easier
    #to find the child loggers.
    existing.sort()
    #We'll keep the list of existing loggers
    #which are children of named loggers here...
    child_loggers = []
    #now set up the new ones...
    for log in llist:
        section = cp["logger_%s" % log]
        # "qualname" is the dotted logger name actually used at runtime;
        # the section name (after "logger_") is only a config-file label.
        qn = section["qualname"]
        propagate = section.getint("propagate", fallback=1)
        logger = logging.getLogger(qn)
        if qn in existing:
            # existing is sorted, so all descendants of qn immediately
            # follow it; collect them as children of a configured logger.
            i = existing.index(qn) + 1 # start with the entry after qn
            prefixed = qn + "."
            pflen = len(prefixed)
            num_existing = len(existing)
            while i < num_existing:
                if existing[i][:pflen] == prefixed:
                    child_loggers.append(existing[i])
                i += 1
            existing.remove(qn)
        if "level" in section:
            level = section["level"]
            logger.setLevel(logging._levelNames[level])
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        logger.propagate = propagate
        logger.disabled = 0
        hlist = section["handlers"]
        if len(hlist):
            hlist = hlist.split(",")
            hlist = _strip_spaces(hlist)
            for hand in hlist:
                logger.addHandler(handlers[hand])
    #Disable any old loggers. There's no point deleting
    #them as other threads may continue to hold references
    #and by disabling them, you stop them doing any logging.
    #However, don't disable children of named loggers, as that's
    #probably not what was intended by the user.
    #for log in existing:
    #    logger = root.manager.loggerDict[log]
    #    if log in child_loggers:
    #        logger.level = logging.NOTSET
    #        logger.handlers = []
    #        logger.propagate = 1
    #    elif disable_existing_loggers:
    #        logger.disabled = 1
    _handle_existing_loggers(existing, child_loggers, disable_existing)
# Pattern for a plain ASCII Python identifier (case-insensitive match).
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
    """Return True if *s* is a valid identifier, else raise ValueError."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper."""
    # NOTE: a ``configurator`` attribute (a BaseConfigurator) is assigned
    # externally before any access; value conversion is delegated to it.
    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def pop(self, key, default=None):
        # The popped value is converted but, having been removed, is not
        # written back into the dict.
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class ConvertingList(list):
    """A converting list wrapper."""
    # NOTE: a ``configurator`` attribute (a BaseConfigurator) is assigned
    # externally before any access; value conversion is delegated to it.
    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def pop(self, idx=-1):
        # Converted pop result is returned but not written back (the slot
        # no longer exists); no ``key`` attribute is recorded here.
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
        return result
class ConvertingTuple(tuple):
    """A converting tuple wrapper."""
    # NOTE: a ``configurator`` attribute (a BaseConfigurator) is assigned
    # externally before any access; value conversion is delegated to it.
    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        # Tuples are immutable, so the converted value cannot be cached
        # back; conversion happens on every access.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')
    # Maps a protocol prefix (e.g. 'ext' in 'ext://sys.stderr') to the name
    # of the converter method used for it.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }
    # We might want to use a different one, e.g. importlib
    importer = staticmethod(__import__)
    def __init__(self, config):
        # Wrap the raw dict so nested values are converted lazily on access.
        self.config = ConvertingDict(config)
        self.config.configurator = self
    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # Probably a not-yet-imported submodule; import and retry.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            # Re-raise as ValueError, keeping the original cause/traceback.
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v
    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)
    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        # Walks a path such as "handlers[0].level" through self.config,
        # alternating dotted access and [index] access.
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            #print d, rest
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d
    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, str): # str for py3k
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value
    def configure_custom(self, config):
        """Configure an object with a user-supplied factory."""
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        # '.' holds attributes to set on the constructed object afterwards.
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result
    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
    """
    Configure logging using a dictionary-like object to describe the
    configuration.
    """
    def configure(self):
        """Do the configuration."""
        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        logging._acquireLock()
        try:
            if incremental:
                # Incremental mode only adjusts levels/propagation of
                # already-existing handlers and loggers.
                handlers = config.get('handlers', EMPTY_DICT)
                for name in handlers:
                    if name not in logging._handlers:
                        raise ValueError('No handler found with '
                                         'name %r' % name)
                    else:
                        try:
                            handler = logging._handlers[name]
                            handler_config = handlers[name]
                            level = handler_config.get('level', None)
                            if level:
                                handler.setLevel(logging._checkLevel(level))
                        except Exception as e:
                            raise ValueError('Unable to configure handler '
                                             '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except Exception as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except Exception as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)
                logging._handlers.clear()
                del logging._handlerList[:]
                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                                                            formatters[name])
                    except Exception as e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except Exception as e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))
                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                deferred = []
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except Exception as e:
                        # A MemoryHandler whose target handler has not been
                        # built yet raises this marker; retry it later.
                        if 'target not configured yet' in str(e):
                            deferred.append(name)
                        else:
                            raise ValueError('Unable to configure handler '
                                             '%r: %s' % (name, e))
                # Now do any that were deferred
                for name in deferred:
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except Exception as e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters
                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = list(root.manager.loggerDict.keys())
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        i = existing.index(name) + 1 # look after name
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        while i < num_existing:
                            if existing[i][:pflen] == prefixed:
                                child_loggers.append(existing[i])
                            i += 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except Exception as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                #for log in existing:
                #    logger = root.manager.loggerDict[log]
                #    if log in child_loggers:
                #        logger.level = logging.NOTSET
                #        logger.handlers = []
                #        logger.propagate = True
                #    elif disable_existing:
                #        logger.disabled = True
                _handle_existing_loggers(existing, child_loggers,
                                         disable_existing)
                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except Exception as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()
    def configure_formatter(self, config):
        """Configure a formatter from a dictionary."""
        if '()' in config:
            factory = config['()'] # for use in exception handler
            try:
                result = self.configure_custom(config)
            except TypeError as te:
                if "'format'" not in str(te):
                    raise
                #Name of parameter changed from fmt to format.
                #Retry with old name.
                #This is so that code can be used with older Python versions
                #(e.g. by Django)
                config['fmt'] = config.pop('format')
                config['()'] = factory
                result = self.configure_custom(config)
        else:
            fmt = config.get('format', None)
            dfmt = config.get('datefmt', None)
            style = config.get('style', '%')
            result = logging.Formatter(fmt, dfmt, style)
        return result
    def configure_filter(self, config):
        """Configure a filter from a dictionary."""
        if '()' in config:
            result = self.configure_custom(config)
        else:
            name = config.get('name', '')
            result = logging.Filter(name)
        return result
    def add_filters(self, filterer, filters):
        """Add filters to a filterer from a list of names."""
        for f in filters:
            try:
                filterer.addFilter(self.config['filters'][f])
            except Exception as e:
                raise ValueError('Unable to add filter %r: %s' % (f, e))
    def configure_handler(self, config):
        """Configure a handler from a dictionary."""
        config_copy = dict(config) # for restoring in case of error
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                formatter = self.config['formatters'][formatter]
            except Exception as e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            c = config.pop('()')
            if not callable(c):
                c = self.resolve(c)
            factory = c
        else:
            cname = config.pop('class')
            klass = self.resolve(cname)
            #Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
                try:
                    th = self.config['handlers'][config['target']]
                    if not isinstance(th, logging.Handler):
                        config.update(config_copy) # restore for deferred cfg
                        raise TypeError('target not configured yet')
                    config['target'] = th
                except Exception as e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        try:
            result = factory(**kwargs)
        except TypeError as te:
            if "'stream'" not in str(te):
                raise
            #The argument name changed from strm to stream
            #Retry with old name.
            #This is so that code can be used with older Python versions
            #(e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(logging._checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result
    def add_handlers(self, logger, handlers):
        """Add handlers to a logger from a list of names."""
        for h in handlers:
            try:
                logger.addHandler(self.config['handlers'][h])
            except Exception as e:
                raise ValueError('Unable to add handler %r: %s' % (h, e))
    def common_logger_config(self, logger, config, incremental=False):
        """
        Perform configuration which is common to root and non-root loggers.
        """
        level = config.get('level', None)
        if level is not None:
            logger.setLevel(logging._checkLevel(level))
        if not incremental:
            #Remove any existing handlers
            for h in logger.handlers[:]:
                logger.removeHandler(h)
            handlers = config.get('handlers', None)
            if handlers:
                self.add_handlers(logger, handlers)
            filters = config.get('filters', None)
            if filters:
                self.add_filters(logger, filters)
    def configure_logger(self, name, config, incremental=False):
        """Configure a non-root logger from a dictionary."""
        logger = logging.getLogger(name)
        self.common_logger_config(logger, config, incremental)
        propagate = config.get('propagate', None)
        if propagate is not None:
            logger.propagate = propagate
    def configure_root(self, config, incremental=False):
        """Configure a root logger from a dictionary."""
        root = logging.getLogger()
        self.common_logger_config(root, config, incremental)
# Indirection point so applications can substitute their own configurator
# class before calling dictConfig().
dictConfigClass = DictConfigurator
def dictConfig(config):
    """Configure logging using a dictionary."""
    configurator = dictConfigClass(config)
    configurator.configure()
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.
    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().
    """
    if not thread: #pragma: no cover
        raise NotImplementedError("listen() needs threading to work")
    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.
        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.
            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    # Keep reading until the full payload has arrived.
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    chunk = chunk.decode("utf-8")
                    # First try the payload as a JSON dict (dictConfig);
                    # on any failure fall back to ConfigParser (fileConfig).
                    try:
                        import json
                        d =json.loads(chunk)
                        assert isinstance(d, dict)
                        dictConfig(d)
                    except:
                        #Apply new configuration.
                        file = io.StringIO(chunk)
                        try:
                            fileConfig(file)
                        except (KeyboardInterrupt, SystemExit): #pragma: no cover
                            raise
                        except:
                            traceback.print_exc()
                    if self.server.ready:
                        self.server.ready.set()
            except socket.error as e:
                if not isinstance(e.args, tuple):
                    raise
                else:
                    # Ignore connection resets from the peer; re-raise
                    # everything else.
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise
    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """
        allow_reuse_address = 1
        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None, ready=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1
            self.ready = ready
        def serve_until_stopped(self):
            import select
            abort = 0
            # Poll the listening socket once per timeout so that an abort
            # request set from another thread is noticed promptly.
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()
            self.socket.close()
    class Server(threading.Thread):
        # Thread that owns the receiver; publishes it to the module-level
        # _listener so stopListening() can shut it down.
        def __init__(self, rcvr, hdlr, port):
            super(Server, self).__init__()
            self.rcvr = rcvr
            self.hdlr = hdlr
            self.port = port
            self.ready = threading.Event()
        def run(self):
            server = self.rcvr(port=self.port, handler=self.hdlr,
                               ready=self.ready)
            if self.port == 0:
                # Port 0 means "any free port"; record the one chosen.
                self.port = server.server_address[1]
            self.ready.set()
            global _listener
            logging._acquireLock()
            _listener = server
            logging._releaseLock()
            server.serve_until_stopped()
    return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
    """
    Stop the listening server which was created with a call to listen().
    """
    global _listener
    logging._acquireLock()
    try:
        server = _listener
        if server:
            # Signal the serve_until_stopped() loop to exit, then drop our
            # reference to the server.
            server.abort = 1
            _listener = None
    finally:
        logging._releaseLock()
|
marmyshev/bug_1117098 | refs/heads/master | openlp/core/__init__.py | 2 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`core` module provides all core application functions
All the core functions of the OpenLP application including the GUI, settings,
logging and a plugin framework are contained within the openlp.core module.
"""
import os
import sys
import platform
import logging
from optparse import OptionParser
from traceback import format_exception
from PyQt4 import QtCore, QtGui
from openlp.core.lib import Settings, ScreenList, UiStrings, Registry, check_directory_exists
from openlp.core.resources import qInitResources
from openlp.core.ui.mainwindow import MainWindow
from openlp.core.ui.firsttimelanguageform import FirstTimeLanguageForm
from openlp.core.ui.firsttimeform import FirstTimeForm
from openlp.core.ui.exceptionform import ExceptionForm
from openlp.core.ui import SplashScreen
from openlp.core.utils import AppLocation, LanguageManager, VersionThread, get_application_version
__all__ = ['OpenLP', 'main']
log = logging.getLogger()
NT_REPAIR_STYLESHEET = """
QMainWindow::separator
{
border: none;
}
QDockWidget::title
{
border: 1px solid palette(dark);
padding-left: 5px;
padding-top: 2px;
margin: 1px 0;
}
QToolBar
{
border: none;
margin: 0;
padding: 0;
}
"""
class OpenLP(QtGui.QApplication):
    """
    The core application class. This class inherits from Qt's QApplication
    class in order to provide the core of the application.
    """
    # NOTE: class-level mutable list, shared by all instances; in practice
    # exactly one OpenLP instance exists per process.
    args = []
    def exec_(self):
        """
        Override exec method to allow the shared memory to be released on exit
        """
        self.is_event_loop_active = True
        result = QtGui.QApplication.exec_()
        # Detach the single-instance guard created in is_already_running().
        self.shared_memory.detach()
        return result
    def run(self, args):
        """
        Run the OpenLP application.
        """
        self.is_event_loop_active = False
        # On Windows, the args passed into the constructor are ignored. Not very handy, so set the ones we want to use.
        # On Linux and FreeBSD, in order to set the WM_CLASS property for X11, we pass "OpenLP" in as a command line
        # argument. This interferes with files being passed in as command line arguments, so we remove it from the list.
        if 'OpenLP' in args:
            args.remove('OpenLP')
        self.args.extend(args)
        # Decide how many screens we have and their size
        screens = ScreenList.create(self.desktop())
        # First time checks in settings
        has_run_wizard = Settings().value('core/has run wizard')
        if not has_run_wizard:
            if FirstTimeForm(screens).exec_() == QtGui.QDialog.Accepted:
                Settings().setValue('core/has run wizard', True)
        # Correct stylesheet bugs
        application_stylesheet = ''
        if not Settings().value('advanced/alternate rows'):
            # Force alternate rows to the plain base colour when disabled.
            base_color = self.palette().color(QtGui.QPalette.Active, QtGui.QPalette.Base)
            alternate_rows_repair_stylesheet = \
                'QTableWidget, QListWidget, QTreeWidget {alternate-background-color: ' + base_color.name() + ';}\n'
            application_stylesheet += alternate_rows_repair_stylesheet
        if os.name == 'nt':
            application_stylesheet += NT_REPAIR_STYLESHEET
        if application_stylesheet:
            self.setStyleSheet(application_stylesheet)
        show_splash = Settings().value('core/show splash')
        if show_splash:
            self.splash = SplashScreen()
            self.splash.show()
        # make sure Qt really display the splash screen
        self.processEvents()
        # start the main app window
        self.main_window = MainWindow()
        Registry().execute('bootstrap_initialise')
        Registry().execute('bootstrap_post_set_up')
        self.main_window.show()
        if show_splash:
            # now kill the splashscreen
            self.splash.finish(self.main_window)
            log.debug('Splashscreen closed')
        # make sure Qt really display the splash screen
        self.processEvents()
        self.main_window.repaint()
        self.processEvents()
        if not has_run_wizard:
            self.main_window.first_time()
        update_check = Settings().value('core/update check')
        if update_check:
            VersionThread(self.main_window).start()
        self.main_window.is_display_blank()
        self.main_window.app_startup()
        # Enter the Qt event loop; blocks until the application quits.
        return self.exec_()
    def is_already_running(self):
        """
        Look to see if OpenLP is already running and ask if a 2nd instance is to be started.

        Returns True when another instance is running and the user declined
        to start a second one; False otherwise.
        """
        # A named shared-memory segment acts as a cross-process guard.
        self.shared_memory = QtCore.QSharedMemory('OpenLP')
        if self.shared_memory.attach():
            status = QtGui.QMessageBox.critical(None, UiStrings().Error, UiStrings().OpenLPStart,
                QtGui.QMessageBox.StandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No))
            if status == QtGui.QMessageBox.No:
                return True
            return False
        else:
            self.shared_memory.create(1)
            return False
    def hook_exception(self, exctype, value, traceback):
        """
        Add an exception hook so that any uncaught exceptions are displayed in this window rather than somewhere where
        users cannot see it and cannot report when we encounter these problems.
        ``exctype``
            The class of exception.
        ``value``
            The actual exception object.
        ``traceback``
            A traceback object with the details of where the exception occurred.
        """
        log.exception(''.join(format_exception(exctype, value, traceback)))
        # Lazily create and reuse a single exception dialog.
        if not hasattr(self, 'exception_form'):
            self.exception_form = ExceptionForm()
        self.exception_form.exception_text_edit.setPlainText(''.join(format_exception(exctype, value, traceback)))
        self.set_normal_cursor()
        self.exception_form.exec_()
    def process_events(self):
        """
        Wrapper to make ProcessEvents visible and named correctly
        """
        log.debug('processing event flush')
        self.processEvents()
    def set_busy_cursor(self):
        """
        Sets the Busy Cursor for the Application
        """
        self.setOverrideCursor(QtCore.Qt.BusyCursor)
        self.processEvents()
    def set_normal_cursor(self):
        """
        Sets the Normal Cursor for the Application
        """
        self.restoreOverrideCursor()
        self.processEvents()
    def event(self, event):
        """
        Enables direct file opening on OS X
        """
        if event.type() == QtCore.QEvent.FileOpen:
            file_name = event.file()
            log.debug('Got open file event for %s!', file_name)
            # Queue the file so it is processed like a command-line argument.
            self.args.insert(0, file_name)
            return True
        else:
            return QtGui.QApplication.event(self, event)
def set_up_logging(log_path):
    """
    Setup our logging using log_path
    """
    check_directory_exists(log_path, True)
    log_file_name = os.path.join(log_path, 'openlp.log')
    # Truncate ('w') any previous log file on startup.
    file_handler = logging.FileHandler(log_file_name, 'w')
    file_handler.setFormatter(logging.Formatter('%(asctime)s %(name)-55s %(levelname)-8s %(message)s'))
    log.addHandler(file_handler)
    if log.isEnabledFor(logging.DEBUG):
        print('Logging to: %s' % log_file_name)
def main(args=None):
    """
    The main function which parses command line options and then runs
    the PyQt4 Application.

    ``args``
        Optional list of command line arguments; when omitted, ``sys.argv``
        is parsed instead.
    """
    # Set up command line options.
    usage = 'Usage: %prog [options] [qt-options]'
    parser = OptionParser(usage=usage)
    parser.add_option('-e', '--no-error-form', dest='no_error_form', action='store_true',
        help='Disable the error notification form.')
    parser.add_option('-l', '--log-level', dest='loglevel', default='warning', metavar='LEVEL',
        help='Set logging to LEVEL level. Valid values are "debug", "info", "warning".')
    parser.add_option('-p', '--portable', dest='portable', action='store_true',
        help='Specify if this should be run as a portable app, off a USB flash drive (not implemented).')
    parser.add_option('-d', '--dev-version', dest='dev_version', action='store_true',
        help='Ignore the version file and pull the version directly from Bazaar')
    parser.add_option('-s', '--style', dest='style', help='Set the Qt4 style (passed directly to Qt4).')
    # Parse command line options and deal with them.
    # Use args supplied programatically if possible.
    (options, args) = parser.parse_args(args) if args else parser.parse_args()
    qt_args = []
    if options.loglevel.lower() in ['d', 'debug']:
        log.setLevel(logging.DEBUG)
    elif options.loglevel.lower() in ['w', 'warning']:
        log.setLevel(logging.WARNING)
    else:
        # Any other value (including 'info') falls back to INFO.
        log.setLevel(logging.INFO)
    if options.style:
        qt_args.extend(['-style', options.style])
    # Throw the rest of the arguments at Qt, just in case.
    qt_args.extend(args)
    # Bug #1018855: Set the WM_CLASS property in X11
    if platform.system() not in ['Windows', 'Darwin']:
        qt_args.append('OpenLP')
    # Initialise the resources
    qInitResources()
    # Now create and actually run the application.
    application = OpenLP(qt_args)
    application.setOrganizationName('OpenLP')
    application.setOrganizationDomain('openlp.org')
    if options.portable:
        # Portable mode: settings and data live next to the executable
        # (e.g. on a USB stick) instead of in the user profile.
        application.setApplicationName('OpenLPPortable')
        Settings.setDefaultFormat(Settings.IniFormat)
        # Get location OpenLPPortable.ini
        application_path = AppLocation.get_directory(AppLocation.AppDir)
        set_up_logging(os.path.abspath(os.path.join(application_path, '..', '..', 'Other')))
        log.info('Running portable')
        portable_settings_file = os.path.abspath(os.path.join(application_path, '..', '..', 'Data', 'OpenLP.ini'))
        # Make this our settings file
        log.info('INI file: %s', portable_settings_file)
        Settings.set_filename(portable_settings_file)
        portable_settings = Settings()
        # Set our data path
        data_path = os.path.abspath(os.path.join(application_path, '..', '..', 'Data',))
        log.info('Data path: %s', data_path)
        # Point to our data path
        portable_settings.setValue('advanced/data path', data_path)
        portable_settings.setValue('advanced/is portable', True)
        portable_settings.sync()
    else:
        application.setApplicationName('OpenLP')
        set_up_logging(AppLocation.get_directory(AppLocation.CacheDir))
    Registry.create()
    Registry().register('application', application)
    application.setApplicationVersion(get_application_version()['version'])
    # Instance check
    if application.is_already_running():
        sys.exit()
    # Remove/convert obsolete settings.
    Settings().remove_obsolete_settings()
    # First time checks in settings
    if not Settings().value('core/has run wizard'):
        if not FirstTimeLanguageForm().exec_():
            # if cancel then stop processing
            sys.exit()
    # i18n Set Language
    language = LanguageManager.get_language()
    application_translator, default_translator = LanguageManager.get_translator(language)
    if not application_translator.isEmpty():
        application.installTranslator(application_translator)
    if not default_translator.isEmpty():
        application.installTranslator(default_translator)
    else:
        log.debug('Could not find default_translator.')
    if not options.no_error_form:
        # Route uncaught exceptions to the in-app exception dialog.
        sys.excepthook = application.hook_exception
    sys.exit(application.run(qt_args))
|
abzaloid/maps | refs/heads/master | django-project/lib/python2.7/site-packages/django/conf/locale/fr/formats.py | 116 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Formats are tried in order; the first one that parses wins.
DATE_INPUT_FORMATS = (
    '%d/%m/%Y', '%d/%m/%y',  # '25/10/2006', '25/10/06'
    '%d.%m.%Y', '%d.%m.%y',  # Swiss (fr_CH), '25.10.2006', '25.10.06'
    # '%d %B %Y', '%d %b %Y',  # '25 octobre 2006', '25 oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
    '%d.%m.%Y %H:%M:%S',     # Swiss (fr_CH), '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # Swiss (fr_CH), '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # Swiss (fr_CH), '25.10.2006 14:30'
    '%d.%m.%Y',              # Swiss (fr_CH), '25.10.2006'
)
DECIMAL_SEPARATOR = ','
# French typography groups digits with a non-breaking space.
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
|
xshotD/pyglet | refs/heads/master | contrib/layout/layout/gl/device.py | 29 | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
import re
from pyglet.gl import *
import pyglet.font
from layout.base import *
from layout.frame import *
from layout.locator import *
from pyglet import image
class GLRenderDevice(RenderDevice):
    '''OpenGL back-end for the layout render device.

    Resolves CSS generic font families to concrete faces, creates text
    frames, and draws borders and backgrounds with immediate-mode OpenGL.
    '''

    # Map CSS generic font family keywords to concrete faces.
    # Fix: 'fantasy' and 'cursive' were misspelt 'Bistream Vera Serif',
    # so the lookup never matched the installed Bitstream faces.
    _stock_font_names = {
        'serif':        'Bitstream Vera Serif',
        'sans-serif':   'Bitstream Vera Sans',
        'monospace':    'Bitstream Vera Sans Mono',
        'fantasy':      'Bitstream Vera Serif',
        'cursive':      'Bitstream Vera Serif',
    }

    def __init__(self, locator=LocalFileLocator):
        # ``locator`` resolves resource names (background images) to streams.
        self.locator = locator
        # background-image name -> texture; None marks a failed load so the
        # load is not retried on every draw.
        self.texture_cache = {}

    def get_font(self, names, size, style, weight):
        '''Return a pyglet font for the given CSS font properties.'''
        names = names[:]
        # Substitute generic family keywords with stock faces.
        for i, name in enumerate(names):
            if isinstance(name, Ident) and name in self._stock_font_names:
                names[i] = self._stock_font_names[name]
        italic = style == 'italic'
        bold = weight >= 700    # CSS weights of 700 and above render bold
        assert type(size) == Dimension and size.unit == 'pt'
        return pyglet.font.load(names, size, italic=italic, bold=bold)

    def create_text_frame(self, style, element, text):
        '''Create a text frame rendered by this device.'''
        return GLTextFrame(style, element, text)

    def draw_solid_border(self, x1, y1, x2, y2, x3, y3, x4, y4,
                          color, style):
        '''Draw one side of a border, which is not 'dotted' or 'dashed'.
        '''
        glColor4f(*color)
        glBegin(GL_QUADS)
        glVertex2f(x1, y1)
        glVertex2f(x2, y2)
        glVertex2f(x3, y3)
        glVertex2f(x4, y4)
        glEnd()

    def draw_vertical_border(self, x1, y1, x2, y2, x3, y3, x4, y4,
                             color, style):
        '''Draw one vertical edge of a border.

        Order of vertices is inner-top, inner-bottom, outer-bottom, outer-top
        '''
        if style in ('dotted', 'dashed'):
            width = max(abs(x1 - x4), 1)
            height = y1 - y2
            if style == 'dotted':
                period = width
            else:
                period = width * 3
            cycles = int(height / period)
            padding = (height - cycles * period) / 2

            vertices = [
                # Top cap
                x1, y1, x1, y1 - padding, x4, y1 - padding, x4, y4,
                # Bottom cap
                x2, y2, x2, y2 + padding, x3, y2 + padding, x3, y3]
            y = y1 - padding
            phase = cycles % 2
            if phase == 0:
                y -= period / 2
            for i in range(cycles):
                if i % 2 == phase:
                    vertices += [x1, y, x1, y - period, x3, y - period, x3, y]
                y -= period

            # Keep a reference on self: glVertexPointer does not copy the
            # data, so the ctypes array must outlive the draw call.
            self.vertices = (c_float * len(vertices))(*vertices)
            glColor4f(*color)
            glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
            glEnableClientState(GL_VERTEX_ARRAY)
            glVertexPointer(2, GL_FLOAT, 0, self.vertices)
            glDrawArrays(GL_QUADS, 0, len(self.vertices)/2)
            glPopClientAttrib()
        else:
            self.draw_solid_border(x1, y1, x2, y2, x3, y3, x4, y4, color, style)

    def draw_horizontal_border(self, x1, y1, x2, y2, x3, y3, x4, y4,
                               color, style):
        '''Draw one horizontal edge of a border.

        Order of vertices is inner-left, inner-right, outer-right, outer-left.
        '''
        if style in ('dotted', 'dashed'):
            height = max(abs(y1 - y4), 1)
            width = x2 - x1
            if style == 'dotted':
                period = height
            else:
                period = height * 3
            cycles = int(width / period)
            padding = (width - cycles * period) / 2

            vertices = [
                # Left cap
                x1, y1, x1 + padding, y1, x1 + padding, y4, x4, y4,
                # Right cap
                x2, y2, x2 - padding, y2, x2 - padding, y3, x3, y3]
            x = x1 + padding
            phase = cycles % 2
            if phase == 0:
                x += period / 2
            for i in range(cycles):
                if i % 2 == phase:
                    vertices += [x, y1, x + period, y1, x + period, y3, x, y3]
                x += period

            # See draw_vertical_border: the array must stay alive for GL.
            self.vertices = (c_float * len(vertices))(*vertices)
            glColor4f(*color)
            glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
            glEnableClientState(GL_VERTEX_ARRAY)
            glVertexPointer(2, GL_FLOAT, 0, self.vertices)
            glDrawArrays(GL_QUADS, 0, len(self.vertices)/2)
            glPopClientAttrib()
        else:
            self.draw_solid_border(x1, y1, x2, y2, x3, y3, x4, y4, color, style)

    def draw_background(self, x1, y1, x2, y2, frame):
        '''Fill the frame's background colour and/or image within the box.'''
        compute = frame.get_computed_property
        background_color = compute('background-color')
        if background_color != 'transparent':
            glPushAttrib(GL_CURRENT_BIT)
            glColor4f(*background_color)
            glBegin(GL_QUADS)
            glVertex2f(x1, y1)
            glVertex2f(x1, y2)
            glVertex2f(x2, y2)
            glVertex2f(x2, y1)
            glEnd()
            glPopAttrib()

        background_image = compute('background-image')
        if background_image != 'none':
            repeat = compute('background-repeat')

            # TODO tileable texture in cache vs non-tileable, vice-versa
            if background_image not in self.texture_cache:
                # Insert None first so a failed load is not retried.
                self.texture_cache[background_image] = None
                stream = self.locator.get_stream(background_image)
                if stream:
                    img = image.load('', file=stream)
                    if repeat != 'no-repeat':
                        texture = image.TileableTexture.create_for_image(img)
                    else:
                        texture = img.texture
                    self.texture_cache[background_image] = texture
            texture = self.texture_cache[background_image]
            if texture:
                glPushAttrib(GL_ENABLE_BIT | GL_CURRENT_BIT)
                glColor3f(1, 1, 1)
                glEnable(GL_BLEND)
                glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
                if isinstance(texture, image.TileableTexture):
                    width, height = texture.width, texture.height
                    if repeat in ('repeat', 'repeat-x'):
                        width = x2 - x1
                    if repeat in ('repeat', 'repeat-y'):
                        height = y1 - y2
                    texture.blit_tiled(x1, y2, 0, width, height)
                else:
                    texture.blit(x1, y2, 0)
                glPopAttrib()
class GLTextFrame(TextFrame):
    # Cached pyglet GlyphString for self.text; shared with continuations and
    # invalidated when a font-related style property changes.
    glyph_string = None
    # Half-open slice [from_index:to_index] of self.text that this frame
    # (or continuation) renders.
    from_index = 0
    to_index = None
    content_ascent = 0

    def __init__(self, style, element, text):
        super(GLTextFrame, self).__init__(style, element, text)

    def lstrip(self):
        """Strip leading whitespace from this frame's text slice and shrink
        the border edge width by the width of the removed glyphs."""
        text = self.text[self.from_index:self.to_index]
        old_index = self.from_index
        self.from_index += len(text) - len(text.lstrip())
        if old_index != self.from_index:
            self.border_edge_width -= self.glyph_string.get_subwidth(
                old_index, self.from_index)

    # Matches newline, space and zero-width space: candidate break points.
    contains_ws = re.compile(u'[\n\u0020\u200b]')

    def purge_style_cache(self, properties):
        """Invalidate the cached glyph string when any font property changed."""
        super(GLTextFrame, self).purge_style_cache(properties)
        if ('font-name' in properties or
            'font-size' in properties or
            'font-weight' in properties or
            'font-style' in properties):
            self.glyph_string = None

    def flow_inline(self, context):
        """Lay this text out into the inline formatting context, splitting it
        into a chain of continuation frames at line-break opportunities."""
        context = context.copy()

        # Reset after last flow
        self.from_index = 0
        self.strip_next = False
        self.continuation = None
        self.close_border = True

        # Final white-space processing step (besides line beginning strip)
        # from 16.6.1 step 4.
        if context.strip_next and \
           self.get_computed_property('white-space') in \
               ('normal', 'nowrap', 'pre-line'):
            self.from_index = len(self.text) - len(self.text.lstrip())

        # Get GL glyph sequence if not already cached
        font = self.get_computed_property('--font')
        if not self.glyph_string:
            self.glyph_string = pyglet.font.GlyphString(
                self.text, font.get_glyphs(self.text))

        computed = self.get_computed_property
        def used(property):
            # Resolve percentage values against the containing block width.
            value = computed(property)
            if type(value) == Percentage:
                value = value * self.containing_block.width
            return value

        # Calculate computed and used values of box properties when
        # relative to containing block width.
        # margin top/bottom remain at class default 0
        content_right = computed('border-right-width') + used('padding-right')
        content_bottom = computed('border-bottom-width') + \
            used('padding-bottom')
        self.content_top = computed('border-top-width') + used('padding-top')
        self.margin_right = used('margin-right')
        self.margin_left = used('margin-left')
        self.content_left = computed('border-left-width') + used('padding-left')

        # Calculate text metrics (actually not dependent on flow, could
        # optimise out).
        self.content_ascent = font.ascent
        self.content_descent = font.descent
        line_height = self.get_computed_property('line-height')
        if line_height != 'normal':
            # Leading is split evenly above and below the text (CSS 10.8.1).
            half_leading = (line_height - \
                (self.content_ascent - self.content_descent)) / 2
        else:
            half_leading = 0
        self.line_ascent = self.content_ascent + half_leading
        self.line_descent = self.content_descent - half_leading
        self.border_edge_height = self.content_ascent - self.content_descent +\
            self.content_top + content_bottom
        self.border_edge_width = self.content_left
        self.baseline = self.content_ascent + self.content_top

        context.add(self.margin_left + self.content_left)
        context.reserve(content_right + self.margin_right)

        # Break into continuations
        frame = self
        while True:
            frame.to_index = self.glyph_string.get_break_index(
                frame.from_index,
                context.remaining_width - context.reserved_width)
            if frame.to_index == frame.from_index:
                # No break fits on this line: break at the next whitespace
                # (or take the rest of the text) rather than loop forever.
                ws = self.contains_ws.search(self.text[frame.from_index:])
                if ws:
                    frame.to_index = frame.from_index + ws.start() + 1
                else:
                    frame.to_index = len(self.text)
            text_width = self.glyph_string.get_subwidth(
                frame.from_index, frame.to_index)
            frame.border_edge_width += text_width

            if frame.to_index < len(self.text):
                # More text remains: create a continuation frame that shares
                # the glyph string and inherits the metrics computed above.
                continuation = GLTextFrame(
                    self.style, self.element, self.text)
                continuation.parent = self.parent
                continuation.glyph_string = self.glyph_string
                continuation.open_border = False
                continuation.from_index = continuation.to_index = frame.to_index
                continuation.border_edge_height = self.border_edge_height
                continuation.border_edge_width = 0
                continuation.margin_right = self.margin_right
                continuation.line_ascent = self.line_ascent
                continuation.line_descent = self.line_descent
                continuation.content_ascent = self.content_ascent
                continuation.content_descent = self.content_descent
                continuation.baseline = self.baseline

                # Remove right-margin from continued frame
                frame.margin_right = 0

                if not context.can_add(text_width, True):
                    context.newline()
                context.add(text_width)
                context.breakpoint()
                frame.soft_break = True

                # Force line break
                if frame.to_index and self.text[frame.to_index-1] == '\n':
                    frame.to_index -= 1
                    frame.line_break = True
                    context.newline()

                # Ready for next iteration
                frame.continuation = continuation
                frame.close_border = False
                frame = continuation
                context.newline()

            if frame.to_index >= len(self.text):
                break

        frame.strip_next = self.text[-1] == ' '
        frame.soft_break = self.text[-1] == ' '
        frame.border_edge_width += content_right
        self.flow_dirty = False

    def draw_text(self, x, y, render_context):
        """Draw this frame's slice of the glyph string at (x, y)."""
        glPushAttrib(GL_CURRENT_BIT | GL_ENABLE_BIT)
        glEnable(GL_TEXTURE_2D)
        glColor4f(*self.get_computed_property('color'))
        glPushMatrix()
        glTranslatef(x, y, 0)
        self.glyph_string.draw(self.from_index, self.to_index)
        glPopMatrix()
        glPopAttrib()

    def __repr__(self):
        return '%s(%r)' % \
            (self.__class__.__name__, self.text[self.from_index:self.to_index])
|
rajanandakumar/DIRAC | refs/heads/integration | DataManagementSystem/Client/DataManager.py | 1 | """
:mod: DataManager
.. module: DataManager
:synopsis: DataManager links the functionalities of StorageElement and FileCatalog.
This module consists of DataManager and related classes.
"""
# # RSCID
__RCSID__ = "$Id$"
# # imports
from datetime import datetime, timedelta
import fnmatch
import os
import time
from types import StringTypes, ListType, DictType, StringType, TupleType
# # from DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.Core.Utilities.Adler import fileAdler, compareAdler
from DIRAC.Core.Utilities.File import makeGuid, getSize
from DIRAC.Core.Utilities.List import randomize
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite, isSameSiteSE, getSEsForCountry
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
class DataManager( object ):
"""
.. class:: DataManager
A DataManager is taking all the actions that impact or require the FileCatalog and the StorageElement together
"""
def __init__( self, catalogs = None, masterCatalogOnly = False, vo = False ):
  """ c'tor

  :param self: self reference
  :param catalogs: the list of catalog in which to perform the operations. This
                  list will be ignored if masterCatalogOnly is set to True
  :param masterCatalogOnly: if set to True, the operations will be performed only on the master catalog.
                            The catalogs parameter will be ignored.
  :param vo: the VO for which the DataManager is created, get VO from the current proxy if not specified
  """
  # Avoid a shared mutable default argument: [] as a default is shared
  # between all calls; use None as sentinel instead (behaviour unchanged).
  if catalogs is None:
    catalogs = []
  self.log = gLogger.getSubLogger( self.__class__.__name__, True )
  self.vo = vo

  # With masterCatalogOnly the supplied catalog list is ignored and only
  # the master catalog names are used.
  catalogsToUse = FileCatalog( vo = self.vo ).getMasterCatalogNames()['Value'] if masterCatalogOnly else catalogs

  self.fc = FileCatalog( catalogs = catalogsToUse, vo = self.vo )
  self.accountingClient = None
  self.registrationProtocol = ['SRM2', 'DIP']
  self.thirdPartyProtocols = ['SRM2', 'DIP']
  self.resourceStatus = ResourceStatus()
  self.ignoreMissingInFC = Operations( self.vo ).getValue( 'DataManagement/IgnoreMissingInFC', False )
  self.useCatalogPFN = Operations( self.vo ).getValue( 'DataManagement/UseCatalogPFN', True )
def setAccountingClient( self, client ):
  """ Set Accounting Client instance

  Replaces the default (None) accounting client; the client is used by
  the accounting helpers when reporting data operations.
  """
  self.accountingClient = client
def __verifyOperationWritePermission( self, path ):
  """ Check that the current credential has write permission for all given paths

  :param path: a single path or a list of paths
  :return: S_OK( bool ) or the failed catalog result
  """
  paths = [ path ] if type( path ) in StringTypes else path
  res = self.fc.getPathPermissions( paths )
  if not res['OK']:
    return res
  successful = res['Value']['Successful']
  # Every path must be present in the successful results with Write == True.
  allWritable = all( successful.get( p, {} ).get( 'Write', False ) for p in paths )
  return S_OK( allWritable )
##########################################################################
#
# These are the bulk removal methods
#
def cleanLogicalDirectory( self, lfnDir ):
  """ Clean the logical directory from the catalog and storage
  """
  folders = [ lfnDir ] if type( lfnDir ) in StringTypes else lfnDir
  successful = {}
  failed = {}
  for folder in folders:
    res = self.__cleanDirectory( folder )
    if res['OK']:
      self.log.debug( "Successfully removed directory.", folder )
      successful[folder] = res['Value']
    else:
      self.log.debug( "Failed to clean directory.", "%s %s" % ( folder, res['Message'] ) )
      failed[folder] = res['Message']
  return S_OK( { "Successful" : successful, "Failed" : failed } )
def __cleanDirectory( self, folder ):
  """ delete all files from directory :folder: in FileCatalog and StorageElement

  :param self: self reference
  :param str folder: directory name
  """
  # Refuse early when the credential has no write access to the folder.
  res = self.__verifyOperationWritePermission( folder )
  if not res['OK']:
    return res
  if not res['Value']:
    errStr = "__cleanDirectory: Write access not permitted for this credential."
    self.log.debug( errStr, folder )
    return S_ERROR( errStr )
  res = self.__getCatalogDirectoryContents( [ folder ] )
  if not res['OK']:
    return res
  # Remove every catalogued file plus the (possible) dirac_directory marker.
  res = self.removeFile( res['Value'].keys() + [ '%s/dirac_directory' % folder ] )
  if not res['OK']:
    return res
  for lfn, reason in res['Value']['Failed'].items():
    gLogger.error( "Failed to remove file found in the catalog", "%s %s" % ( lfn, reason ) )
  # Also wipe the physical directory on every SE in the cleaning list.
  storageElements = gConfig.getValue( 'Resources/StorageElementGroups/SE_Cleaning_List', [] )
  failed = False
  for storageElement in sorted( storageElements ):
    res = self.__removeStorageDirectory( folder, storageElement )
    if not res['OK']:
      failed = True
  if failed:
    return S_ERROR( "Failed to clean storage directory at all SEs" )
  # Finally remove the (now empty) directory from the catalog itself.
  res = returnSingleResult( self.fc.removeDirectory( folder, recursive = True ) )
  if not res['OK']:
    return res
  return S_OK()
def __removeStorageDirectory( self, directory, storageElement ):
  """ delete SE directory

  :param self: self reference
  :param str directory: folder to be removed
  :param str storageElement: DIRAC SE name
  """
  se = StorageElement( storageElement )
  res = returnSingleResult( se.exists( directory ) )
  if not res['OK']:
    self.log.debug( "Failed to obtain existance of directory", res['Message'] )
    return res
  exists = res['Value']
  # A missing directory is not an error: there is nothing to remove.
  if not exists:
    self.log.debug( "The directory %s does not exist at %s " % ( directory, storageElement ) )
    return S_OK()
  res = returnSingleResult( se.removeDirectory( directory, recursive = True ) )
  if not res['OK']:
    self.log.debug( "Failed to remove storage directory", res['Message'] )
    return res
  self.log.debug( "Successfully removed %d files from %s at %s" % ( res['Value']['FilesRemoved'],
                                                                    directory,
                                                                    storageElement ) )
  return S_OK()
def __getCatalogDirectoryContents( self, directories ):
  """ ls recursively all files in directories

  :param self: self reference
  :param list directories: folder names

  :return: S_OK( dict ) mapping every LFN found to its catalog metadata
  """
  self.log.debug( 'Obtaining the catalog contents for %d directories:' % len( directories ) )
  # Work on a copy: the previous implementation aliased the caller's list
  # and emptied it as a side effect.
  activeDirs = list( directories )
  allFiles = {}
  while activeDirs:
    # Breadth-first walk: take the first pending directory (pop(0) replaces
    # the old activeDirs[0] + remove() pair).
    currentDir = activeDirs.pop( 0 )
    res = returnSingleResult( self.fc.listDirectory( currentDir ) )
    if not res['OK']:
      self.log.debug( "Problem getting the %s directory content" % currentDir, res['Message'] )
    else:
      dirContents = res['Value']
      activeDirs.extend( dirContents['SubDirs'] )
      allFiles.update( dirContents['Files'] )
  self.log.debug( "Found %d files" % len( allFiles ) )
  return S_OK( allFiles )
def getReplicasFromDirectory( self, directory ):
  """ get all replicas from a given directory

  :param self: self reference
  :param mixed directory: list of directories or one directory
  """
  dirList = [ directory ] if type( directory ) in StringTypes else directory
  res = self.__getCatalogDirectoryContents( dirList )
  if not res['OK']:
    return res
  # Keep only the replica information from each file's metadata.
  allReplicas = dict( ( lfn, metadata['Replicas'] ) for lfn, metadata in res['Value'].items() )
  return S_OK( allReplicas )
def getFilesFromDirectory( self, directory, days = 0, wildcard = '*' ):
  """ get all files from :directory: older than :days: days matching to :wildcard:

  :param self: self reference
  :param mixed directory: list of directories or directory name
  :param int days: only return files older than this many days (0 = no age filter)
  :param str wildcard: fnmatch pattern the file names have to match

  :return: S_OK( list ) of matching LFNs
  """
  if type( directory ) in StringTypes:
    directories = [directory]
  else:
    directories = directory
  self.log.debug( "Obtaining the files older than %d days in %d directories:" % ( days, len( directories ) ) )
  for folder in directories:
    self.log.debug( folder )
  # Work on a copy: the previous implementation aliased the caller's list
  # and consumed it as a side effect.
  activeDirs = list( directories )
  allFiles = []
  while activeDirs:
    currentDir = activeDirs.pop( 0 )
    # We only need the metadata (verbose) if a limit date is given
    res = returnSingleResult( self.fc.listDirectory( currentDir, verbose = ( days != 0 ) ) )
    if not res['OK']:
      self.log.debug( "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] ) )
    else:
      dirContents = res['Value']
      subdirs = dirContents['SubDirs']
      files = dirContents['Files']
      self.log.debug( "%s: %d files, %d sub-directories" % ( currentDir, len( files ), len( subdirs ) ) )
      for subdir in subdirs:
        # Only descend into sub-directories older than the cutoff.
        if ( not days ) or self.__isOlderThan( subdirs[subdir]['CreationDate'], days ):
          if subdir[0] != '/':
            subdir = currentDir + '/' + subdir
          activeDirs.append( subdir )
      for fileName in files:
        fileInfo = files[fileName]
        # Some catalogs nest the interesting values under 'Metadata'.
        fileInfo = fileInfo.get( 'Metadata', fileInfo )
        if ( not days ) or not fileInfo.get( 'CreationDate' ) or self.__isOlderThan( fileInfo['CreationDate'], days ):
          if wildcard == '*' or fnmatch.fnmatch( fileName, wildcard ):
            fileName = fileInfo.get( 'LFN', fileName )
            allFiles.append( fileName )
  return S_OK( allFiles )
def __isOlderThan( self, stringTime, days ):
timeDelta = timedelta( days = days )
maxCTime = datetime.utcnow() - timeDelta
# st = time.strptime( stringTime, "%a %b %d %H:%M:%S %Y" )
# cTimeStruct = datetime( st[0], st[1], st[2], st[3], st[4], st[5], st[6], None )
cTimeStruct = stringTime
if cTimeStruct < maxCTime:
return True
return False
##########################################################################
#
# These are the data transfer methods
#
def getFile( self, lfn, destinationDir = '' ):
  """ Get a local copy of a LFN from Storage Elements.

      'lfn' is the logical file name for the desired file
      'destinationDir' is the local directory the copy is written to

      :return: S_OK( { 'Successful' : { lfn : localPath }, 'Failed' : { lfn : reason } } )
  """
  if type( lfn ) == ListType:
    lfns = lfn
  elif type( lfn ) == StringType:
    lfns = [lfn]
  else:
    errStr = "getFile: Supplied lfn must be string or list of strings."
    self.log.debug( errStr )
    return S_ERROR( errStr )
  self.log.debug( "getFile: Attempting to get %s files." % len( lfns ) )
  # Only replicas on currently usable SEs are considered.
  res = self.getActiveReplicas( lfns )
  if not res['OK']:
    return res
  failed = res['Value']['Failed']
  lfnReplicas = res['Value']['Successful']
  # Catalog metadata (size, checksum) is needed to validate the download.
  res = self.fc.getFileMetadata( lfnReplicas.keys() )
  if not res['OK']:
    return res
  failed.update( res['Value']['Failed'] )
  fileMetadata = res['Value']['Successful']
  successful = {}
  for lfn in fileMetadata:
    res = self.__getFile( lfn, lfnReplicas[lfn], fileMetadata[lfn], destinationDir )
    if not res['OK']:
      failed[lfn] = res['Message']
    else:
      successful[lfn] = res['Value']
  # Flush the accounting records accumulated by __getFile.
  gDataStoreClient.commit()
  return S_OK( { 'Successful': successful, 'Failed' : failed } )
def __getFile( self, lfn, replicas, metadata, destinationDir ):
  """ Try the replicas (closest SE first) until one downloads and validates.

  :param str lfn: logical file name
  :param dict replicas: SE name -> physical file for this LFN
  :param dict metadata: catalog metadata with 'Size' and 'Checksum'
  :param str destinationDir: local target directory
  :return: S_OK( localFilePath ) or S_ERROR
  """
  if not replicas:
    self.log.debug( "No accessible replicas found" )
    return S_ERROR( "No accessible replicas found" )
  # Determine the best replicas
  res = self._getSEProximity( replicas.keys() )
  if not res['OK']:
    return res
  for storageElementName in res['Value']:
    se = StorageElement( storageElementName )
    physicalFile = replicas[storageElementName]
    # Each download attempt is reported to accounting individually.
    oDataOperation = self.__initialiseAccountingObject( 'getFile', storageElementName, 1 )
    oDataOperation.setStartTime()
    startTime = time.time()
    res = returnSingleResult( se.getFile( physicalFile, localPath = os.path.realpath( destinationDir ) ) )
    getTime = time.time() - startTime
    oDataOperation.setValueByKey( 'TransferTime', getTime )
    if not res['OK']:
      self.log.debug( "Failed to get %s from %s" % ( lfn, storageElementName ), res['Message'] )
      oDataOperation.setValueByKey( 'TransferOK', 0 )
      oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
      oDataOperation.setEndTime()
    else:
      oDataOperation.setValueByKey( 'TransferSize', res['Value'] )
      localFile = os.path.realpath( os.path.join( destinationDir, os.path.basename( lfn ) ) )
      localAdler = fileAdler( localFile )
      # Validate the download against the catalog size and adler32 checksum;
      # a mismatch marks the attempt dirty and the next replica is tried.
      if ( metadata['Size'] != res['Value'] ):
        oDataOperation.setValueByKey( 'FinalStatus', 'FinishedDirty' )
        self.log.debug( "Size of downloaded file (%d) does not match catalog (%d)" % ( res['Value'],
                                                                                       metadata['Size'] ) )
      elif ( metadata['Checksum'] ) and ( not compareAdler( metadata['Checksum'], localAdler ) ):
        oDataOperation.setValueByKey( 'FinalStatus', 'FinishedDirty' )
        self.log.debug( "Checksum of downloaded file (%s) does not match catalog (%s)" % ( localAdler,
                                                                                           metadata['Checksum'] ) )
      else:
        # Successful and validated: register the record and return.
        oDataOperation.setEndTime()
        gDataStoreClient.addRegister( oDataOperation )
        return S_OK( localFile )
    # Failed or dirty attempt: record it and fall through to the next SE.
    gDataStoreClient.addRegister( oDataOperation )
  self.log.debug( "getFile: Failed to get local copy from any replicas.", lfn )
  return S_ERROR( "DataManager.getFile: Failed to get local copy from any replicas." )
def _getSEProximity( self, ses ):
  """ get SE proximity

  Order the given SEs by closeness to the current site: SEs of this site
  first, then SEs in the same country, then all remaining ones; each group
  is shuffled to spread the load.

  :param list ses: SE names to order
  :return: S_OK( list ) of SE names, closest group first
  """
  siteName = DIRAC.siteName()
  localSEs = [se for se in getSEsForSite( siteName )['Value'] if se in ses]
  countrySEs = []
  # The trailing element of the site name is its country code.
  countryCode = str( siteName ).split( '.' )[-1]
  res = getSEsForCountry( countryCode )
  if res['OK']:
    countrySEs = [se for se in res['Value'] if se in ses and se not in localSEs]
  sortedSEs = randomize( localSEs ) + randomize( countrySEs )
  sortedSEs += randomize( [se for se in ses if se not in sortedSEs] )
  return S_OK( sortedSEs )
def putAndRegister( self, lfn, fileName, diracSE, guid = None, path = None, checksum = None ):
  """ Put a local file to a Storage Element and register in the File Catalogues

      'lfn' is the file LFN
      'file' is the full path to the local file
      'diracSE' is the Storage Element to which to put the file
      'guid' is the guid with which the file is to be registered (if not provided will be generated)
      'path' is the path on the storage where the file will be put (if not provided the LFN will be used)
      'checksum' is the adler32 checksum (computed locally when not supplied)

      :return: S_OK( { 'Successful' : ..., 'Failed' : ... } ) or S_ERROR
  """
  # ancestors = ancestors if ancestors else list()
  res = self.__verifyOperationWritePermission( os.path.dirname( lfn ) )
  if not res['OK']:
    return res
  if not res['Value']:
    errStr = "putAndRegister: Write access not permitted for this credential."
    self.log.debug( errStr, lfn )
    return S_ERROR( errStr )
  # Check that the local file exists
  if not os.path.exists( fileName ):
    errStr = "putAndRegister: Supplied file does not exist."
    self.log.debug( errStr, fileName )
    return S_ERROR( errStr )
  # If the path is not provided then use the LFN path
  if not path:
    path = os.path.dirname( lfn )
  # Obtain the size of the local file
  size = getSize( fileName )
  if size == 0:
    errStr = "putAndRegister: Supplied file is zero size."
    self.log.debug( errStr, fileName )
    return S_ERROR( errStr )
  # If the GUID is not given, generate it here
  if not guid:
    guid = makeGuid( fileName )
  if not checksum:
    self.log.debug( "putAndRegister: Checksum information not provided. Calculating adler32." )
    checksum = fileAdler( fileName )
    self.log.debug( "putAndRegister: Checksum calculated to be %s." % checksum )
  # The LFN must not be known to the catalog yet, either by name or by GUID.
  res = self.fc.exists( {lfn:guid} )
  if not res['OK']:
    # Fixed typo in the error message ("Completey" -> "Completely").
    errStr = "putAndRegister: Completely failed to determine existence of destination LFN."
    self.log.debug( errStr, lfn )
    return res
  if lfn not in res['Value']['Successful']:
    errStr = "putAndRegister: Failed to determine existence of destination LFN."
    self.log.debug( errStr, lfn )
    return S_ERROR( errStr )
  if res['Value']['Successful'][lfn]:
    if res['Value']['Successful'][lfn] == lfn:
      errStr = "putAndRegister: The supplied LFN already exists in the File Catalog."
      self.log.debug( errStr, lfn )
    else:
      # exists() returned a different LFN: the GUID is already taken.
      errStr = "putAndRegister: This file GUID already exists for another file. " \
          "Please remove it and try again."
      self.log.debug( errStr, res['Value']['Successful'][lfn] )
    return S_ERROR( "%s %s" % ( errStr, res['Value']['Successful'][lfn] ) )

  ##########################################################
  #  Instantiate the destination storage element here.
  storageElement = StorageElement( diracSE )
  res = storageElement.isValid()
  if not res['OK']:
    errStr = "putAndRegister: The storage element is not currently valid."
    self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
    return S_ERROR( errStr )
  destinationSE = storageElement.getStorageElementName()['Value']
  res = returnSingleResult( storageElement.getPfnForLfn( lfn ) )
  if not res['OK']:
    errStr = "putAndRegister: Failed to generate destination PFN."
    self.log.debug( errStr, res['Message'] )
    return S_ERROR( errStr )
  destPfn = res['Value']
  fileDict = {destPfn:fileName}

  successful = {}
  failed = {}
  ##########################################################
  #  Perform the put here.
  oDataOperation = self.__initialiseAccountingObject( 'putAndRegister', diracSE, 1 )
  oDataOperation.setStartTime()
  oDataOperation.setValueByKey( 'TransferSize', size )
  startTime = time.time()
  res = storageElement.putFile( fileDict, singleFile = True )
  putTime = time.time() - startTime
  oDataOperation.setValueByKey( 'TransferTime', putTime )
  if not res['OK']:
    # The transfer failed: record a failed accounting entry and give up.
    errStr = "putAndRegister: Failed to put file to Storage Element."
    oDataOperation.setValueByKey( 'TransferOK', 0 )
    oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
    oDataOperation.setEndTime()
    gDataStoreClient.addRegister( oDataOperation )
    startTime = time.time()
    gDataStoreClient.commit()
    self.log.debug( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
    self.log.debug( errStr, "%s: %s" % ( fileName, res['Message'] ) )
    return S_ERROR( "%s %s" % ( errStr, res['Message'] ) )
  successful[lfn] = {'put': putTime}

  ###########################################################
  # Perform the registration here
  oDataOperation.setValueByKey( 'RegistrationTotal', 1 )
  fileTuple = ( lfn, destPfn, size, destinationSE, guid, checksum )
  registerDict = {'LFN':lfn, 'PFN':destPfn, 'Size':size, 'TargetSE':destinationSE, 'GUID':guid, 'Addler':checksum}
  startTime = time.time()
  res = self.registerFile( fileTuple )
  registerTime = time.time() - startTime
  oDataOperation.setValueByKey( 'RegistrationTime', registerTime )
  if not res['OK']:
    # The file is on storage but not in the catalog: report the register
    # dictionary back so the caller can retry the registration.
    errStr = "putAndRegister: Completely failed to register file."
    self.log.debug( errStr, res['Message'] )
    failed[lfn] = { 'register' : registerDict }
    oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
  elif lfn in res['Value']['Failed']:
    errStr = "putAndRegister: Failed to register file."
    self.log.debug( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
    oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
    failed[lfn] = { 'register' : registerDict }
  else:
    successful[lfn]['register'] = registerTime
    oDataOperation.setValueByKey( 'RegistrationOK', 1 )
  oDataOperation.setEndTime()
  gDataStoreClient.addRegister( oDataOperation )
  startTime = time.time()
  gDataStoreClient.commit()
  self.log.debug( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
  return S_OK( {'Successful': successful, 'Failed': failed } )
def replicateAndRegister( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' , catalog = '' ):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
successful = {}
failed = {}
self.log.debug( "replicateAndRegister: Attempting to replicate %s to %s." % ( lfn, destSE ) )
startReplication = time.time()
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
replicationTime = time.time() - startReplication
if not res['OK']:
errStr = "DataManager.replicateAndRegister: Completely failed to replicate file."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
if not res['Value']:
# The file was already present at the destination SE
self.log.debug( "replicateAndRegister: %s already present at %s." % ( lfn, destSE ) )
successful[lfn] = { 'replicate' : 0, 'register' : 0 }
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
successful[lfn] = { 'replicate' : replicationTime }
destPfn = res['Value']['DestPfn']
destSE = res['Value']['DestSE']
self.log.debug( "replicateAndRegister: Attempting to register %s at %s." % ( destPfn, destSE ) )
replicaTuple = ( lfn, destPfn, destSE )
startRegistration = time.time()
res = self.registerReplica( replicaTuple, catalog = catalog )
registrationTime = time.time() - startRegistration
if not res['OK']:
# Need to return to the client that the file was replicated but not registered
errStr = "replicateAndRegister: Completely failed to register replica."
self.log.debug( errStr, res['Message'] )
failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
else:
if lfn in res['Value']['Successful']:
self.log.debug( "replicateAndRegister: Successfully registered replica." )
successful[lfn]['register'] = registrationTime
else:
errStr = "replicateAndRegister: Failed to register replica."
self.log.debug( errStr, res['Value']['Failed'][lfn] )
failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
return S_OK( {'Successful': successful, 'Failed': failed} )
def replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
self.log.debug( "replicate: Attempting to replicate %s to %s." % ( lfn, destSE ) )
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
if not res['OK']:
errStr = "replicate: Replication failed."
self.log.debug( errStr, "%s %s" % ( lfn, destSE ) )
return res
if not res['Value']:
# The file was already present at the destination SE
self.log.debug( "replicate: %s already present at %s." % ( lfn, destSE ) )
return res
return S_OK( lfn )
  def __replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
    """ Replicate a LFN to a destination SE.

        'lfn' is the LFN to be replicated
        'destSE' is the Storage Element the file should be replicated to
        'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
        'destPath' is the path on the destination storage element, if to be different from LHCb convention
        'localCache' is a local directory used as intermediate storage when no
        third-party transfer is possible (non-srm endpoints)

        :return: S_OK( {'DestSE':..., 'DestPfn':...} ) if a replication was performed,
                 plain S_OK() if the replica is already registered at destSE,
                 S_ERROR otherwise
    """
    ###########################################################
    # Check that we have write permissions to this directory.
    res = self.__verifyOperationWritePermission( lfn )
    if not res['OK']:
      return res
    if not res['Value']:
      errStr = "__replicate: Write access not permitted for this credential."
      self.log.debug( errStr, lfn )
      return S_ERROR( errStr )
    self.log.debug( "__replicate: Performing replication initialization." )
    res = self.__initializeReplication( lfn, sourceSE, destSE )
    if not res['OK']:
      self.log.debug( "__replicate: Replication initialisation failed.", lfn )
      return res
    destStorageElement = res['Value']['DestStorage']
    lfnReplicas = res['Value']['Replicas']
    destSE = res['Value']['DestSE']
    catalogueSize = res['Value']['CatalogueSize']
    ###########################################################
    # If the LFN already exists at the destination we have nothing to do
    if destSE in lfnReplicas:
      self.log.debug( "__replicate: LFN is already registered at %s." % destSE )
      return S_OK()
    ###########################################################
    # Resolve the best source storage elements for replication
    self.log.debug( "__replicate: Determining the best source replicas." )
    res = self.__resolveBestReplicas( lfn, sourceSE, lfnReplicas, catalogueSize )
    if not res['OK']:
      self.log.debug( "__replicate: Best replica resolution failed.", lfn )
      return res
    replicaPreference = res['Value']
    ###########################################################
    # Now perform the replication for the file
    if destPath:
      destPath = '%s/%s' % ( destPath, os.path.basename( lfn ) )
    else:
      destPath = lfn
    res = returnSingleResult( destStorageElement.getPfnForLfn( destPath ) )
    if not res['OK']:
      errStr = "__replicate: Failed to generate destination PFN."
      self.log.debug( errStr, res['Message'] )
      return S_ERROR( errStr )
    destPfn = res['Value']
    # Find out if there is a replica already at the same site
    # Sources co-located with the destination are preferred (tried first below).
    localReplicas = []
    otherReplicas = []
    for sourceSE, sourcePfn in replicaPreference:
      if sourcePfn == destPfn:
        continue
      res = isSameSiteSE( sourceSE, destSE )
      if res['OK'] and res['Value']:
        localReplicas.append( ( sourceSE, sourcePfn ) )
      else:
        otherReplicas.append( ( sourceSE, sourcePfn ) )
    replicaPreference = localReplicas + otherReplicas
    # Try each candidate source in turn; the first successful transfer wins.
    for sourceSE, sourcePfn in replicaPreference:
      self.log.debug( "__replicate: Attempting replication from %s to %s." % ( sourceSE, destSE ) )
      fileDict = {destPfn:sourcePfn}
      if sourcePfn == destPfn:
        continue
      localFile = ''
      #FIXME: this should not be hardcoded!!!
      if sourcePfn.find( 'srm' ) == -1 or destPfn.find( 'srm' ) == -1:
        # No third party transfer is possible, we have to replicate through the local cache
        localDir = '.'
        if localCache:
          localDir = localCache
        self.getFile( lfn, destinationDir = localDir )
        localFile = os.path.join( localDir, os.path.basename( lfn ) )
        fileDict = {destPfn:localFile}
      res = destStorageElement.replicateFile( fileDict, sourceSize = catalogueSize, singleFile = True )
      # The cached copy is deleted regardless of the transfer outcome
      if localFile and os.path.exists( localFile ):
        os.remove( localFile )
      if res['OK']:
        self.log.debug( "__replicate: Replication successful." )
        resDict = {'DestSE':destSE, 'DestPfn':destPfn}
        return S_OK( resDict )
      else:
        errStr = "__replicate: Replication failed."
        self.log.debug( errStr, "%s from %s to %s." % ( lfn, sourceSE, destSE ) )
    ##########################################################
    # If the replication failed for all sources give up
    errStr = "__replicate: Failed to replicate with all sources."
    self.log.debug( errStr, lfn )
    return S_ERROR( errStr )
  def __initializeReplication( self, lfn, sourceSE, destSE ):
    """ Validate source/destination SEs and gather the data needed by __replicate.

        Checks SE read/write permissions, resolves the destination SE name,
        fetches the LFN replicas and the catalogue file size.

        :return: S_OK( { 'DestStorage' : StorageElement, 'DestSE' : str,
                         'Replicas' : dict, 'CatalogueSize' : int } ) or S_ERROR
    """
    # Horrible, but kept to not break current log messages
    logStr = "__initializeReplication:"
    ###########################################################
    # Check the sourceSE if specified
    self.log.verbose( "%s: Determining whether source Storage Element is sane." % logStr )
    if sourceSE:
      if not self.__SEActive( sourceSE ).get( 'Value', {} ).get( 'Read' ):
        infoStr = "%s Supplied source Storage Element is not currently allowed for Read." % ( logStr )
        self.log.info( infoStr, sourceSE )
        return S_ERROR( infoStr )
    ###########################################################
    # Check that the destination storage element is sane and resolve its name
    self.log.debug( "%s Verifying dest StorageElement validity (%s)." % ( logStr, destSE ) )
    destStorageElement = StorageElement( destSE )
    res = destStorageElement.isValid()
    if not res['OK']:
      errStr = "%s The storage element is not currently valid." % logStr
      self.log.debug( errStr, "%s %s" % ( destSE, res['Message'] ) )
      return S_ERROR( errStr )
    # Use the canonical SE name from here on
    destSE = destStorageElement.getStorageElementName()['Value']
    self.log.verbose( "%s Destination Storage Element verified." % logStr )
    ###########################################################
    # Check whether the destination storage element is banned
    self.log.verbose( "%s Determining whether %s ( destination ) is Write-banned." % ( logStr, destSE ) )
    if not self.__SEActive( destSE ).get( 'Value', {} ).get( 'Write' ):
      infoStr = "%s Supplied destination Storage Element is not currently allowed for Write." % ( logStr )
      self.log.debug( infoStr, destSE )
      return S_ERROR( infoStr )
    ###########################################################
    # Get the LFN replicas from the file catalogue
    self.log.debug( "%s Attempting to obtain replicas for %s." % ( logStr, lfn ) )
    res = self.getReplicas( lfn )
    if not res[ 'OK' ]:
      errStr = "%s Completely failed to get replicas for LFN." % logStr
      self.log.debug( errStr, "%s %s" % ( lfn, res['Message'] ) )
      return res
    if lfn not in res['Value']['Successful']:
      errStr = "%s Failed to get replicas for LFN." % logStr
      self.log.debug( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
      return S_ERROR( "%s %s" % ( errStr, res['Value']['Failed'][lfn] ) )
    self.log.debug( "%s Successfully obtained replicas for LFN." % logStr )
    lfnReplicas = res['Value']['Successful'][lfn]
    ###########################################################
    # Check the file is at the sourceSE
    self.log.debug( "%s: Determining whether source Storage Element is sane." % logStr )
    if sourceSE and sourceSE not in lfnReplicas:
      errStr = "%s LFN does not exist at supplied source SE." % logStr
      self.log.error( errStr, "%s %s" % ( lfn, sourceSE ) )
      return S_ERROR( errStr )
    ###########################################################
    # If the file catalogue size is zero fail the transfer
    self.log.debug( "%s Attempting to obtain size for %s." % ( logStr, lfn ) )
    res = self.fc.getFileSize( lfn )
    if not res['OK']:
      errStr = "%s Completely failed to get size for LFN." % logStr
      self.log.debug( errStr, "%s %s" % ( lfn, res['Message'] ) )
      return res
    if lfn not in res['Value']['Successful']:
      errStr = "%s Failed to get size for LFN." % logStr
      self.log.debug( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
      return S_ERROR( "%s %s" % ( errStr, res['Value']['Failed'][lfn] ) )
    catalogueSize = res['Value']['Successful'][lfn]
    if catalogueSize == 0:
      errStr = "%s Registered file size is 0." % logStr
      self.log.debug( errStr, lfn )
      return S_ERROR( errStr )
    self.log.debug( "%s File size determined to be %s." % ( logStr, catalogueSize ) )
    self.log.verbose( "%s Replication initialization successful." % logStr )
    resDict = {
      'DestStorage' : destStorageElement,
      'DestSE' : destSE,
      'Replicas' : lfnReplicas,
      'CatalogueSize' : catalogueSize
    }
    return S_OK( resDict )
  def __resolveBestReplicas( self, lfn, sourceSE, lfnReplicas, catalogueSize ):
    """ Select the usable source replicas for a replication.

        A replica is kept only when its SE is readable and valid, supports a
        remote (third-party) protocol, and its physical size matches the
        catalogue size.

        :return: S_OK( [ ( diracSE, sourcePfn ), ... ] ) or S_ERROR when no
                 valid source remains
    """
    ###########################################################
    # Determine the best replicas (remove banned sources, invalid storage elements and file with the wrong size)
    # It's not really the best, but the one authorized
    logStr = "__resolveBestReplicas:"
    replicaPreference = []
    for diracSE, pfn in lfnReplicas.items():
      # When a specific sourceSE is requested, ignore all other replicas
      if sourceSE and diracSE != sourceSE:
        self.log.debug( "%s %s replica not requested." % ( logStr, diracSE ) )
        continue
      if not self.__SEActive( diracSE ).get( 'Value', {} ).get( 'Read' ):
        self.log.debug( "%s %s is currently not allowed as a source." % ( logStr, diracSE ) )
      else:
        self.log.debug( "%s %s is available for use." % ( logStr, diracSE ) )
        storageElement = StorageElement( diracSE )
        res = storageElement.isValid()
        if not res['OK']:
          errStr = "%s The storage element is not currently valid." % logStr
          self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
        else:
          # pfn = returnSingleResult( storageElement.getPfnForLfn( lfn ) ).get( 'Value', pfn )
          remoteProtocols = storageElement.getRemoteProtocols()
          if not remoteProtocols['OK']:
            self.log.debug( "%s : could not get remote protocols %s" % ( diracSE, remoteProtocols['Message'] ) )
            continue
          remoteProtocols = remoteProtocols['Value']
          if remoteProtocols:
            self.log.debug( "%s Attempting to get source pfns for remote protocols." % logStr )
            res = returnSingleResult( storageElement.getPfnForProtocol( pfn, protocol = self.thirdPartyProtocols ) )
            if res['OK']:
              sourcePfn = res['Value']
              # print pfn, sourcePfn
              self.log.debug( "%s Attempting to get source file size." % logStr )
              res = storageElement.getFileSize( sourcePfn )
              if res['OK']:
                if sourcePfn in res['Value']['Successful']:
                  sourceFileSize = res['Value']['Successful'][sourcePfn]
                  self.log.debug( "%s Source file size determined to be %s." % ( logStr, sourceFileSize ) )
                  # Guard against corrupted/truncated replicas
                  if catalogueSize == sourceFileSize:
                    fileTuple = ( diracSE, sourcePfn )
                    replicaPreference.append( fileTuple )
                  else:
                    errStr = "%s Catalogue size and physical file size mismatch." % logStr
                    self.log.debug( errStr, "%s %s" % ( diracSE, sourcePfn ) )
                else:
                  errStr = "%s Failed to get physical file size." % logStr
                  self.log.always( errStr, "%s %s: %s" % ( sourcePfn, diracSE, res['Value']['Failed'][sourcePfn] ) )
              else:
                errStr = "%s Completely failed to get physical file size." % logStr
                self.log.debug( errStr, "%s %s: %s" % ( sourcePfn, diracSE, res['Message'] ) )
            else:
              errStr = "%s Failed to get PFN for replication for StorageElement." % logStr
              self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
          else:
            errStr = "%s Source Storage Element has no remote protocols." % logStr
            self.log.debug( errStr, diracSE )
    if not replicaPreference:
      errStr = "%s Failed to find any valid source Storage Elements." % logStr
      self.log.debug( errStr )
      return S_ERROR( errStr )
    else:
      return S_OK( replicaPreference )
###################################################################
#
# These are the file catalog write methods
#
def registerFile( self, fileTuple, catalog = '' ):
""" Register a file or a list of files
:param self: self reference
:param tuple fileTuple: (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum )
:param str catalog: catalog name
"""
if type( fileTuple ) == ListType:
fileTuples = fileTuple
elif type( fileTuple ) == TupleType:
fileTuples = [fileTuple]
else:
errStr = "registerFile: Supplied file info must be tuple of list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "registerFile: Attempting to register %s files." % len( fileTuples ) )
res = self.__registerFile( fileTuples, catalog )
if not res['OK']:
errStr = "registerFile: Completely failed to register files."
self.log.debug( errStr, res['Message'] )
return res
# Remove Failed LFNs if they are in success
success = res['Value']['Successful']
failed = res['Value']['Failed']
for lfn in success:
failed.pop( lfn, None )
return res
def __registerFile( self, fileTuples, catalog ):
""" register file to cataloge """
fileDict = {}
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuples:
fileDict[lfn] = {'PFN':physicalFile, 'Size':fileSize, 'SE':storageElementName, 'GUID':fileGuid, 'Checksum':checksum}
if catalog:
fileCatalog = FileCatalog( catalog, vo = self.vo )
if not fileCatalog.isOK():
return S_ERROR( "Can't get FileCatalog %s" % catalog )
else:
fileCatalog = self.fc
res = fileCatalog.addFile( fileDict )
if not res['OK']:
errStr = "__registerFile: Completely failed to register files."
self.log.debug( errStr, res['Message'] )
return res
def registerReplica( self, replicaTuple, catalog = '' ):
""" Register a replica (or list of) supplied in the replicaTuples.
'replicaTuple' is a tuple or list of tuples of the form (lfn,pfn,se)
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [ replicaTuple ]
else:
errStr = "registerReplica: Supplied file info must be tuple of list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "registerReplica: Attempting to register %s replicas." % len( replicaTuples ) )
res = self.__registerReplica( replicaTuples, catalog )
if not res['OK']:
errStr = "registerReplica: Completely failed to register replicas."
self.log.debug( errStr, res['Message'] )
return res
# Remove Failed LFNs if they are in success
success = res['Value']['Successful']
failed = res['Value']['Failed']
for lfn in success:
failed.pop( lfn, None )
return res
  def __registerReplica( self, replicaTuples, catalog ):
    """ Register replicas in a catalogue.

        For each target SE the PFN is first converted to the registration
        protocol, then all resolved replicas are added in one bulk call.

        :param list replicaTuples: list of ( lfn, pfn, storageElementName )
        :param str catalog: catalog name; falls back to self.fc when empty
        :return: S_OK( { 'Successful' : ..., 'Failed' : ... } ) or S_ERROR
    """
    # Group the (lfn, pfn) pairs per target SE
    seDict = {}
    for lfn, pfn, storageElementName in replicaTuples:
      seDict.setdefault( storageElementName, [] ).append( ( lfn, pfn ) )
    failed = {}
    replicaTuples = []
    for storageElementName, replicaTuple in seDict.items():
      destStorageElement = StorageElement( storageElementName )
      res = destStorageElement.isValid()
      if not res['OK']:
        errStr = "__registerReplica: The storage element is not currently valid."
        self.log.debug( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
        for lfn, pfn in replicaTuple:
          failed[lfn] = errStr
      else:
        # Use the canonical SE name for registration
        storageElementName = destStorageElement.getStorageElementName()['Value']
        for lfn, pfn in replicaTuple:
          res = returnSingleResult( destStorageElement.getPfnForProtocol( pfn, protocol = self.registrationProtocol, withPort = False ) )
          if not res['OK']:
            failed[lfn] = res['Message']
          else:
            replicaTuple = ( lfn, res['Value'], storageElementName, False )
            replicaTuples.append( replicaTuple )
    self.log.debug( "__registerReplica: Successfully resolved %s replicas for registration." % len( replicaTuples ) )
    # HACK!
    replicaDict = {}
    for lfn, pfn, se, _master in replicaTuples:
      replicaDict[lfn] = {'SE':se, 'PFN':pfn}
    if catalog:
      fileCatalog = FileCatalog( catalog, vo = self.vo )
      res = fileCatalog.addReplica( replicaDict )
    else:
      res = self.fc.addReplica( replicaDict )
    if not res['OK']:
      errStr = "__registerReplica: Completely failed to register replicas."
      self.log.debug( errStr, res['Message'] )
      return S_ERROR( errStr )
    # Merge the PFN-resolution failures with the catalogue failures
    failed.update( res['Value']['Failed'] )
    successful = res['Value']['Successful']
    resDict = {'Successful':successful, 'Failed':failed}
    return S_OK( resDict )
###################################################################
#
# These are the removal methods for physical and catalogue removal
#
  def removeFile( self, lfn, force = None ):
    """ Remove the file (all replicas) from Storage Elements and file catalogue

        'lfn' is the file to be removed (a string or a list of strings)
        'force' — when True, files missing from the catalogue are reported as
        successfully removed; defaults to self.ignoreMissingInFC

        :return: S_OK( { 'Successful' : ..., 'Failed' : ... } ) or S_ERROR
    """
    if force == None:
      force = self.ignoreMissingInFC
    if type( lfn ) == ListType:
      lfns = lfn
    elif type( lfn ) == StringType:
      lfns = [lfn]
    else:
      errStr = "removeFile: Supplied lfns must be string or list of strings."
      self.log.debug( errStr )
      return S_ERROR( errStr )
    # First check if the file exists in the FC
    res = self.fc.exists( lfns )
    if not res['OK']:
      return res
    success = res['Value']['Successful']
    # Only the files known to the catalogue are actually removed below
    lfns = [lfn for lfn in success if success[lfn] ]
    if force:
      # Files that don't exist are removed successfully
      successful = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], True )
      failed = {}
    else:
      successful = {}
      failed = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], 'No such file or directory' )
    # Check that we have write permissions to this directory.
    if lfns:
      res = self.__verifyOperationWritePermission( lfns )
      if not res['OK']:
        return res
      if not res['Value']:
        errStr = "removeFile: Write access not permitted for this credential."
        self.log.error( errStr, lfns )
        return S_ERROR( errStr )
      self.log.debug( "removeFile: Attempting to remove %s files from Storage and Catalogue. Get replicas first" % len( lfns ) )
      res = self.fc.getReplicas( lfns, True )
      if not res['OK']:
        errStr = "DataManager.removeFile: Completely failed to get replicas for lfns."
        self.log.debug( errStr, res['Message'] )
        return res
      lfnDict = res['Value']['Successful']
      for lfn, reason in res['Value'].get( 'Failed', {} ).items():
        # Ignore files missing in FC if force is set
        if reason == 'No such file or directory' and force:
          successful[lfn] = True
        elif reason == 'File has zero replicas':
          # Still needs a catalogue removal, but no physical removal
          lfnDict[lfn] = {}
        else:
          failed[lfn] = reason
      res = self.__removeFile( lfnDict )
      if not res['OK']:
        errStr = "removeFile: Completely failed to remove files."
        self.log.debug( errStr, res['Message'] )
        return res
      failed.update( res['Value']['Failed'] )
      successful.update( res['Value']['Successful'] )
    resDict = {'Successful':successful, 'Failed':failed}
    # Flush the accounting records produced by the replica removals
    gDataStoreClient.commit()
    return S_OK( resDict )
  def __removeFile( self, lfnDict ):
    """ Remove the physical replicas of each LFN at every SE, then remove the
        LFNs that lost all their replicas from the catalogue.

        :param dict lfnDict: { lfn : { se : pfn, ... }, ... }
        :return: S_OK( { 'Successful' : ..., 'Failed' : ... } )
    """
    # Group ( lfn, pfn ) pairs per storage element
    storageElementDict = {}
    # # sorted and reversed
    for lfn, repDict in sorted( lfnDict.items(), reverse = True ):
      for se, pfn in repDict.items():
        storageElementDict.setdefault( se, [] ).append( ( lfn, pfn ) )
    failed = {}
    successful = {}
    for storageElementName in sorted( storageElementDict ):
      fileTuple = storageElementDict[storageElementName]
      res = self.__removeReplica( storageElementName, fileTuple )
      if not res['OK']:
        errStr = res['Message']
        for lfn, pfn in fileTuple:
          # Accumulate (concatenate) error strings per LFN across SEs
          failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
      else:
        for lfn, errStr in res['Value']['Failed'].items():
          failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
    # Only LFNs with no failed replica removal are purged from the catalogue
    completelyRemovedFiles = []
    for lfn in [lfn for lfn in lfnDict if lfn not in failed]:
      completelyRemovedFiles.append( lfn )
    if completelyRemovedFiles:
      res = self.fc.removeFile( completelyRemovedFiles )
      if not res['OK']:
        for lfn in completelyRemovedFiles:
          failed[lfn] = "Failed to remove file from the catalog: %s" % res['Message']
      else:
        failed.update( res['Value']['Failed'] )
        successful = res['Value']['Successful']
    return S_OK( { 'Successful' : successful, 'Failed' : failed } )
  def removeReplica( self, storageElementName, lfn ):
    """ Remove replica at the supplied Storage Element from Storage Element then file catalogue

        'storageElementName' is the storage where the file is to be removed
        'lfn' is the file to be removed (a string or a list of strings)

        Refuses to remove the last remaining replica of a file.

        :return: S_OK( { 'Successful' : ..., 'Failed' : ... } ) or S_ERROR
    """
    if type( lfn ) == ListType:
      lfns = lfn
    elif type( lfn ) == StringType:
      lfns = [lfn]
    else:
      errStr = "removeReplica: Supplied lfns must be string or list of strings."
      self.log.debug( errStr )
      return S_ERROR( errStr )
    # Check that we have write permissions to this directory.
    res = self.__verifyOperationWritePermission( lfns )
    if not res['OK']:
      return res
    if not res['Value']:
      errStr = "removeReplica: Write access not permitted for this credential."
      self.log.debug( errStr, lfns )
      return S_ERROR( errStr )
    self.log.debug( "removeReplica: Will remove catalogue entry for %s lfns at %s." % ( len( lfns ),
                                                                                        storageElementName ) )
    res = self.fc.getReplicas( lfns, True )
    if not res['OK']:
      errStr = "removeReplica: Completely failed to get replicas for lfns."
      self.log.debug( errStr, res['Message'] )
      return res
    failed = res['Value']['Failed']
    successful = {}
    replicaTuples = []
    for lfn, repDict in res['Value']['Successful'].items():
      if storageElementName not in repDict:
        # The file doesn't exist at the storage element so don't have to remove it
        successful[lfn] = True
      elif len( repDict ) == 1:
        # The file has only a single replica so don't remove
        self.log.debug( "The replica you are trying to remove is the only one.", "%s @ %s" % ( lfn,
                                                                                               storageElementName ) )
        failed[lfn] = "Failed to remove sole replica"
      else:
        replicaTuples.append( ( lfn, repDict[storageElementName] ) )
    res = self.__removeReplica( storageElementName, replicaTuples )
    if not res['OK']:
      return res
    failed.update( res['Value']['Failed'] )
    successful.update( res['Value']['Successful'] )
    # Flush the accounting records produced by the replica removal
    gDataStoreClient.commit()
    return S_OK( { 'Successful' : successful, 'Failed' : failed } )
  def __removeReplica( self, storageElementName, fileTuple ):
    """ Remove replicas both physically (from the SE) and from the catalogue.

        :param str storageElementName: SE hosting the replicas
        :param list fileTuple: list of ( lfn, pfn ) pairs, with the PFN as
                               recorded in the file catalogue
        :return: S_OK( { 'Successful' : ..., 'Failed' : ... } ) or S_ERROR
    """
    lfnDict = {}
    failed = {}
    se = None if self.useCatalogPFN else StorageElement( storageElementName )  # Placeholder for the StorageElement object
    if se:
      res = se.isValid( 'removeFile' )
      if not res['OK']:
        return res
    for lfn, pfn in fileTuple:
      res = self.__verifyOperationWritePermission( lfn )
      if not res['OK'] or not res['Value']:
        errStr = "__removeReplica: Write access not permitted for this credential."
        self.log.debug( errStr, lfn )
        failed[lfn] = errStr
      else:
        # This is the PFN as in the FC
        lfnDict[lfn] = pfn
    # Now we should use the constructed PFNs if needed, for the physical removal
    # Reverse lfnDict into pfnDict with required PFN
    if self.useCatalogPFN:
      pfnDict = dict( zip( lfnDict.values(), lfnDict.keys() ) )
    else:
      # Rebuild the PFN from the SE configuration, falling back to the FC PFN
      pfnDict = dict( [ ( se.getPfnForLfn( lfn )['Value'].get( 'Successful', {} ).get( lfn, lfnDict[lfn] ), lfn ) for lfn in lfnDict] )
    # removePhysicalReplicas is called with real PFN list
    res = self.__removePhysicalReplica( storageElementName, pfnDict.keys() )
    if not res['OK']:
      errStr = "__removeReplica: Failed to remove physical replicas."
      self.log.debug( errStr, res['Message'] )
      return S_ERROR( res['Message'] )
    # Map physical-removal failures back to their LFNs
    failed.update( dict( [( pfnDict[pfn], error ) for pfn, error in res['Value']['Failed'].items()] ) )
    # Here we use the FC PFN...
    replicaTuples = [( pfnDict[pfn], lfnDict[pfnDict[pfn]], storageElementName ) for pfn in res['Value']['Successful']]
    res = self.__removeCatalogReplica( replicaTuples )
    if not res['OK']:
      errStr = "__removeReplica: Completely failed to remove physical files."
      self.log.debug( errStr, res['Message'] )
      failed.update( dict.fromkeys( [lfn for lfn, _pfn, _se in replicaTuples if lfn not in failed], res['Message'] ) )
      successful = {}
    else:
      failed.update( res['Value']['Failed'] )
      successful = res['Value']['Successful']
    return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def removeReplicaFromCatalog( self, storageElementName, lfn ):
""" remove :lfn: replica from :storageElementName: SE
:param self: self reference
:param str storageElementName: SE name
:param mixed lfn: a single LFN or list of LFNs
"""
# Remove replica from the file catalog 'lfn' are the file
# to be removed 'storageElementName' is the storage where the file is to be removed
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeReplicaFromCatalog: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "removeReplicaFromCatalog: Will remove catalogue entry for %s lfns at %s." % \
( len( lfns ), storageElementName ) )
res = self.fc.getReplicas( lfns, allStatus = True )
if not res['OK']:
errStr = "removeReplicaFromCatalog: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed = {}
successful = {}
for lfn, reason in res['Value']['Failed'].items():
if reason in ( 'No such file or directory', 'File has zero replicas' ):
successful[lfn] = True
else:
failed[lfn] = reason
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
replicaTuples.append( ( lfn, repDict[storageElementName], storageElementName ) )
self.log.debug( "removeReplicaFromCatalog: Resolved %s pfns for catalog removal at %s." % ( len( replicaTuples ),
storageElementName ) )
res = self.__removeCatalogReplica( replicaTuples )
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def removeCatalogPhysicalFileNames( self, replicaTuple ):
""" Remove replicas from the file catalog specified by replica tuple
'replicaTuple' is a tuple containing the replica to be removed and is of the form ( lfn, pfn, se )
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [replicaTuple]
else:
errStr = "removeCatalogPhysicalFileNames: Supplied info must be tuple or list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
return self.__removeCatalogReplica( replicaTuples )
def __removeCatalogReplica( self, replicaTuple ):
""" remove replica form catalogue """
oDataOperation = self.__initialiseAccountingObject( 'removeCatalogReplica', '', len( replicaTuple ) )
oDataOperation.setStartTime()
start = time.time()
# HACK!
replicaDict = {}
for lfn, pfn, se in replicaTuple:
replicaDict[lfn] = {'SE':se, 'PFN':pfn}
res = self.fc.removeReplica( replicaDict )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'RegistrationTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'RegistrationOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removeCatalogReplica: Completely failed to remove replica."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
for lfn in res['Value']['Successful']:
infoStr = "__removeCatalogReplica: Successfully removed replica."
self.log.debug( infoStr, lfn )
if res['Value']['Successful']:
self.log.debug( "__removeCatalogReplica: Removed %d replicas" % len( res['Value']['Successful'] ) )
success = res['Value']['Successful']
if success:
self.log.info( "__removeCatalogReplica: Removed %d replicas" % len( success ) )
for lfn in success:
self.log.debug( "__removeCatalogReplica: Successfully removed replica.", lfn )
for lfn, error in res['Value']['Failed'].items():
self.log.error( "__removeCatalogReplica: Failed to remove replica.", "%s %s" % ( lfn, error ) )
oDataOperation.setValueByKey( 'RegistrationOK', len( success ) )
gDataStoreClient.addRegister( oDataOperation )
return res
def removePhysicalReplica( self, storageElementName, lfn ):
""" Remove replica from Storage Element.
'lfn' are the files to be removed
'storageElementName' is the storage where the file is to be removed
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removePhysicalReplica: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
# Check that we have write permissions to this directory.
res = self.__verifyOperationWritePermission( lfns )
if not res['OK']:
return res
if not res['Value']:
errStr = "removePhysicalReplica: Write access not permitted for this credential."
self.log.debug( errStr, lfns )
return S_ERROR( errStr )
self.log.debug( "removePhysicalReplica: Attempting to remove %s lfns at %s." % ( len( lfns ),
storageElementName ) )
self.log.debug( "removePhysicalReplica: Attempting to resolve replicas." )
res = self.getReplicas( lfns )
if not res['OK']:
errStr = "removePhysicalReplica: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed = res['Value']['Failed']
successful = {}
pfnDict = {}
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
sePfn = repDict[storageElementName]
pfnDict[sePfn] = lfn
self.log.debug( "removePhysicalReplica: Resolved %s pfns for removal at %s." % ( len( pfnDict ),
storageElementName ) )
res = self.__removePhysicalReplica( storageElementName, pfnDict.keys() )
for pfn, error in res['Value']['Failed'].items():
failed[pfnDict[pfn]] = error
for pfn in res['Value']['Successful']:
successful[pfnDict[pfn]] = True
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
  def __removePhysicalReplica( self, storageElementName, pfnsToRemove ):
    """ Remove the given PFNs from a Storage Element and send an accounting record.

        :param str storageElementName: DIRAC SE name
        :param list pfnsToRemove: PFNs to be deleted at that SE
        :return: S_OK with per-PFN 'Successful'/'Failed' dicts, or S_ERROR
    """
    self.log.debug( "__removePhysicalReplica: Attempting to remove %s pfns at %s." % ( len( pfnsToRemove ),
                                                                                       storageElementName ) )
    storageElement = StorageElement( storageElementName )
    res = storageElement.isValid()
    if not res['OK']:
      errStr = "__removePhysicalReplica: The storage element is not currently valid."
      self.log.debug( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
      return S_ERROR( errStr )
    oDataOperation = self.__initialiseAccountingObject( 'removePhysicalReplica',
                                                        storageElementName,
                                                        len( pfnsToRemove ) )
    oDataOperation.setStartTime()
    start = time.time()
    # File sizes are collected up front so the accounting can report deleted bytes
    ret = storageElement.getFileSize( pfnsToRemove )
    deletedSizes = ret.get( 'Value', {} ).get( 'Successful', {} )
    res = storageElement.removeFile( pfnsToRemove )
    oDataOperation.setEndTime()
    oDataOperation.setValueByKey( 'TransferTime', time.time() - start )
    if not res['OK']:
      oDataOperation.setValueByKey( 'TransferOK', 0 )
      oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
      gDataStoreClient.addRegister( oDataOperation )
      errStr = "__removePhysicalReplica: Failed to remove replicas."
      self.log.debug( errStr, res['Message'] )
      return S_ERROR( errStr )
    else:
      # An already-missing file counts as a successful removal
      for surl, value in res['Value']['Failed'].items():
        if 'No such file or directory' in value:
          res['Value']['Successful'][surl] = surl
          res['Value']['Failed'].pop( surl )
      # Translate surviving SURLs to the registration protocol where possible;
      # fall back to the raw SURL when the translation fails
      for surl in res['Value']['Successful']:
        ret = returnSingleResult( storageElement.getPfnForProtocol( surl, protocol = self.registrationProtocol, withPort = False ) )
        if not ret['OK']:
          res['Value']['Successful'][surl] = surl
        else:
          res['Value']['Successful'][surl] = ret['Value']
      # Only count bytes for files whose removal actually succeeded
      deletedSize = sum( [size for pfn, size in deletedSizes.items() if pfn in res['Value']['Successful']] )
      oDataOperation.setValueByKey( 'TransferSize', deletedSize )
      oDataOperation.setValueByKey( 'TransferOK', len( res['Value']['Successful'] ) )
      gDataStoreClient.addRegister( oDataOperation )
      infoStr = "__removePhysicalReplica: Successfully issued accounting removal request."
      self.log.debug( infoStr )
      return res
#########################################################################
#
# File transfer methods
#
  def put( self, lfn, fileName, diracSE, path = None ):
    """ Put a local file to a Storage Element (no catalogue registration is done here).

    :param self: self reference
    :param str lfn: LFN
    :param str fileName: the full path to the local file
    :param str diracSE: the Storage Element to which to put the file
    :param str path: the path on the storage where the file will be put (if not provided the LFN will be used)
    :return: S_OK( { 'Successful' : { lfn : destPfn }, 'Failed' : { lfn : reason } } )
    """
    # Check that the local file exists
    if not os.path.exists( fileName ):
      errStr = "put: Supplied file does not exist."
      self.log.debug( errStr, fileName )
      return S_ERROR( errStr )
    # If the path is not provided then use the LFN path
    # NOTE(review): `path` is computed but never used below -- confirm whether
    # the PFN generation should take it into account.
    if not path:
      path = os.path.dirname( lfn )
    # Obtain the size of the local file; zero-size uploads are refused
    size = getSize( fileName )
    if size == 0:
      errStr = "put: Supplied file is zero size."
      self.log.debug( errStr, fileName )
      return S_ERROR( errStr )
    ##########################################################
    # Instantiate the destination storage element here.
    storageElement = StorageElement( diracSE )
    res = storageElement.isValid()
    if not res['OK']:
      errStr = "put: The storage element is not currently valid."
      self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
      return S_ERROR( errStr )
    res = returnSingleResult( storageElement.getPfnForLfn( lfn ) )
    if not res['OK']:
      errStr = "put: Failed to generate destination PFN."
      self.log.debug( errStr, res['Message'] )
      return S_ERROR( errStr )
    destPfn = res['Value']
    fileDict = {destPfn:fileName}
    successful = {}
    failed = {}
    ##########################################################
    # Perform the put here.
    startTime = time.time()
    res = returnSingleResult( storageElement.putFile( fileDict ) )
    putTime = time.time() - startTime
    if not res['OK']:
      errStr = "put: Failed to put file to Storage Element."
      failed[lfn] = res['Message']
      self.log.debug( errStr, "%s: %s" % ( fileName, res['Message'] ) )
    else:
      self.log.debug( "put: Put file to storage in %s seconds." % putTime )
      successful[lfn] = destPfn
    resDict = {'Successful': successful, 'Failed':failed}
    return S_OK( resDict )
# def removeReplica(self,lfn,storageElementName,singleFile=False):
# def putReplica(self,lfn,storageElementName,singleFile=False):
# def replicateReplica(self,lfn,size,storageElementName,singleFile=False):
def getActiveReplicas( self, lfns ):
""" Get all the replicas for the SEs which are in Active status for reading.
"""
res = self.getReplicas( lfns, allStatus = False )
if not res['OK']:
return res
replicas = res['Value']
return self.checkActiveReplicas( replicas )
  def checkActiveReplicas( self, replicaDict ):
    """ Check a replica dictionary for active replicas.

        Replicas at Storage Elements whose read access is not usable are
        removed in place from the 'Successful' entries.

        :param dict replicaDict: { 'Successful' : { lfn : { se : pfn } }, 'Failed' : { lfn : reason } }
        :return: S_OK( replicaDict ) with inactive replicas stripped, or S_ERROR on bad input
    """
    if type( replicaDict ) != DictType:
      return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict ) )
    for key in [ 'Successful', 'Failed' ]:
      if not key in replicaDict:
        return S_ERROR( 'Missing key "%s" in replica dictionary' % key )
      if type( replicaDict[key] ) != DictType:
        return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict[key] ) )
    seReadStatus = {}
    for lfn, replicas in replicaDict['Successful'].items():
      if type( replicas ) != DictType:
        # items()/keys() return snapshots on Python 2, so deleting while
        # looping over them is safe here
        del replicaDict['Successful'][ lfn ]
        replicaDict['Failed'][lfn] = 'Wrong replica info'
        continue
      for se in replicas.keys():
        # Fix the caching: query each SE's read status at most once per call
        readStatus = seReadStatus[se] if se in seReadStatus else seReadStatus.setdefault( se, self.__SEActive( se ).get( 'Value', {} ).get( 'Read', False ) )
        if not readStatus:
          replicas.pop( se )
    return S_OK( replicaDict )
def __SEActive( self, se ):
""" check is SE is active """
result = StorageFactory().getStorageName( se )
if not result['OK']:
return S_ERROR( 'SE not known' )
resolvedName = result['Value']
res = self.resourceStatus.getStorageElementStatus( resolvedName, default = None )
if not res[ 'OK' ]:
return S_ERROR( 'SE not known' )
seStatus = { 'Read' : True, 'Write' : True }
if res['Value'][resolvedName].get( 'ReadAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Read' ] = False
if res['Value'][resolvedName].get( 'WriteAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Write' ] = False
return S_OK( seStatus )
  def __initialiseAccountingObject( self, operation, se, files ):
    """ Create a DataOperation accounting record for `operation` towards `se`.

        :param str operation: accounting operation type (e.g. 'removePhysicalReplica')
        :param str se: destination Storage Element name
        :param int files: number of files concerned
        :return: initialised DataOperation object (not yet registered)
    """
    accountingDict = {}
    accountingDict['OperationType'] = operation
    # Fall back to 'system' when no proxy information is available
    result = getProxyInfo()
    if not result['OK']:
      userName = 'system'
    else:
      userName = result['Value'].get( 'username', 'unknown' )
    accountingDict['User'] = userName
    accountingDict['Protocol'] = 'DataManager'
    accountingDict['RegistrationTime'] = 0.0
    accountingDict['RegistrationOK'] = 0
    accountingDict['RegistrationTotal'] = 0
    accountingDict['Destination'] = se
    accountingDict['TransferTotal'] = files
    accountingDict['TransferOK'] = files
    # NOTE(review): TransferSize is initialised to the file *count*; callers
    # overwrite it with real byte counts where known -- confirm this default.
    accountingDict['TransferSize'] = files
    accountingDict['TransferTime'] = 0.0
    accountingDict['FinalStatus'] = 'Successful'
    accountingDict['Source'] = DIRAC.siteName()
    oDataOperation = DataOperation()
    oDataOperation.setValuesFromDict( accountingDict )
    return oDataOperation
##########################################
#
# Defunct methods only there before checking backward compatability
#
  def getReplicas( self, lfns, allStatus = True ):
    """ Get replicas of the supplied LFNs from the file catalogue.

        Unless self.useCatalogPFN is set, the catalogue PFNs are replaced in
        place by PFNs recomputed from the LFN by each Storage Element.

        :param mixed lfns: LFN(s) to look up
        :param bool allStatus: include replicas whatever their status
    """
    res = self.fc.getReplicas( lfns, allStatus = allStatus )
    if not self.useCatalogPFN:
      if res['OK']:
        se_lfn = {}
        catalogReplicas = res['Value']['Successful']
        # We group the query to getPfnForLfn by storage element to gain in speed
        for lfn in catalogReplicas:
          for se in catalogReplicas[lfn]:
            se_lfn.setdefault( se, [] ).append( lfn )
        for se in se_lfn:
          seObj = StorageElement( se )
          # LFNs for which the SE cannot build a PFN keep the catalogue PFN
          succPfn = seObj.getPfnForLfn( se_lfn[se] ).get( 'Value', {} ).get( 'Successful', {} )
          for lfn in succPfn:
            # catalogReplicas still points res["value"]["Successful"] so res will be updated
            catalogReplicas[lfn][se] = succPfn[lfn]
    return res
##################################################################################################3
# Methods from the catalogToStorage. It would all work with the direct call to the SE, but this checks
# first if the replica is known to the catalog
def __executeIfReplicaExists( self, storageElementName, lfn, method, **argsDict ):
""" a simple wrapper that allows replica querying then perform the StorageElement operation
:param self: self reference
:param str storageElementName: DIRAC SE name
:param mixed lfn: a LFN str, list of LFNs or dict with LFNs as keys
"""
# # default value
argsDict = argsDict if argsDict else {}
# # get replicas for lfn
res = FileCatalog( vo = self.vo ).getReplicas( lfn )
if not res["OK"]:
errStr = "_callReplicaSEFcn: Completely failed to get replicas for LFNs."
self.log.debug( errStr, res["Message"] )
return res
# # returned dict, get failed replicase
retDict = { "Failed": res["Value"]["Failed"],
"Successful" : {} }
# # print errors
for lfn, reason in retDict["Failed"].items():
self.log.error( "_callReplicaSEFcn: Failed to get replicas for file.", "%s %s" % ( lfn, reason ) )
# # good replicas
lfnReplicas = res["Value"]["Successful"]
# # store PFN to LFN mapping
pfnDict = {}
se = None # Placeholder for the StorageElement object
for lfn, replicas in lfnReplicas.items():
if storageElementName in replicas:
if self.useCatalogPFN:
pfn = replicas[storageElementName]
else:
se = se if se else StorageElement( storageElementName )
res = se.getPfnForLfn( lfn )
pfn = res.get( 'Value', {} ).get( 'Successful', {} ).get( lfn, replicas[storageElementName] )
pfnDict[pfn] = lfn
else:
errStr = "_callReplicaSEFcn: File hasn't got replica at supplied Storage Element."
self.log.error( errStr, "%s %s" % ( lfn, storageElementName ) )
retDict["Failed"][lfn] = errStr
# # call StorageElement function at least
se = se = se if se else StorageElement( storageElementName )
fcn = getattr( se, method )
res = fcn( pfnDict.keys(), **argsDict )
# # check result
if not res["OK"]:
errStr = "_callReplicaSEFcn: Failed to execute %s StorageElement method." % method
self.log.error( errStr, res["Message"] )
return res
# # filter out failed and successful
for pfn, pfnRes in res["Value"]["Successful"].items():
retDict["Successful"][pfnDict[pfn]] = pfnRes
for pfn, errorMessage in res["Value"]["Failed"].items():
retDict["Failed"][pfnDict[pfn]] = errorMessage
return S_OK( retDict )
  def getReplicaIsFile( self, lfn, storageElementName ):
    """ determine whether the supplied lfns are files at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "isFile" )
  def getReplicaSize( self, lfn, storageElementName ):
    """ get the size of files for the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "getFileSize" )
  def getReplicaAccessUrl( self, lfn, storageElementName ):
    """ get the access url for lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "getAccessUrl" )
  def getReplicaMetadata( self, lfn, storageElementName ):
    """ get the file metadata for lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "getFileMetadata" )
  def prestageReplica( self, lfn, storageElementName, lifetime = 86400 ):
    """ issue a prestage requests for the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :param int lifetime: prestage lifetime in seconds (default 24h)
    """
    return self.__executeIfReplicaExists( storageElementName, lfn,
                                          "prestageFile", lifetime = lifetime )
  def pinReplica( self, lfn, storageElementName, lifetime = 86400 ):
    """ pin the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :param int lifetime: pin lifetime in seconds (default 24h)
    """
    return self.__executeIfReplicaExists( storageElementName, lfn,
                                          "pinFile", lifetime = lifetime )
  def releaseReplica( self, lfn, storageElementName ):
    """ release pins for the lfns at the supplied StorageElement

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    """
    return self.__executeIfReplicaExists( storageElementName, lfn, "releaseFile" )
  def getReplica( self, lfn, storageElementName, localPath = False ):
    """ copy replicas from DIRAC SE to local directory

    :param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
    :param str storageElementName: DIRAC SE name
    :param mixed localPath: path in the local file system, if False, os.getcwd() will be used
    """
    return self.__executeIfReplicaExists( storageElementName, lfn,
                                          "getFile", localPath = localPath )
|
titasakgm/brc-stock | refs/heads/master | openerp/addons/plugin/plugin_handler.py | 11 | '''
Created on 18 oct. 2011
@author: openerp
'''
from openerp.osv import osv
from openerp.tools.translate import _
class plugin_handler(osv.osv_memory):
    """ Backend entry points used by the mail plugins (Outlook/Thunderbird)
    to push mails into OpenERP documents and create partners/contacts. """
    _name = 'plugin.handler'

    def _make_url(self, cr, uid, res_id, model, context=None):
        """
        Build a web-client URL opening the given document.

        @param res_id: on which document the message is pushed
        @param model: name of the document linked with the mail
        @return url (or the empty parameter value when web.base.url is unset)
        """
        base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context)
        if base_url:
            user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
            # NOTE(review): the user's password is embedded in the 'key' URL
            # parameter; URLs may end up in logs/history -- confirm this is acceptable.
            base_url += '/login?db=%s&login=%s&key=%s#id=%s&model=%s' % (cr.dbname, user.login, user.password, res_id, model)
        return base_url

    def is_installed(self, cr, uid):
        # Probe used by the plugin to check the module is available server-side.
        return True

    def partner_get(self, cr, uid, address_email):
        # Look up a partner by (partial) email match; res_id is 0 when none is found.
        partner_obj = self.pool.get('res.partner')
        partner_ids = partner_obj.search(cr, uid, [('email', 'like', address_email)])
        res_id = partner_ids and partner_ids[0] or 0
        url = self._make_url(cr, uid, res_id, 'res.partner')
        return ('res.partner', res_id, url)

    def document_get(self, cr, uid, email):
        """
        @param email: email is a standard RFC2822 email message
        @return tuple (model_name, res_id, url, name_get) describing the
            document already linked with the mail; res_id = 0 when none is found
        """
        mail_message_obj = self.pool.get('mail.message')
        model = ""
        res_id = 0
        url = ""
        name = ""
        msg = self.pool.get('mail.thread').message_parse(cr, uid, email)
        parent_id = msg.get('parent_id', False)
        message_id = msg.get('message_id')
        msg_id = False
        if message_id:
            msg_ids = mail_message_obj.search(cr, uid, [('message_id', '=', message_id)])
            msg_id = len(msg_ids) and msg_ids[0] or False
        # Fall back to the thread parent when the exact message is unknown
        if not msg_id and parent_id:
            msg_id = parent_id
        if msg_id:
            msg = mail_message_obj.browse(cr, uid, msg_id)
            res_id = msg.res_id
            model = msg.model
            url = self._make_url(cr, uid, res_id, model)
            name = self.pool.get(model).name_get(cr, uid, [res_id])[0][1]
        return (model, res_id, url, name)

    def document_type(self, cr, uid, context=None):
        """
        Return the list of available model to push
        res.partner is a special case
        otherwise all model that inherit from mail.thread
        ['res.partner', 'project.issue']
        """
        mail_thread_obj = self.pool.get('mail.thread')
        doc_dict = mail_thread_obj.message_capable_models(cr, uid, context)
        doc_dict['res.partner'] = "Partner"
        return doc_dict.items()

    # Can be used where search record was used
    def list_document_get(self, cr, uid, model, name):
        """
        This function returns the result of name_search on the object model
        @param model: the name of the model
        @param name: the name of the document
        @return: the result of name_search, a list of tuples
            [(id, 'name')]
        """
        return self.pool.get(model).name_search(cr, uid, name)

    def push_message(self, cr, uid, model, email, res_id=0):
        """
        @param email: email is a standard RFC2822 email message
        @param model: On which model the message is pushed
        @param res_id: on which document the message is pushed; if res_id = 0 a new document is created
        @return tuple (model, res_id, url, notify)
        """
        mail_message = self.pool.get('mail.message')
        model_obj = self.pool.get(model)
        msg = self.pool.get('mail.thread').message_parse(cr, uid, email)
        # NOTE(review): message_parse exposes the id under 'message_id'
        # (see document_get above); 'message-id' therefore looks like it is
        # always None here -- confirm against upstream before relying on it.
        message_id = msg.get('message-id')
        mail_ids = mail_message.search(cr, uid, [('message_id', '=', message_id), ('res_id', '=', res_id), ('model', '=', model)])
        if message_id and mail_ids:
            # Mail already attached: point the plugin at the existing document
            mail_record = mail_message.browse(cr, uid, mail_ids)[0]
            res_id = mail_record.res_id
            notify = _("Email already pushed")
        elif res_id == 0:
            if model == 'res.partner':
                notify = _('Use the Partner button to create a new partner')
            else:
                # Let the mail gateway create a brand new document from the mail
                res_id = model_obj.message_process(cr, uid, model, email)
                notify = _("Mail successfully pushed, a new %s has been created.") % model
        else:
            # Attach the mail as a message on the existing document
            model_obj.message_post(cr, uid, [res_id],
                                   body=msg.get('body'),
                                   subject=msg.get('subject'),
                                   type='comment' if model == 'res.partner' else 'email',
                                   parent_id=msg.get('parent_id'),
                                   attachments=msg.get('attachments'))
            notify = _("Mail successfully pushed")
        url = self._make_url(cr, uid, res_id, model)
        return (model, res_id, url, notify)

    def contact_create(self, cr, uid, data, partner_id, context=None):
        """
        @param data: the data used to create the res.partner
            [('field_name', value)], field name is required
        @param partner_id: On which partner the address is attached
            if partner_id = 0 then create a new partner with the same name as the address
        @return: the partner_id sent or created; this allows the plugin to open the right partner page
        """
        partner_obj = self.pool.get('res.partner')
        dictcreate = dict(data)
        if partner_id:
            is_company = partner_obj.browse(cr, uid, partner_id, context=context).is_company
            if is_company:
                # Attach the new contact to the company
                dictcreate['parent_id'] = partner_id
        partner_id = partner_obj.create(cr, uid, dictcreate)
        url = self._make_url(cr, uid, partner_id, 'res.partner')
        return ('res.partner', partner_id, url)

    # Specific to outlook: rfc822 is not available so we split in arguments headers, body, attachments
    def push_message_outlook(self, cr, uid, model, headers, res_id=0, body=False, body_html=False, attachments=False):
        """ Outlook variant of push_message: the mail arrives pre-split into
        headers/body/attachments instead of a single RFC2822 payload. """
        # ----------------------------------------
        # solution 1
        # construct a fake rfc822 from the separated arguments
        #m = email.asdfsadf
        # use the push_message method
        #self.push_message(m)
        # ----------------------------------------
        # solution 2
        # use self.push_message only with header and body,
        # then add the attachments yourself afterwards (done below)
        mail_message = self.pool.get('mail.message')
        ir_attachment_obj = self.pool.get('ir.attachment')
        attach_ids = []
        msg = self.pool.get('mail.thread').message_parse(cr, uid, headers)
        # NOTE(review): same 'message-id' vs 'message_id' key question as in push_message.
        message_id = msg.get('message-id')
        push_mail = self.push_message(cr, uid, model, headers, res_id)
        res_id = push_mail[1]
        model = push_mail[0]
        notify = push_mail[3]
        for name in attachments.keys():
            # Reuse an identical existing attachment instead of duplicating it
            attachment_ids = ir_attachment_obj.search(cr, uid, [('res_model', '=', model), ('res_id', '=', res_id), ('datas_fname', '=', name)])
            if attachment_ids:
                attach_ids.append(attachment_ids[0])
            else:
                vals = {"res_model": model, "res_id": res_id, "name": name, "datas": attachments[name], "datas_fname": name}
                attach_ids.append(ir_attachment_obj.create(cr, uid, vals))
        mail_ids = mail_message.search(cr, uid, [('message_id', '=', message_id), ('res_id', '=', res_id), ('model', '=', model)])
        if mail_ids:
            mail_message.write(cr, uid, mail_ids[0], {'attachment_ids': [(6, 0, attach_ids)], 'body': body, 'body_html': body_html})
        url = self._make_url(cr, uid, res_id, model)
        return (model, res_id, url, notify)
|
pratikmallya/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/utils/wkt.py | 219 | """
Utilities for manipulating Geometry WKT.
"""
from django.utils import six
def precision_wkt(geom, prec):
    """
    Returns WKT text of the geometry according to the given precision (an
    integer or a string). If the precision is an integer, then the decimal
    places of coordinates WKT will be truncated to that number:

    >>> pnt = Point(5, 23)
    >>> pnt.wkt
    'POINT (5.0000000000000000 23.0000000000000000)'
    >>> precision_wkt(pnt, 1)
    'POINT (5.0 23.0)'

    If the precision is a string, it must be valid Python format string
    (e.g., '%20.7f') -- thus, you should know what you're doing.

    Raises TypeError for an unsupported precision argument or geometry type.
    """
    if isinstance(prec, int):
        num_fmt = '%%.%df' % prec
    elif isinstance(prec, six.string_types):
        num_fmt = prec
    else:
        # Previously a bare TypeError with no message.
        raise TypeError('The precision argument must be an integer or a format string, got %s.'
                        % type(prec).__name__)

    # TODO: Support 3D geometries.
    coord_fmt = ' '.join([num_fmt, num_fmt])

    def formatted_coords(coords):
        # Only the X/Y components of each coordinate are formatted.
        return ','.join([coord_fmt % c[:2] for c in coords])

    def formatted_poly(poly):
        return ','.join(['(%s)' % formatted_coords(r) for r in poly])

    def formatted_geom(g):
        # Generator yielding the WKT fragments for geometry `g`; recurses
        # into the members of a GEOMETRYCOLLECTION.
        gtype = str(g.geom_type).upper()
        yield '%s(' % gtype
        if gtype == 'POINT':
            yield formatted_coords((g.coords,))
        elif gtype in ('LINESTRING', 'LINEARRING'):
            yield formatted_coords(g.coords)
        elif gtype in ('POLYGON', 'MULTILINESTRING'):
            yield formatted_poly(g)
        elif gtype == 'MULTIPOINT':
            yield formatted_coords(g.coords)
        elif gtype == 'MULTIPOLYGON':
            yield ','.join(['(%s)' % formatted_poly(p) for p in g])
        elif gtype == 'GEOMETRYCOLLECTION':
            yield ','.join([''.join([wkt for wkt in formatted_geom(child)]) for child in g])
        else:
            # Previously a bare TypeError with no message.
            raise TypeError('Unsupported geometry type: %s' % gtype)
        yield ')'

    return ''.join([wkt for wkt in formatted_geom(geom)])
|
davidfraser/sqlalchemy | refs/heads/master | lib/sqlalchemy/testing/provision.py | 47 | from sqlalchemy.engine import url as sa_url
from sqlalchemy import text
from sqlalchemy.util import compat
from . import config, engines
FOLLOWER_IDENT = None
class register(object):
    """Registry dispatching provisioning functions by database backend name.

    A default implementation is registered with the ``@register.init``
    decorator; backend-specific overrides are added with
    ``@<fn>.for_db("<backend>")``.  Calling the resulting object looks up
    the backend name of the given config/URL and dispatches accordingly.
    """
    def __init__(self):
        # backend name (or "*" for the fallback) -> provisioning function
        self.fns = {}
    @classmethod
    def init(cls, fn):
        # Register ``fn`` as the catch-all ("*") implementation.
        return register().for_db("*")(fn)
    def for_db(self, dbname):
        def decorate(fn):
            self.fns[dbname] = fn
            return self
        return decorate
    def __call__(self, cfg, *arg):
        # ``cfg`` may be a URL string, a URL object, or a test Config.
        if isinstance(cfg, compat.string_types):
            url = sa_url.make_url(cfg)
        elif isinstance(cfg, sa_url.URL):
            url = cfg
        else:
            url = cfg.db.url
        backend = url.get_backend_name()
        if backend in self.fns:
            return self.fns[backend](cfg, *arg)
        else:
            return self.fns['*'](cfg, *arg)
def create_follower_db(follower_ident):
    """Create the follower database named ``follower_ident`` on every
    distinct database host referenced by the test configs."""
    for cfg in _configs_for_db_operation():
        _create_db(cfg, cfg.db, follower_ident)
def configure_follower(follower_ident):
    """Apply per-backend follower settings to every registered config."""
    for cfg in config.Config.all_configs():
        _configure_follower(cfg, follower_ident)
def setup_config(db_url, options, file_config, follower_ident):
    """Build, smoke-test and register a test Config for ``db_url``,
    rewriting the URL for the follower database when ``follower_ident``
    is given."""
    if follower_ident:
        db_url = _follower_url_from_main(db_url, follower_ident)
    db_opts = {}
    _update_db_opts(db_url, db_opts)
    eng = engines.testing_engine(db_url, db_opts)
    # Verify connectivity before registering the config
    eng.connect().close()
    cfg = config.Config.register(eng, db_opts, options, file_config)
    if follower_ident:
        _configure_follower(cfg, follower_ident)
    return cfg
def drop_follower_db(follower_ident):
    """Drop the follower database named ``follower_ident`` on every
    distinct database host referenced by the test configs."""
    for cfg in _configs_for_db_operation():
        _drop_db(cfg, cfg.db, follower_ident)
def _configs_for_db_operation():
    """Yield one config per distinct (backend, user, host, database) so a
    DB-level operation is run once per actual server, not once per config.

    Engines are disposed before and after so no stale pooled connections
    interfere with CREATE/DROP DATABASE.
    """
    hosts = set()
    for cfg in config.Config.all_configs():
        cfg.db.dispose()
    for cfg in config.Config.all_configs():
        url = cfg.db.url
        backend = url.get_backend_name()
        host_conf = (
            backend,
            url.username, url.host, url.database)
        if host_conf not in hosts:
            yield cfg
            hosts.add(host_conf)
    for cfg in config.Config.all_configs():
        cfg.db.dispose()
@register.init
def _create_db(cfg, eng, ident):
    """Fallback: backends without a registered creator are unsupported."""
    raise NotImplementedError("no DB creation routine for cfg: %s" % eng.url)
@register.init
def _drop_db(cfg, eng, ident):
    """Fallback: backends without a registered dropper are unsupported."""
    raise NotImplementedError("no DB drop routine for cfg: %s" % eng.url)
@register.init
def _update_db_opts(db_url, db_opts):
    """Fallback: no engine options to adjust by default."""
    pass
@register.init
def _configure_follower(cfg, ident):
    """Fallback: no follower-specific configuration by default."""
    pass
@register.init
def _follower_url_from_main(url, ident):
    """Default: point the URL at a database named after the follower ident.

    NOTE(review): if ``url`` is already a URL object, make_url may hand it
    back as-is, in which case the caller's URL is mutated -- confirm.
    """
    url = sa_url.make_url(url)
    url.database = ident
    return url
@_update_db_opts.for_db("mssql")
def _mssql_update_db_opts(db_url, db_opts):
    # Disable legacy schema aliasing for the MSSQL test engine.
    db_opts['legacy_schema_aliasing'] = False
@_follower_url_from_main.for_db("sqlite")
def _sqlite_follower_url_from_main(url, ident):
    """SQLite: memory databases are shared as-is; file databases get a
    per-follower ``<ident>.db`` file."""
    url = sa_url.make_url(url)
    if not url.database or url.database == ':memory:':
        return url
    else:
        return sa_url.make_url("sqlite:///%s.db" % ident)
@_create_db.for_db("postgresql")
def _pg_create_db(cfg, eng, ident):
    """Create follower database ``ident`` as a template copy of the
    currently-connected database."""
    # CREATE DATABASE cannot run inside a transaction, hence AUTOCOMMIT
    with eng.connect().execution_options(
            isolation_level="AUTOCOMMIT") as conn:
        try:
            # Best effort: clear out any leftover database from a prior run
            _pg_drop_db(cfg, conn, ident)
        except Exception:
            pass
        currentdb = conn.scalar("select current_database()")
        conn.execute("CREATE DATABASE %s TEMPLATE %s" % (ident, currentdb))
@_create_db.for_db("mysql")
def _mysql_create_db(cfg, eng, ident):
    """Create follower database ``ident`` plus the two auxiliary test
    schemas the suite expects (see _mysql_configure_follower)."""
    with eng.connect() as conn:
        try:
            # Best effort: clear out any leftover databases from a prior run
            _mysql_drop_db(cfg, conn, ident)
        except Exception:
            pass
        conn.execute("CREATE DATABASE %s" % ident)
        conn.execute("CREATE DATABASE %s_test_schema" % ident)
        conn.execute("CREATE DATABASE %s_test_schema_2" % ident)
@_configure_follower.for_db("mysql")
def _mysql_configure_follower(config, ident):
    # Point the config at the per-follower auxiliary schemas created above.
    config.test_schema = "%s_test_schema" % ident
    config.test_schema_2 = "%s_test_schema_2" % ident
@_create_db.for_db("sqlite")
def _sqlite_create_db(cfg, eng, ident):
    # SQLite databases are created lazily on first connect; nothing to do.
    pass
@_drop_db.for_db("postgresql")
def _pg_drop_db(cfg, eng, ident):
    """Drop follower database ``ident``, first terminating any other
    sessions connected to it (DROP DATABASE fails while connections exist)."""
    with eng.connect().execution_options(
            isolation_level="AUTOCOMMIT") as conn:
        conn.execute(
            text(
                "select pg_terminate_backend(pid) from pg_stat_activity "
                "where usename=current_user and pid != pg_backend_pid() "
                "and datname=:dname"
            ), dname=ident)
        conn.execute("DROP DATABASE %s" % ident)
@_drop_db.for_db("sqlite")
def _sqlite_drop_db(cfg, eng, ident):
    # The follower .db file is deliberately left on disk.
    pass
    #os.remove("%s.db" % ident)
@_drop_db.for_db("mysql")
def _mysql_drop_db(cfg, eng, ident):
    """Best-effort drop of the follower database and its auxiliary test
    schemas; each drop failure is ignored independently."""
    with eng.connect() as conn:
        try:
            conn.execute("DROP DATABASE %s_test_schema" % ident)
        except Exception:
            pass
        try:
            conn.execute("DROP DATABASE %s_test_schema_2" % ident)
        except Exception:
            pass
        try:
            conn.execute("DROP DATABASE %s" % ident)
        except Exception:
            pass
|
edcast-inc/edx-platform-edcast | refs/heads/master | lms/djangoapps/open_ended_grading/staff_grading.py | 192 | """
LMS part of instructor grading:
- views + ajax handling
- calls the instructor grading service
"""
import logging
log = logging.getLogger(__name__)
class StaffGrading(object):
    """Interface wrapper for staff grading of open-ended submissions.

    Exposes ``get_html`` for rendering; the view is currently a placeholder.
    """

    def __init__(self, course):
        # Course whose submissions are being graded.
        self.course = course

    def get_html(self):
        """Return the (placeholder) HTML for the staff grading view."""
        return "<b>Instructor grading!</b>"
AnhellO/DAS_Sistemas | refs/heads/development | Ago-Dic-2017/Enrique Castillo/Ordinario/test/Lib/site-packages/django/contrib/gis/gdal/prototypes/srs.py | 88 | from ctypes import POINTER, c_char_p, c_int, c_void_p
from django.contrib.gis.gdal.libgdal import lgdal, std_call
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, int_output, srs_output, string_output,
void_output,
)
# Shortcut generation for routines with known parameters.
def srs_double(f):
    """
    Create a function prototype for the OSR routines that take
    the OSRSpatialReference object and return a double value.

    The POINTER(c_int) argument receives the OGR error flag, which is
    checked (errcheck=True) by the generated wrapper.
    """
    return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True)
def units_func(f):
    """
    Create a ctypes function prototype for OSR units functions, e.g.,
    OSRGetAngularUnits, OSRGetLinearUnits.

    The c_char_p pointer argument receives the unit name (strarg=True).
    """
    return double_output(f, [c_void_p, POINTER(c_char_p)], strarg=True)
# Creation & destruction.
clone_srs = srs_output(std_call('OSRClone'), [c_void_p])
new_srs = srs_output(std_call('OSRNewSpatialReference'), [c_char_p])
release_srs = void_output(lgdal.OSRRelease, [c_void_p], errcheck=False)
destroy_srs = void_output(std_call('OSRDestroySpatialReference'), [c_void_p], errcheck=False)
srs_validate = void_output(lgdal.OSRValidate, [c_void_p])
# Getting the semi_major, semi_minor, and flattening functions.
semi_major = srs_double(lgdal.OSRGetSemiMajor)
semi_minor = srs_double(lgdal.OSRGetSemiMinor)
invflattening = srs_double(lgdal.OSRGetInvFlattening)
# WKT, PROJ, EPSG, XML importation routines.
from_wkt = void_output(lgdal.OSRImportFromWkt, [c_void_p, POINTER(c_char_p)])
from_proj = void_output(lgdal.OSRImportFromProj4, [c_void_p, c_char_p])
from_epsg = void_output(std_call('OSRImportFromEPSG'), [c_void_p, c_int])
from_xml = void_output(lgdal.OSRImportFromXML, [c_void_p, c_char_p])
from_user_input = void_output(std_call('OSRSetFromUserInput'), [c_void_p, c_char_p])
# Morphing to/from ESRI WKT.
morph_to_esri = void_output(lgdal.OSRMorphToESRI, [c_void_p])
morph_from_esri = void_output(lgdal.OSRMorphFromESRI, [c_void_p])
# Identifying the EPSG code of the spatial reference.
identify_epsg = void_output(lgdal.OSRAutoIdentifyEPSG, [c_void_p])
# Getting the angular_units, linear_units functions.
linear_units = units_func(lgdal.OSRGetLinearUnits)
angular_units = units_func(lgdal.OSRGetAngularUnits)
# For exporting to WKT, PROJ.4, "Pretty" WKT, and XML.
to_wkt = string_output(std_call('OSRExportToWkt'), [c_void_p, POINTER(c_char_p)], decoding='utf-8')
to_proj = string_output(std_call('OSRExportToProj4'), [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_pretty_wkt = string_output(
    std_call('OSRExportToPrettyWkt'),
    [c_void_p, POINTER(c_char_p), c_int], offset=-2, decoding='utf-8'
)
# Memory leak fixed in GDAL 1.5; still exists in 1.4.
to_xml = string_output(lgdal.OSRExportToXML, [c_void_p, POINTER(c_char_p), c_char_p], offset=-2, decoding='utf-8')
# String attribute retrieval routines.
get_attr_value = const_string_output(std_call('OSRGetAttrValue'), [c_void_p, c_char_p, c_int], decoding='utf-8')
get_auth_name = const_string_output(lgdal.OSRGetAuthorityName, [c_void_p, c_char_p], decoding='ascii')
get_auth_code = const_string_output(lgdal.OSRGetAuthorityCode, [c_void_p, c_char_p], decoding='ascii')
# SRS properties (predicates returning int).
isgeographic = int_output(lgdal.OSRIsGeographic, [c_void_p])
islocal = int_output(lgdal.OSRIsLocal, [c_void_p])
isprojected = int_output(lgdal.OSRIsProjected, [c_void_p])
# Coordinate transformation creation/destruction.
new_ct = srs_output(std_call('OCTNewCoordinateTransformation'), [c_void_p, c_void_p])
destroy_ct = void_output(std_call('OCTDestroyCoordinateTransformation'), [c_void_p], errcheck=False)
|
gdbdzgd/namebench | refs/heads/master | nb_third_party/jinja2/sandbox.py | 284 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import operator
from jinja2.runtime import Undefined
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2.utils import FunctionType, MethodType, TracebackType, CodeType, \
FrameType, GeneratorType
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
'func_defaults', 'func_globals'])
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
module='jinja2.sandbox')
from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_set_types += (UserList,)
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
try:
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)
except ImportError:
pass
#: table mapping mutable builtin container families to the names of the
#: methods that mutate them in place; consumed by modifies_known_mutable()
_mutable_spec = (
    (_mutable_set_types, frozenset([
        'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
        'symmetric_difference_update', 'update'
    ])),
    (_mutable_mapping_types, frozenset([
        'clear', 'pop', 'popitem', 'setdefault', 'update'
    ])),
    (_mutable_sequence_types, frozenset([
        'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
    ])),
    (deque, frozenset([
        'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
        'popleft', 'remove', 'rotate'
    ]))
)
def safe_range(*args):
    """Bounded replacement for ``range``: refuses to build a range of more
    than ``MAX_RANGE`` items so templates cannot allocate huge sequences.
    """
    result = xrange(*args)
    if len(result) <= MAX_RANGE:
        return result
    raise OverflowError('range too big, maximum size for range is %d' %
                        MAX_RANGE)
def unsafe(f):
    """Decorator that flags *f* as unsafe to call from sandboxed code::

        @unsafe
        def delete(self):
            pass
    """
    setattr(f, 'unsafe_callable', True)
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overriden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(lambda: None, "func_code")
    True
    >>> is_internal_attribute((lambda x:x).func_code, 'co_code')
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # per-type blacklists first; anything dunder-prefixed is internal anyway
    if isinstance(obj, FunctionType) and attr in UNSAFE_FUNCTION_ATTRIBUTES:
        return True
    if isinstance(obj, MethodType) and (attr in UNSAFE_FUNCTION_ATTRIBUTES or
                                        attr in UNSAFE_METHOD_ATTRIBUTES):
        return True
    if isinstance(obj, type) and attr == 'mro':
        return True
    if isinstance(obj, (CodeType, TracebackType, FrameType)):
        return True
    if isinstance(obj, GeneratorType) and attr == 'gi_frame':
        return True
    return attr.startswith('__')
def modifies_known_mutable(obj, attr):
    """Check whether calling *attr* on the builtin mutable object *obj*
    (list, dict, set or deque) would modify it.  The "user"-versions of the
    objects (`sets.Set`, `UserDict.*` etc.) and, from Python 2.6 onwards,
    the abstract base classes `MutableSet`, `MutableMapping` and
    `MutableSequence` are recognized as well.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    Unsupported objects (such as unicode) simply yield `False`:

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    for known_types, mutating_methods in _mutable_spec:
        if isinstance(obj, known_types):
            return attr in mutating_methods
    return False
class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """
    #: flag checked by the compiler to emit sandboxed instructions
    sandboxed = True

    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        # replace the builtin range with the bounded variant so templates
        # cannot allocate arbitrarily large lists
        self.globals['range'] = safe_range

    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith('_') or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj):
        """Check if an object is safely callable.  Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True.  Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (getattr(obj, 'unsafe_callable', False) or
                    getattr(obj, 'alters_data', False))

    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code.

        Falls back from item access to (safe) attribute access when the
        subscript fails and the argument is a string.
        """
        try:
            return obj[argument]
        except (TypeError, LookupError):
            if isinstance(argument, basestring):
                try:
                    attr = str(argument)
                except Exception:
                    # fix: this was a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit; only conversion failures
                    # (e.g. UnicodeEncodeError) should be ignored here
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # fall back to item access when the attribute does not exist
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined('access to attribute %r of %r '
                              'object is unsafe.' % (
            attribute,
            obj.__class__.__name__
        ), name=attribute, obj=obj, exc=SecurityError)

    def call(__self, __context, __obj, *args, **kwargs):
        """Call an object from sandboxed code."""
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError('%r is not safely callable' % (__obj,))
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj, attr, value):
        """Safe only when the base class allows the attribute and it does
        not mutate a known mutable container."""
        if SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
            return not modifies_known_mutable(obj, attr)
        return False
|
adammenges/statsmodels | refs/heads/master | docs/source/plots/graphics_boxplot_beanplot.py | 44 | # -*- coding: utf-8 -*-
"""
Created on Fri May 04 00:22:40 2012
Author: Ralf Gommers
"""
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
# Load the ANES96 survey dataset: endog is party identification (0-6),
# exog includes respondent age.
data = sm.datasets.anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
          "Independent-Indpendent", "Independent-Republican",
          "Weak Republican", "Strong Republican"]

plt.rcParams['figure.subplot.bottom'] = 0.23  # keep labels visible

# One array of respondent ages per party-identification group.
age = [data.exog['age'][data.endog == id] for id in party_ID]
fig = plt.figure()
ax = fig.add_subplot(111)
# Draw the bean plot; rotate the long group labels so they fit.
sm.graphics.beanplot(age, ax=ax, labels=labels,
                     plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
                                'label_fontsize':'small',
                                'label_rotation':30})
ax.set_xlabel("Party identification of respondent.")
ax.set_ylabel("Age")
#plt.show()
|
elventear/ansible | refs/heads/devel | lib/ansible/modules/network/junos/junos_netconf.py | 8 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'core',
'version': '1.0'
}
DOCUMENTATION = """
---
module: junos_netconf
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Configures the Junos Netconf system service
description:
- This module provides an abstraction that enables and configures
the netconf system service running on Junos devices. This module
can be used to easily enable the Netconf API. Netconf provides
a programmatic interface for working with configuration and state
resources as defined in RFC 6242.
extends_documentation_fragment: junos
options:
netconf_port:
description:
- This argument specifies the port the netconf service should
listen on for SSH connections. The default port as defined
in RFC 6242 is 830.
required: false
default: 830
aliases: ['listens_on']
version_added: "2.2"
state:
description:
- Specifies the state of the C(junos_netconf) resource on
the remote device. If the I(state) argument is set to
I(present) the netconf service will be configured. If the
I(state) argument is set to I(absent) the netconf service
will be removed from the configuration.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: enable netconf service on port 830
junos_netconf:
listens_on: 830
state: present
- name: disable netconf service
junos_netconf:
state: absent
"""
RETURN = """
commands:
description: Returns the command sent to the remote device
returned: when changed is True
type: str
sample: 'set system services netconf ssh port 830'
"""
import re
from ansible.module_utils.junos import load_config, get_config
from ansible.module_utils.junos import junos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
def map_obj_to_commands(updates, module):
    """Return the Junos configuration commands needed to move the device
    from the current state (*have*) to the desired state (*want*).

    :param updates: tuple of (want, have) state dicts
    :param module: the AnsibleModule instance (unused here)
    :return: list of command strings (empty when no change is needed)
    """
    want, have = updates
    commands = list()

    if want['state'] == 'absent':
        # fix: previously, when the service was already absent, the port
        # comparison branch still emitted a `set` command; only delete when
        # the service is actually configured, otherwise do nothing
        if have['state'] == 'present':
            commands.append('delete system services netconf')
    elif have['state'] == 'absent':
        commands.append(
            'set system services netconf ssh port %s' % want['netconf_port']
        )
    elif want['netconf_port'] != have.get('netconf_port'):
        # service enabled but listening on the wrong port
        commands.append(
            'set system services netconf ssh port %s' % want['netconf_port']
        )
    return commands
def parse_port(config):
    """Extract the configured netconf SSH port from *config* text.

    Returns the port as an int, or None when no port is present.
    """
    found = re.search(r'port (\d+)', config)
    return int(found.group(1)) if found else None
def map_config_to_obj(module):
    """Read the current netconf service configuration from the device and
    normalize it into a state dict ({'state': ..., 'netconf_port': ...})."""
    config = get_config(module, ['system services netconf'])
    state = {'state': 'absent'}
    if config:
        state['state'] = 'present'
        state['netconf_port'] = parse_port(config)
    return state
def validate_netconf_port(value, module):
    """Fail the module when *value* is not a valid TCP port (1-65535)."""
    if value < 1 or value > 65535:
        module.fail_json(msg='netconf_port must be between 1 and 65535')
def map_params_to_obj(module):
    """Build the desired-state dict from the module parameters, running the
    matching ``validate_<key>`` function (when one exists) on each value."""
    obj = {
        'netconf_port': module.params['netconf_port'],
        'state': module.params['state']
    }

    for key, value in iteritems(obj):
        # look up an optional validator function by naming convention
        validator = globals().get('validate_%s' % key)
        if value and validator:
            validator(value, module)

    return obj
def main():
    """main entry point for module execution
    """
    argument_spec = dict(
        netconf_port=dict(type='int', default=830, aliases=['listens_on']),
        state=dict(default='present', choices=['present', 'absent']),
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    result = {'changed': False, 'warnings': warnings}

    # desired state (from parameters) vs. current state (from the device)
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)

    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands

    if commands:
        # in check mode, load the candidate config for the diff but do not commit
        commit = not module.check_mode
        diff = load_config(module, commands, commit=commit)
        # fix: module._diff was previously tested twice in a redundant nested if
        if diff and module._diff:
            result['diff'] = {'prepared': diff}
        result['changed'] = True

    module.exit_json(**result)
if __name__ == '__main__':
main()
|
jeremiahyan/odoo | refs/heads/master | addons/sale_expense/__manifest__.py | 1 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'Sales Expense',
    'version': '1.0',
    'category': 'Sales/Sales',
    'summary': 'Quotation, Sales Orders, Delivery & Invoicing Control',
    'description': """
Reinvoice Employee Expense
==========================
Create some products for which you can re-invoice the costs.
This module allow to reinvoice employee expense, by setting the SO directly on the expense.
""",
    'depends': ['sale_management', 'hr_expense'],
    'data': [
        'views/product_view.xml',
        'views/hr_expense_views.xml',
        'views/sale_order_views.xml',
    ],
    'demo': ['data/sale_expense_demo.xml'],
    'test': [],
    'installable': True,
    # installed automatically as soon as all of its dependencies are installed
    'auto_install': True,
    'assets': {
        'web.assets_backend': [
            'sale_expense/static/src/**/*',
        ],
        'web.qunit_suite_tests': [
            'sale_expense/static/tests/**/*',
        ],
    }
}
|
muffl0n/ansible | refs/heads/devel | contrib/inventory/zone.py | 138 | #!/usr/bin/env python
# (c) 2015, Dagobert Michelsen <dam@baltic-online.de>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE
import sys
import json
# Build the inventory structure Ansible expects from --list.
result = {}
result['all'] = {}

# `zoneadm list -ip` prints one machine-parsable line per configured zone.
pipe = Popen(['zoneadm', 'list', '-ip'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = []
for l in pipe.stdout.readlines():
    # Example line:
    # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
    # Field 1 is the zone name; the global zone is not a managed host.
    s = l.split(':')
    if s[1] != 'global':
        result['all']['hosts'].append(s[1])

result['all']['vars'] = {}
# every zone is reached through the "zone" connection plugin
result['all']['vars']['ansible_connection'] = 'zone'

if len(sys.argv) == 2 and sys.argv[1] == '--list':
    print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
    # per-host vars are identical for all zones
    print(json.dumps({'ansible_connection': 'zone'}))
else:
    print("Need an argument, either --list or --host <host>")
|
iulian787/spack | refs/heads/develop | var/spack/repos/builtin/packages/valgrind/package.py | 3 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import glob
import sys
class Valgrind(AutotoolsPackage, SourcewarePackage):
    """An instrumentation framework for building dynamic analysis.

    There are Valgrind tools that can automatically detect many memory
    management and threading bugs, and profile your programs in
    detail. You can also use Valgrind to build new tools.

    Valgrind is Open Source / Free Software, and is freely available
    under the GNU General Public License, version 2.
    """
    homepage = "http://valgrind.org/"
    sourceware_mirror_path = "valgrind/valgrind-3.13.0.tar.bz2"
    git = "git://sourceware.org/git/valgrind.git"

    version('develop', branch='master')
    version('3.15.0', sha256='417c7a9da8f60dd05698b3a7bc6002e4ef996f14c13f0ff96679a16873e78ab1')
    version('3.14.0', sha256='037c11bfefd477cc6e9ebe8f193bb237fe397f7ce791b4a4ce3fa1c6a520baa5')
    version('3.13.0', sha256='d76680ef03f00cd5e970bbdcd4e57fb1f6df7d2e2c071635ef2be74790190c3b')
    version('3.12.0', sha256='67ca4395b2527247780f36148b084f5743a68ab0c850cb43e4a5b4b012cf76a1')
    version('3.11.0', sha256='6c396271a8c1ddd5a6fb9abe714ea1e8a86fce85b30ab26b4266aeb4c2413b42')
    version('3.10.1', sha256='fa253dc26ddb661b6269df58144eff607ea3f76a9bcfe574b0c7726e1dfcb997')
    version('3.10.0', sha256='03047f82dfc6985a4c7d9d2700e17bc05f5e1a0ca6ad902e5d6c81aeb720edc9')

    variant('mpi', default=True,
            description='Activates MPI support for valgrind')
    variant('boost', default=True,
            description='Activates boost support for valgrind')
    variant('only64bit', default=True,
            description='Sets --enable-only64bit option for valgrind')
    variant('ubsan', default=sys.platform != 'darwin',
            description='Activates ubsan support for valgrind')

    # libubsan cannot be linked statically with (Apple's) clang on macOS
    conflicts('+ubsan', when='%apple-clang',
              msg="""
Cannot build libubsan with clang on macOS.
Otherwise with (Apple's) clang there is a linker error:
clang: error: unknown argument: '-static-libubsan'
""")
    depends_on('mpi', when='+mpi')
    depends_on('boost', when='+boost')

    # the git checkout needs the full autotools chain to bootstrap
    depends_on("autoconf", type='build', when='@develop')
    depends_on("automake", type='build', when='@develop')
    depends_on("libtool", type='build', when='@develop')

    # Apply the patch suggested here:
    # http://valgrind.10908.n7.nabble.com/Unable-to-compile-on-Mac-OS-X-10-11-td57237.html
    patch('valgrind_3_12_0_osx.patch', when='@3.12.0 platform=darwin')

    # releases up to 3.15 do not build on recent macOS versions
    for os in ('mojave', 'catalina'):
        conflicts("os=" + os, when='@:3.15')

    def configure_args(self):
        """Translate the selected variants into ./configure flags."""
        spec = self.spec
        options = []
        if spec.satisfies('+ubsan'):
            options.append('--enable-ubsan')
        if spec.satisfies('+only64bit'):
            options.append('--enable-only64bit')
        if sys.platform == 'darwin':
            options.append('--build=amd64-darwin')
        return options

    # Fix the potential for overlong perl shebangs
    def patch(self):
        # rewrite the templated perl shebang to the env form, which is not
        # subject to the shebang length limit
        for link_tool_in in glob.glob('coregrind/link_tool_exe_*.in'):
            filter_file('^#! @PERL@',
                        '#! /usr/bin/env perl',
                        link_tool_in)
|
Jayflux/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/sslutils/pregenerated.py | 470 | class PregeneratedSSLEnvironment(object):
"""SSL environment to use with existing key/certificate files
e.g. when running on a server with a public domain name
"""
ssl_enabled = True
def __init__(self, logger, host_key_path, host_cert_path,
ca_cert_path=None):
self._ca_cert_path = ca_cert_path
self._host_key_path = host_key_path
self._host_cert_path = host_cert_path
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
def host_cert_path(self, hosts):
"""Return the key and certificate paths for the host"""
return self._host_key_path, self._host_cert_path
def ca_cert_path(self):
"""Return the certificate path of the CA that signed the
host certificates, or None if that isn't known"""
return self._ca_cert_path
|
puppies/fl2440 | refs/heads/master | linux-3.10.33/scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """ This class provides a tree representation of the functions
    call stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    # Virtual root of every call stack; initialized by main() before parsing.
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        # func: function name; time: timestamp of the call (None for ROOT
        # and implicitly-created parents); parent: caller node or None
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """ Retrieve the last parent of the current node that
        has the name given by func. If this function is not
        on a parent, then create it as new child of root
        @return: A reference to the parent.
        """
        # walk up the ancestry until a node named func (or ROOT) is found
        tree = self
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            # unknown caller: attach it directly below the virtual root
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # Render this node, then recurse into the children while drawing the
        # ASCII branch prefix; a last child blanks out its parent's rail.
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)
        i = 0
        if lastChild:
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch +\
                    " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch +\
                    " |", True)
            i += 1
        return s
class BrokenLineException(Exception):
    """Raised when the last line is incomplete because of the pipe
    breakage; processing stops and the line is ignored."""
class CommentLineException(Exception):
    """Raised for comment lines (as found at the beginning of the trace
    file); such lines are simply ignored."""
def parseLine(line):
    """Parse one ftrace line into a (calltime, callee, caller) tuple.

    Raises CommentLineException for header/comment lines and
    BrokenLineException when the line does not match the trace format.
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", stripped)
    if m is None:
        raise BrokenLineException
    return m.groups()
def main():
    # Read an ftrace function trace from stdin and print it as a call tree.
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT

    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # truncated last line (broken pipe): stop processing
            break
        except CommentLineException:
            # header/comment lines are skipped
            continue
        # climb back up to the caller, then record the call beneath it
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)

    print CallTree.ROOT
|
srimai/odoo | refs/heads/8.0 | addons/website/models/website.py | 53 | # -*- coding: utf-8 -*-
import cStringIO
import contextlib
import datetime
import hashlib
import inspect
import logging
import math
import mimetypes
import unicodedata
import os
import re
import time
import urlparse
from PIL import Image
from sys import maxint
import werkzeug
# optional python-slugify import (https://github.com/un33k/python-slugify)
try:
import slugify as slugify_lib
except ImportError:
slugify_lib = None
import openerp
from openerp.osv import orm, osv, fields
from openerp.tools import html_escape as escape, ustr, image_resize_and_sharpen, image_save_for_web
from openerp.tools.safe_eval import safe_eval
from openerp.addons.web.http import request
logger = logging.getLogger(__name__)
def url_for(path_or_uri, lang=None):
    """Return *path_or_uri* rewritten with the proper language url segment.

    Relative paths are joined to the current request path.  For multilang
    routes the language code is inserted, replaced or stripped (the
    website's default language carries no prefix).  Always returns unicode.
    """
    if isinstance(path_or_uri, unicode):
        path_or_uri = path_or_uri.encode('utf-8')
    current_path = request.httprequest.path
    if isinstance(current_path, unicode):
        current_path = current_path.encode('utf-8')
    location = path_or_uri.strip()
    force_lang = lang is not None
    url = urlparse.urlparse(location)

    # only local, scheme-less urls are candidates for language rewriting
    if request and not url.netloc and not url.scheme and (url.path or force_lang):
        location = urlparse.urljoin(current_path, location)

        lang = lang or request.context.get('lang')
        langs = [lg[0] for lg in request.website.get_languages()]

        if (len(langs) > 1 or force_lang) and is_multilang_url(location, langs):
            ps = location.split('/')
            if ps[1] in langs:
                # Replace the language only if we explicitly provide a language to url_for
                if force_lang:
                    ps[1] = lang
                # Remove the default language unless it's explicitly provided
                elif ps[1] == request.website.default_lang_code:
                    ps.pop(1)
            # Insert the context language or the provided language
            elif lang != request.website.default_lang_code or force_lang:
                ps.insert(1, lang)
            location = '/'.join(ps)

    return location.decode('utf-8')
def is_multilang_url(local_url, langs=None):
    """Return True when *local_url* matches a website route for which a
    language prefix is meaningful (multilang routing enabled)."""
    if not langs:
        langs = [lg[0] for lg in request.website.get_languages()]
    spath = local_url.split('/')
    # if a language is already in the path, remove it
    if spath[1] in langs:
        spath.pop(1)
        local_url = '/'.join(spath)
    try:
        # Try to match an endpoint in werkzeug's routing table
        url = local_url.split('?')
        path = url[0]
        query_string = url[1] if len(url) > 1 else None
        router = request.httprequest.app.get_db_router(request.db).bind('')
        # Force to check method to POST. Odoo uses methods : ['POST'] and ['GET', 'POST']
        func = router.match(path, method='POST', query_args=query_string)[0]
        return (func.routing.get('website', False) and
                func.routing.get('multilang', func.routing['type'] == 'http'))
    except Exception:
        # no matching route (or routing error): treat as not multilang
        return False
def slugify(s, max_length=None):
    """ Transform a string to a slug that can be used in a url path.

    This method will first try to do the job with python-slugify if present.
    Otherwise it will process string by stripping leading and ending spaces,
    converting unicode chars to ascii, lowering all chars and replacing spaces
    and underscore with hyphen "-".

    :param s: str
    :param max_length: int
    :rtype: str
    """
    s = ustr(s)
    if slugify_lib:
        # There are 2 different libraries only python-slugify is supported
        try:
            return slugify_lib.slugify(s, max_length=max_length)
        except TypeError:
            pass
    ascii_text = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')
    spaced = re.sub('[\W_]', ' ', ascii_text).strip().lower()
    hyphenated = re.sub('[-\s]+', '-', spaced)
    return hyphenated[:max_length]
def slug(value):
    """Build a "name-id" url fragment from a browse record or an
    (id, name) name_search-style tuple."""
    if isinstance(value, orm.browse_record):
        # [(id, name)] = value.name_get()
        identifier, name = value.id, value.display_name
    else:
        # assume name_search result tuple
        identifier, name = value
    slugname = slugify(name or '').strip().strip('-')
    if slugname:
        return "%s-%d" % (slugname, identifier)
    return str(identifier)
# NOTE: as the pattern is used as it for the ModelConverter (ir_http.py), do not use any flags
_UNSLUG_RE = re.compile(r'(?:(\w{1,2}|\w[A-Za-z0-9-_]+?\w)-)?(-?\d+)(?=$|/)')


def unslug(s):
    """Split a slug string into its (name, id) parts.

    Always return a 2-tuple (str|None, int|None); both members are None
    when *s* does not look like a slug.
    """
    found = _UNSLUG_RE.match(s)
    if found:
        return found.group(1), int(found.group(2))
    return None, None
def urlplus(url, params):
    """Append the query *params* to *url* (empty params are dropped)."""
    href = werkzeug.Href(url)
    return href(params or None)
class website(osv.osv):
    def _get_menu_website(self, cr, uid, ids, context=None):
        # IF a menu is changed, update all websites
        return self.search(cr, uid, [], context=context)
    def _get_menu(self, cr, uid, ids, name, arg, context=None):
        # function-field getter: every website gets the first (lowest id)
        # root menu, or False when none exists
        root_domain = [('parent_id', '=', False)]
        menus = self.pool.get('website.menu').search(cr, uid, root_domain, order='id', context=context)
        menu = menus and menus[0] or False
        return dict( map(lambda x: (x, menu), ids) )
_name = "website" # Avoid website.website convention for conciseness (for new api). Got a special authorization from xmo and rco
_description = "Website"
_columns = {
'name': fields.char('Domain'),
'company_id': fields.many2one('res.company', string="Company"),
'language_ids': fields.many2many('res.lang', 'website_lang_rel', 'website_id', 'lang_id', 'Languages'),
'default_lang_id': fields.many2one('res.lang', string="Default language"),
'default_lang_code': fields.related('default_lang_id', 'code', type="char", string="Default language code", store=True),
'social_twitter': fields.char('Twitter Account'),
'social_facebook': fields.char('Facebook Account'),
'social_github': fields.char('GitHub Account'),
'social_linkedin': fields.char('LinkedIn Account'),
'social_youtube': fields.char('Youtube Account'),
'social_googleplus': fields.char('Google+ Account'),
'google_analytics_key': fields.char('Google Analytics Key'),
'user_id': fields.many2one('res.users', string='Public User'),
'partner_id': fields.related('user_id','partner_id', type='many2one', relation='res.partner', string='Public Partner'),
'menu_id': fields.function(_get_menu, relation='website.menu', type='many2one', string='Main Menu',
store= {
'website.menu': (_get_menu_website, ['sequence','parent_id','website_id'], 10)
})
}
_defaults = {
'company_id': lambda self,cr,uid,c: self.pool['ir.model.data'].xmlid_to_res_id(cr, openerp.SUPERUSER_ID, 'base.main_company'),
}
# cf. Wizard hack in website_views.xml
def noop(self, *args, **kwargs):
pass
    def write(self, cr, uid, ids, vals, context=None):
        # invalidate the per-website language cache before writing
        self._get_languages.clear_cache(self)
        return super(website, self).write(cr, uid, ids, vals, context)
    def new_page(self, cr, uid, name, template='website.default_page', ispage=True, context=None):
        """Create a new website page by copying *template*, and return the
        xmlid of the new page view.  If a page with the slugified name
        already exists, it is reused."""
        context = context or {}
        imd = self.pool.get('ir.model.data')
        view = self.pool.get('ir.ui.view')
        template_module, template_name = template.split('.')

        # completely arbitrary max_length
        page_name = slugify(name, max_length=50)
        page_xmlid = "%s.%s" % (template_module, page_name)

        try:
            # existing page
            imd.get_object_reference(cr, uid, template_module, page_name)
        except ValueError:
            # new page: copy the template view and rename references
            _, template_id = imd.get_object_reference(cr, uid, template_module, template_name)
            page_id = view.copy(cr, uid, template_id, context=context)
            page = view.browse(cr, uid, page_id, context=context)
            page.write({
                'arch': page.arch.replace(template, page_xmlid),
                'name': page_name,
                'page': ispage,
            })
            # register the xmlid so the page survives module updates (noupdate)
            imd.create(cr, uid, {
                'name': page_name,
                'module': template_module,
                'model': 'ir.ui.view',
                'res_id': page_id,
                'noupdate': True
            }, context=context)
        return page_xmlid
def page_for_name(self, cr, uid, ids, name, module='website', context=None):
# whatever
return '%s.%s' % (module, slugify(name, max_length=50))
def page_exists(self, cr, uid, ids, name, module='website', context=None):
try:
name = (name or "").replace("/page/website.", "").replace("/page/", "")
if not name:
return False
return self.pool["ir.model.data"].get_object_reference(cr, uid, module, name)
except:
return False
    @openerp.tools.ormcache(skiparg=3)
    def _get_languages(self, cr, uid, id):
        # cached per-website list of (code, name) language pairs;
        # cache is cleared in write()
        website = self.browse(cr, uid, id)
        return [(lg.code, lg.name) for lg in website.language_ids]
    def get_languages(self, cr, uid, ids, context=None):
        # public wrapper around the cached helper (first id only)
        return self._get_languages(cr, uid, ids[0])
    def get_alternate_languages(self, cr, uid, ids, req=None, context=None):
        """Build the list of alternate-language link descriptors
        (hreflang/short/href) for the current request url."""
        langs = []
        if req is None:
            req = request.httprequest
        default = self.get_current_website(cr, uid, context=context).default_lang_code
        uri = req.path
        if req.query_string:
            uri += '?' + req.query_string
        shorts = []
        for code, name in self.get_languages(cr, uid, ids, context=context):
            # the default language has no url prefix
            lg_path = ('/' + code) if code != default else ''
            lg = code.split('_')
            shorts.append(lg[0])
            lang = {
                'hreflang': ('-'.join(lg)).lower(),
                'short': lg[0],
                'href': req.url_root[0:-1] + lg_path + uri,
            }
            langs.append(lang)
        # when a short code is unambiguous, advertise it instead of the full locale
        for lang in langs:
            if shorts.count(lang['short']) == 1:
                lang['hreflang'] = lang['short']
        return langs
    def get_current_website(self, cr, uid, context=None):
        # TODO: Select website, currently hard coded
        return self.pool['website'].browse(cr, uid, 1, context=context)
def is_publisher(self, cr, uid, ids, context=None):
Access = self.pool['ir.model.access']
is_website_publisher = Access.check(cr, uid, 'ir.ui.view', 'write', False, context=context)
return is_website_publisher
def is_user(self, cr, uid, ids, context=None):
Access = self.pool['ir.model.access']
return Access.check(cr, uid, 'ir.ui.menu', 'read', False, context=context)
    def get_template(self, cr, uid, ids, template, context=None):
        # template may be a view database id (int/long) or an xmlid string;
        # unqualified xmlids default to the "website" module
        if isinstance(template, (int, long)):
            view_id = template
        else:
            if '.' not in template:
                template = 'website.%s' % template
            module, xmlid = template.split('.', 1)
            model, view_id = request.registry["ir.model.data"].get_object_reference(cr, uid, module, xmlid)
        return self.pool["ir.ui.view"].browse(cr, uid, view_id, context=context)
    def _render(self, cr, uid, ids, template, values=None, context=None):
        # TODO: remove this. (just kept for backward api compatibility for saas-3)
        return self.pool['ir.ui.view'].render(cr, uid, template, values=values, context=context)
    def render(self, cr, uid, ids, template, values=None, status_code=None, context=None):
        # TODO: remove this. (just kept for backward api compatibility for saas-3)
        return request.render(template, values, uid=uid)
def pager(self, cr, uid, ids, url, total, page=1, step=30, scope=5, url_args=None, context=None):
# Compute Pager
page_count = int(math.ceil(float(total) / step))
page = max(1, min(int(page if str(page).isdigit() else 1), page_count))
scope -= 1
pmin = max(page - int(math.floor(scope/2)), 1)
pmax = min(pmin + scope, page_count)
if pmax - pmin < scope:
pmin = pmax - scope if pmax - scope > 0 else 1
def get_url(page):
_url = "%s/page/%s" % (url, page) if page > 1 else url
if url_args:
_url = "%s?%s" % (_url, werkzeug.url_encode(url_args))
return _url
return {
"page_count": page_count,
"offset": (page - 1) * step,
"page": {
'url': get_url(page),
'num': page
},
"page_start": {
'url': get_url(pmin),
'num': pmin
},
"page_previous": {
'url': get_url(max(pmin, page - 1)),
'num': max(pmin, page - 1)
},
"page_next": {
'url': get_url(min(pmax, page + 1)),
'num': min(pmax, page + 1)
},
"page_end": {
'url': get_url(pmax),
'num': pmax
},
"pages": [
{'url': get_url(page), 'num': page}
for page in xrange(pmin, pmax+1)
]
}
def rule_is_enumerable(self, rule):
""" Checks that it is possible to generate sensible GET queries for
a given rule (if the endpoint matches its own requirements)
:type rule: werkzeug.routing.Rule
:rtype: bool
"""
endpoint = rule.endpoint
methods = rule.methods or ['GET']
converters = rule._converters.values()
if not ('GET' in methods
and endpoint.routing['type'] == 'http'
and endpoint.routing['auth'] in ('none', 'public')
and endpoint.routing.get('website', False)
and all(hasattr(converter, 'generate') for converter in converters)
and endpoint.routing.get('website')):
return False
# dont't list routes without argument having no default value or converter
spec = inspect.getargspec(endpoint.method.original_func)
# remove self and arguments having a default value
defaults_count = len(spec.defaults or [])
args = spec.args[1:(-defaults_count or None)]
# check that all args have a converter
return all( (arg in rule._converters) for arg in args)
    def enumerate_pages(self, cr, uid, ids, query_string=None, context=None):
        """ Available pages in the website/CMS. This is mostly used for links
        generation and can be overridden by modules setting up new HTML
        controllers for dynamic pages (e.g. blog).
        By default, returns template views marked as pages.
        :param str query_string: a (user-provided) string, fetches pages
                                 matching the string
        :returns: a generator of mappings; ``loc`` is the URL of the page,
                  plus any ``__``-prefixed extra values produced by the
                  route converters (the prefix is stripped)
        :rtype: generator of dict

        NOTE: Python-2-only code (``<>`` operator, ``cmp``, list.sort with a
        comparison function).
        """
        router = request.httprequest.app.get_db_router(request.db)
        # Force enumeration to be performed as public user
        url_list = []
        for rule in router.iter_rules():
            if not self.rule_is_enumerable(rule):
                continue
            converters = rule._converters or {}
            # Static rule (no converter): filter on the built URL directly.
            if query_string and not converters and (query_string not in rule.build([{}], append_unknown=False)[1]):
                continue
            values = [{}]
            convitems = converters.items()
            # converters with a domain are processed after the other ones
            gd = lambda x: hasattr(x[1], 'domain') and (x[1].domain <> '[]')
            convitems.sort(lambda x, y: cmp(gd(x), gd(y)))
            # Cartesian expansion: each converter multiplies the candidate
            # value dicts by everything it can generate.
            for (i,(name, converter)) in enumerate(convitems):
                newval = []
                for val in values:
                    # Only the last converter receives the search string.
                    query = i==(len(convitems)-1) and query_string
                    for v in converter.generate(request.cr, uid, query=query, args=val, context=context):
                        newval.append( val.copy() )
                        v[name] = v['loc']
                        del v['loc']
                        newval[-1].update(v)
                values = newval
            for value in values:
                domain_part, url = rule.build(value, append_unknown=False)
                page = {'loc': url}
                # Expose converter extras ('__name' -> 'name') on the page.
                for key,val in value.items():
                    if key.startswith('__'):
                        page[key[2:]] = val
                if url in ('/sitemap.xml',):
                    continue
                # Deduplicate URLs across rules.
                if url in url_list:
                    continue
                url_list.append(url)
                yield page
def search_pages(self, cr, uid, ids, needle=None, limit=None, context=None):
name = (needle or "").replace("/page/website.", "").replace("/page/", "")
res = []
for page in self.enumerate_pages(cr, uid, ids, query_string=name, context=context):
if needle in page['loc']:
res.append(page)
if len(res) == limit:
break
return res
    def kanban(self, cr, uid, ids, model, domain, column, template, step=None, scope=None, orderby=None, context=None):
        """Render a kanban board of *model* records grouped by *column*.

        Per-column pagination state travels in the ``kanban`` query-string
        parameter as comma-separated ``<column_id>-<page>`` tokens.
        """
        step = step and int(step) or 10
        scope = scope and int(scope) or 5
        orderby = orderby or "name"
        get_args = dict(request.httprequest.args or {})
        model_obj = self.pool[model]
        # Co-model of the many2one field used for grouping.
        relation = model_obj._columns.get(column)._obj
        relation_obj = self.pool[relation]
        get_args.setdefault('kanban', "")
        kanban = get_args.pop('kanban')
        kanban_url = "?%s&kanban=" % werkzeug.url_encode(get_args)
        # Parse "<column_id>-<page>" tokens into {column_id: page}.
        pages = {}
        for col in kanban.split(","):
            if col:
                col = col.split("-")
                pages[int(col[0])] = int(col[1])
        objects = []
        for group in model_obj.read_group(cr, uid, domain, ["id", column], groupby=column):
            obj = {}
            # browse column
            relation_id = group[column][0]
            obj['column_id'] = relation_obj.browse(cr, uid, relation_id)
            obj['kanban_url'] = kanban_url
            # Keep the pagination state of the *other* columns in the url.
            for k, v in pages.items():
                if k != relation_id:
                    obj['kanban_url'] += "%s-%s" % (k, v)
            # pager
            number = model_obj.search(cr, uid, group['__domain'], count=True)
            obj['page_count'] = int(math.ceil(float(number) / step))
            obj['page'] = pages.get(relation_id) or 1
            if obj['page'] > obj['page_count']:
                obj['page'] = obj['page_count']
            offset = (obj['page']-1) * step
            obj['page_start'] = max(obj['page'] - int(math.floor((scope-1)/2)), 1)
            obj['page_end'] = min(obj['page_start'] + (scope-1), obj['page_count'])
            # view data
            obj['domain'] = group['__domain']
            obj['model'] = model
            obj['step'] = step
            obj['orderby'] = orderby
            # browse objects
            object_ids = model_obj.search(cr, uid, group['__domain'], limit=step, offset=offset, order=orderby)
            obj['object_ids'] = model_obj.browse(cr, uid, object_ids)
            objects.append(obj)
        values = {
            'objects': objects,
            'range': range,
            'template': template,
        }
        return request.website._render("website.kanban_contain", values)
    def kanban_col(self, cr, uid, ids, model, domain, page, template, step, orderby, context=None):
        """Render one kanban column: page *page* of *model* records matching
        *domain*, each rendered with *template*."""
        html = ""
        model_obj = self.pool[model]
        # `domain` arrives as a string from the client; evaluate it safely.
        domain = safe_eval(domain)
        step = int(step)
        offset = (int(page)-1) * step
        object_ids = model_obj.search(cr, uid, domain, limit=step, offset=offset, order=orderby)
        object_ids = model_obj.browse(cr, uid, object_ids)
        for object_id in object_ids:
            html += request.website._render(template, {'object_id': object_id})
        return html
    def _image_placeholder(self, response):
        """Fill *response* with the generic placeholder PNG and return it."""
        # file_open may return a StringIO. StringIO can be closed but are
        # not context managers in Python 2 though that is fixed in 3
        with contextlib.closing(openerp.tools.misc.file_open(
                os.path.join('web', 'static', 'src', 'img', 'placeholder.png'),
                mode='rb')) as f:
            response.data = f.read()
        return response.make_conditional(request.httprequest)
    def _image(self, cr, uid, model, id, field, response, max_width=maxint, max_height=maxint, cache=None, context=None):
        """ Fetches the requested field and ensures it does not go above
        (max_width, max_height), resizing it if necessary.
        Resizing is bypassed if the object provides a $field_big, which will
        be interpreted as a pre-resized version of the base field.
        If the record is not found or does not have the requested field,
        returns a placeholder image via :meth:`~._image_placeholder`.
        Sets and checks conditional response parameters:
        * :mailheader:`ETag` is always set (and checked)
        * :mailheader:`Last-Modified` is set iif the record has a concurrency
          field (``__last_update``)
        The requested field is assumed to be base64-encoded image data in
        all cases.

        NOTE: Python-2-only code (``maxint``, ``cStringIO``,
        ``str.decode('base64')``).
        """
        Model = self.pool[model]
        id = int(id)
        ids = Model.search(cr, uid,
                           [('id', '=', id)], context=context)
        # Not readable by the current user: retry as superuser, but only for
        # published records.
        if not ids and 'website_published' in Model._fields:
            ids = Model.search(cr, openerp.SUPERUSER_ID,
                               [('id', '=', id), ('website_published', '=', True)], context=context)
        if not ids:
            return self._image_placeholder(response)
        concurrency = '__last_update'
        [record] = Model.read(cr, openerp.SUPERUSER_ID, [id],
                              [concurrency, field],
                              context=context)
        if concurrency in record:
            server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
            try:
                response.last_modified = datetime.datetime.strptime(
                    record[concurrency], server_format + '.%f')
            except ValueError:
                # just in case we have a timestamp without microseconds
                response.last_modified = datetime.datetime.strptime(
                    record[concurrency], server_format)
        # Field does not exist on model or field set to False
        if not record.get(field):
            # FIXME: maybe a field which does not exist should be a 404?
            return self._image_placeholder(response)
        response.set_etag(hashlib.sha1(record[field]).hexdigest())
        response.make_conditional(request.httprequest)
        if cache:
            response.cache_control.max_age = cache
            response.expires = int(time.time() + cache)
        # conditional request match
        if response.status_code == 304:
            return response
        data = record[field].decode('base64')
        image = Image.open(cStringIO.StringIO(data))
        response.mimetype = Image.MIME[image.format]
        filename = '%s_%s.%s' % (model.replace('.', '_'), id, str(image.format).lower())
        response.headers['Content-Disposition'] = 'inline; filename="%s"' % filename
        # Explicit 0/None for both bounds disables resizing entirely.
        if (not max_width) and (not max_height):
            response.data = data
            return response
        w, h = image.size
        max_w = int(max_width) if max_width else maxint
        max_h = int(max_height) if max_height else maxint
        if w < max_w and h < max_h:
            response.data = data
        else:
            size = (max_w, max_h)
            img = image_resize_and_sharpen(image, size, preserve_aspect_ratio=True)
            image_save_for_web(img, response.stream, format=image.format)
            # invalidate content-length computed by make_conditional as
            # writing to response.stream does not do it (as of werkzeug 0.9.3)
            del response.headers['Content-Length']
        return response
def image_url(self, cr, uid, record, field, size=None, context=None):
"""Returns a local url that points to the image field of a given browse record."""
model = record._name
sudo_record = record.sudo()
id = '%s_%s' % (record.id, hashlib.sha1(sudo_record.write_date or sudo_record.create_date or '').hexdigest()[0:7])
size = '' if size is None else '/%s' % size
return '/website/image/%s/%s/%s%s' % (model, id, field, size)
class website_menu(osv.osv):
    _name = "website.menu"
    _description = "Website Menu"
    _columns = {
        'name': fields.char('Menu', required=True, translate=True),
        'url': fields.char('Url'),
        'new_window': fields.boolean('New Window'),
        'sequence': fields.integer('Sequence'),
        # TODO: support multiwebsite once done for ir.ui.views
        'website_id': fields.many2one('website', 'Website'),
        'parent_id': fields.many2one('website.menu', 'Parent Menu', select=True, ondelete="cascade"),
        'child_id': fields.one2many('website.menu', 'parent_id', string='Child Menus'),
        # parent_left/parent_right: maintained by _parent_store (nested set)
        'parent_left': fields.integer('Parent Left', select=True),
        'parent_right': fields.integer('Parent Right', select=True),
    }
    def __defaults_sequence(self, cr, uid, context):
        # Default to the highest existing sequence so new menus sort last.
        menu = self.search_read(cr, uid, [(1,"=",1)], ["sequence"], limit=1, order="sequence DESC", context=context)
        return menu and menu[0]["sequence"] or 0
    _defaults = {
        'url': '',
        'sequence': __defaults_sequence,
        'new_window': False,
    }
    _parent_store = True
    _parent_order = 'sequence'
    _order = "sequence"
    # would be better to take a menu_id as argument
    def get_tree(self, cr, uid, website_id, context=None):
        """Return the website's menu hierarchy as nested plain dicts."""
        def make_tree(node):
            menu_node = dict(
                id=node.id,
                name=node.name,
                url=node.url,
                new_window=node.new_window,
                sequence=node.sequence,
                parent_id=node.parent_id.id,
                children=[],
            )
            for child in node.child_id:
                menu_node['children'].append(make_tree(child))
            return menu_node
        menu = self.pool.get('website').browse(cr, uid, website_id, context=context).menu_id
        return make_tree(menu)
    def save(self, cr, uid, website_id, data, context=None):
        """Apply a menu-editor payload: delete, create, then update menus.

        ``data['data']`` holds menu value dicts; new menus carry a
        client-side string id which is swapped for the real database id
        (including in children's ``parent_id``) right after creation.
        """
        def replace_id(old_id, new_id):
            for menu in data['data']:
                if menu['id'] == old_id:
                    menu['id'] = new_id
                if menu['parent_id'] == old_id:
                    menu['parent_id'] = new_id
        to_delete = data['to_delete']
        if to_delete:
            self.unlink(cr, uid, to_delete, context=context)
        for menu in data['data']:
            mid = menu['id']
            # New menus come from the client with a string id.
            if isinstance(mid, str):
                new_id = self.create(cr, uid, {'name': menu['name']}, context=context)
                replace_id(mid, new_id)
        for menu in data['data']:
            self.write(cr, uid, [menu['id']], menu, context=context)
        return True
class ir_attachment(osv.osv):
    _inherit = "ir.attachment"
    def _website_url_get(self, cr, uid, ids, name, arg, context=None):
        # Function field: the attachment's own URL when it has one, otherwise
        # a generated /website/image/ route serving its binary data.
        result = {}
        for attach in self.browse(cr, uid, ids, context=context):
            if attach.url:
                result[attach.id] = attach.url
            else:
                result[attach.id] = self.pool['website'].image_url(cr, uid, attach, 'datas')
        return result
    def _datas_checksum(self, cr, uid, ids, name, arg, context=None):
        # SHA1 of the data, computed only for view (template) attachments;
        # all other attachments keep False.
        result = dict.fromkeys(ids, False)
        attachments = self.read(cr, uid, ids, ['res_model'], context=context)
        view_attachment_ids = [attachment['id'] for attachment in attachments if attachment['res_model'] == 'ir.ui.view']
        for attach in self.read(cr, uid, view_attachment_ids, ['res_model', 'res_id', 'type', 'datas'], context=context):
            result[attach['id']] = self._compute_checksum(attach)
        return result
    def _compute_checksum(self, attachment_dict):
        """Return the SHA1 hex digest of a generic (unattached, binary,
        non-url) view attachment's data, or None when it does not apply."""
        if attachment_dict.get('res_model') == 'ir.ui.view'\
                and not attachment_dict.get('res_id') and not attachment_dict.get('url')\
                and attachment_dict.get('type', 'binary') == 'binary'\
                and attachment_dict.get('datas'):
            return hashlib.new('sha1', attachment_dict['datas']).hexdigest()
        return None
    def _datas_big(self, cr, uid, ids, name, arg, context=None):
        # Stored pre-resized ("big") version of view images, avoiding a
        # resize on every request; skipped in bin_size mode.
        result = dict.fromkeys(ids, False)
        if context and context.get('bin_size'):
            return result
        for record in self.browse(cr, uid, ids, context=context):
            if record.res_model != 'ir.ui.view' or not record.datas: continue
            try:
                result[record.id] = openerp.tools.image_resize_image_big(record.datas)
            except IOError: # apparently the error PIL.Image.open raises
                pass
        return result
    _columns = {
        'datas_checksum': fields.function(_datas_checksum, size=40,
            string="Datas checksum", type='char', store=True, select=True),
        'website_url': fields.function(_website_url_get, string="Attachment URL", type='char'),
        'datas_big': fields.function (_datas_big, type='binary', store=True,
            string="Resized file content"),
        'mimetype': fields.char('Mime Type', readonly=True),
    }
    def _add_mimetype_if_needed(self, values):
        """Guess and set values['mimetype'] from the file name, if present."""
        if values.get('datas_fname'):
            values['mimetype'] = mimetypes.guess_type(values.get('datas_fname'))[0] or 'application/octet-stream'
    def create(self, cr, uid, values, context=None):
        # Deduplicate view attachments: reuse an existing attachment with the
        # same checksum instead of creating a copy.
        chk = self._compute_checksum(values)
        if chk:
            match = self.search(cr, uid, [('datas_checksum', '=', chk)], context=context)
            if match:
                return match[0]
        self._add_mimetype_if_needed(values)
        return super(ir_attachment, self).create(
            cr, uid, values, context=context)
    def write(self, cr, uid, ids, values, context=None):
        self._add_mimetype_if_needed(values)
        return super(ir_attachment, self).write(cr, uid, ids, values, context=context)
    def try_remove(self, cr, uid, ids, context=None):
        """ Removes a web-based image attachment if it is used by no view
        (template)
        Returns a dict mapping attachments which would not be removed (if any)
        mapped to the views preventing their removal
        """
        Views = self.pool['ir.ui.view']
        attachments_to_remove = []
        # views blocking removal of the attachment
        removal_blocked_by = {}
        for attachment in self.browse(cr, uid, ids, context=context):
            # in-document URLs are html-escaped, a straight search will not
            # find them
            url = escape(attachment.website_url)
            ids = Views.search(cr, uid, ["|", ('arch', 'like', '"%s"' % url), ('arch', 'like', "'%s'" % url)], context=context)
            if ids:
                removal_blocked_by[attachment.id] = Views.read(
                    cr, uid, ids, ['name'], context=context)
            else:
                attachments_to_remove.append(attachment.id)
        if attachments_to_remove:
            self.unlink(cr, uid, attachments_to_remove, context=context)
        return removal_blocked_by
class res_partner(osv.osv):
    _inherit = "res.partner"

    def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
        """Return a Google Static Maps image URL centered on the partner's
        address (street, city, zip, country)."""
        partner = self.browse(cr, uid, ids[0], context=context)
        params = {
            'center': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
            # Google Static Maps expects size as "{width}x{height}". The
            # original passed height first; it went unnoticed only because
            # the defaults are square (298x298).
            'size': "%sx%s" % (width, height),
            'zoom': zoom,
            'sensor': 'false',
        }
        return urlplus('//maps.googleapis.com/maps/api/staticmap' , params)

    def google_map_link(self, cr, uid, ids, zoom=8, context=None):
        """Return a Google Maps link centered on the partner's address."""
        partner = self.browse(cr, uid, ids[0], context=context)
        params = {
            'q': '%s, %s %s, %s' % (partner.street or '', partner.city or '', partner.zip or '', partner.country_id and partner.country_id.name_get()[0][1] or ''),
            # NOTE(review): the `zoom` argument is ignored here and 10 is
            # hard-coded; kept as-is for backward compatibility.
            'z': 10
        }
        return urlplus('https://maps.google.com/maps' , params)
class res_company(osv.osv):
    _inherit = "res.company"
    def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
        """Delegate to the company partner's google_map_img (None if no partner)."""
        # Browse as superuser: public users may lack read access on the partner.
        partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
        return partner and partner.google_map_img(zoom, width, height, context=context) or None
    def google_map_link(self, cr, uid, ids, zoom=8, context=None):
        """Delegate to the company partner's google_map_link (None if no partner)."""
        partner = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context).partner_id
        return partner and partner.google_map_link(zoom, context=context) or None
class base_language_install(osv.osv_memory):
    _inherit = "base.language.install"
    _columns = {
        'website_ids': fields.many2many('website', string='Websites to translate'),
    }
    def default_get(self, cr, uid, fields, context=None):
        """Preselect the current website (from the action context) in website_ids."""
        if context is None:
            context = {}
        defaults = super(base_language_install, self).default_get(cr, uid, fields, context)
        website_id = context.get('params', {}).get('website_id')
        if website_id:
            if 'website_ids' not in defaults:
                defaults['website_ids'] = []
            defaults['website_ids'].append(website_id)
        return defaults
    def lang_install(self, cr, uid, ids, context=None):
        """Install the language, enable it on the selected websites and, when
        the context provides ``url_return``, redirect back to the website."""
        if context is None:
            context = {}
        action = super(base_language_install, self).lang_install(cr, uid, ids, context)
        language_obj = self.browse(cr, uid, ids)[0]
        website_ids = [website.id for website in language_obj['website_ids']]
        lang_id = self.pool['res.lang'].search(cr, uid, [('code', '=', language_obj['lang'])])
        if website_ids and lang_id:
            # Link the newly installed language to the selected websites.
            data = {'language_ids': [(4, lang_id[0])]}
            self.pool['website'].write(cr, uid, website_ids, data)
        params = context.get('params', {})
        if 'url_return' in params:
            # '[lang]' placeholder in the return url is replaced by the
            # freshly installed language code.
            return {
                'url': params['url_return'].replace('[lang]', language_obj['lang']),
                'type': 'ir.actions.act_url',
                'target': 'self'
            }
        return action
class website_seo_metadata(osv.Model):
    _name = 'website.seo.metadata'
    _description = 'SEO metadata'
    # Mixin inherited by website-published models to expose the standard
    # SEO <meta> fields on their pages.
    _columns = {
        'website_meta_title': fields.char("Website meta title", translate=True),
        'website_meta_description': fields.text("Website meta description", translate=True),
        'website_meta_keywords': fields.char("Website meta keywords", translate=True),
    }
# vim:et:
|
RickyCook/DockCI | refs/heads/master | tests/db/api/test_job_api_db.py | 2 | """ Test ``dockci.api.jwt`` against the DB """
import json
import pytest
def job_url_for(job):
    """Return the API detail URL for ``job``."""
    return '/api/v1/projects/%s/jobs/%s' % (job.project.slug, job.slug)
def stage_url_for(stage):
    """Return the API detail URL for ``stage``."""
    return '%s/stages/%s' % (job_url_for(stage.job), stage.slug)
@pytest.mark.usefixtures('db')
class TestJobEdit(object):
    """ Test the ``JobDetail.patch`` resource """
    def test_as_admin(self, client, job, admin_user):
        """ Ensure even admins are denied """
        original_commit = job.commit
        job_url = job_url_for(job)
        response = client.patch(
            job_url,
            headers={
                'x_dockci_username': admin_user.email,
                'x_dockci_password': 'testpass',
            },
            data={'commit': 'updated'},
        )
        # Admin credentials are not enough: only agents may PATCH jobs.
        assert response.status_code == 401
        response_data = json.loads(response.data.decode())
        assert response_data == {'message': 'Only an agent can do this'}
        response = client.get(job_url)
        response_data = json.loads(response.data.decode())
        # The commit must be untouched after the rejected PATCH.
        assert response_data.pop('commit') == original_commit
    def test_as_agent(self, client, job, agent_token):
        """ Ensure agents can update """
        job_url = job_url_for(job)
        response = client.patch(
            job_url,
            headers={'x_dockci_api_key': agent_token},
            data={'commit': 'updated'},
        )
        assert response.status_code == 200
        response_data = json.loads(response.data.decode())
        assert response_data.pop('commit') == 'updated'
        # Re-read to confirm the change was persisted, not just echoed.
        response = client.get(job_url)
        response_data = json.loads(response.data.decode())
        assert response_data.pop('commit') == 'updated'
    @pytest.mark.parametrize('field_name', ['start_ts', 'complete_ts'])
    def test_update_date(self, client, job, agent_token, field_name):
        """ Ensure dates are updated correctly """
        job_url = job_url_for(job)
        response = client.patch(
            job_url,
            headers={'x_dockci_api_key': agent_token},
            data={field_name: '2016-02-03T04:05:06'},
        )
        assert response.status_code == 200
        response_data = json.loads(response.data.decode())
        # ISO-8601 round-trip: stored and returned unchanged.
        assert response_data.pop(field_name) == '2016-02-03T04:05:06'
        response = client.get(job_url)
        response_data = json.loads(response.data.decode())
        assert response_data.pop(field_name) == '2016-02-03T04:05:06'
@pytest.mark.usefixtures('db')
class TestStageDetail(object):
    """ Test the ``StageDetail`` resource """
    def test_as_admin(self, client, job, admin_user):
        """ Ensure even admins are denied """
        stage_url = '{base}/stages/teststage'.format(base=job_url_for(job))
        response = client.put(
            stage_url,
            headers={
                'x_dockci_username': admin_user.email,
                'x_dockci_password': 'testpass',
            },
            data={'success': True},
        )
        # Admin credentials are not enough: only agents may PUT stages.
        assert response.status_code == 401
        response_data = json.loads(response.data.decode())
        assert response_data == {'message': 'Only an agent can do this'}
        # The rejected PUT must not have created the stage.
        response = client.get(stage_url)
        assert response.status_code == 404
    def test_create(self, client, job, agent_token):
        """ Ensure agents can update """
        stage_url = '{base}/stages/teststage'.format(base=job_url_for(job))
        response = client.put(
            stage_url,
            headers={'x_dockci_api_key': agent_token},
            data={'success': 'true'},
        )
        assert response.status_code == 200  # TODO 201
        response_data = json.loads(response.data.decode())
        assert response_data.pop('success') == True
        response = client.get(stage_url)
        response_data = json.loads(response.data.decode())
        assert response_data.pop('success') == True
    def test_update(self, client, stage, agent_token):
        """ Ensure agents can update """
        stage_url = stage_url_for(stage)
        response = client.put(
            stage_url,
            headers={'x_dockci_api_key': agent_token},
            data={'success': 'false'},
        )
        assert response.status_code == 200
        response_data = json.loads(response.data.decode())
        assert response_data.pop('success') == False
        response = client.get(stage_url)
        response_data = json.loads(response.data.decode())
        assert response_data.pop('success') == False
|
mainglis/adminpanel | refs/heads/master | nextgensbl/users2/urls.py | 1 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.views.generic import TemplateView
from django.conf.urls import url, patterns
from . import views
# urlpatterns = patterns('',
# url(r'^$', TemplateView.as_view(template_name='base.html')),
# )
urlpatterns = [
    # Single active route: the app index view.
    url(r'^$', views.index, name='index'),
    # # URL pattern for the UserDetailView
    # url(regex=r'^(?P<username>[\w.@+-]+)/$', view=views.UserDetailView.as_view(), name='detail'),
    # # URL pattern for the UserListView
    # url(regex=r'^$', view=views.UserListView.as_view(), name='list'),
    #
    # # URL pattern for the UserRedirectView
    # url(regex=r'^~redirect/$', view=views.UserRedirectView.as_view(), name='redirect'),
    #
    # # URL pattern for the UserDetailView
    # url(regex=r'^(?P<username>[\w.@+-]+)/$', view=views.UserDetailView.as_view(), name='detail'),
    #
    # # URL pattern for the UserUpdateView
    # url(regex=r'^~update/$', view=views.UserUpdateView.as_view(), name='update'),
]
|
devdelay/home-assistant | refs/heads/dev | tests/components/test_init.py | 4 | """The testd for Core components."""
# pylint: disable=protected-access,too-many-public-methods
import unittest
from unittest.mock import patch
from tempfile import TemporaryDirectory
import yaml
import homeassistant.core as ha
from homeassistant import config
from homeassistant.const import (
STATE_ON, STATE_OFF, SERVICE_TURN_ON, SERVICE_TURN_OFF, SERVICE_TOGGLE)
import homeassistant.components as comps
from homeassistant.helpers import entity
from tests.common import get_test_home_assistant, mock_service
class TestComponentsCore(unittest.TestCase):
    """Test homeassistant.components module."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.assertTrue(comps.setup(self.hass, {}))
        # Two fixture entities: one on, one off.
        self.hass.states.set('light.Bowl', STATE_ON)
        self.hass.states.set('light.Ceiling', STATE_OFF)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    def test_is_on(self):
        """Test is_on method."""
        self.assertTrue(comps.is_on(self.hass, 'light.Bowl'))
        self.assertFalse(comps.is_on(self.hass, 'light.Ceiling'))
        # Without an entity_id, is_on is True if *any* entity is on.
        self.assertTrue(comps.is_on(self.hass))
        self.assertFalse(comps.is_on(self.hass, 'non_existing.entity'))
    def test_turn_on_without_entities(self):
        """Test turn_on method without entities."""
        calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)
        comps.turn_on(self.hass)
        self.hass.pool.block_till_done()
        # No entity_id given: no domain service should be invoked.
        self.assertEqual(0, len(calls))
    def test_turn_on(self):
        """Test turn_on method."""
        calls = mock_service(self.hass, 'light', SERVICE_TURN_ON)
        comps.turn_on(self.hass, 'light.Ceiling')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(calls))
    def test_turn_off(self):
        """Test turn_off method."""
        calls = mock_service(self.hass, 'light', SERVICE_TURN_OFF)
        comps.turn_off(self.hass, 'light.Bowl')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(calls))
    def test_toggle(self):
        """Test toggle method."""
        calls = mock_service(self.hass, 'light', SERVICE_TOGGLE)
        comps.toggle(self.hass, 'light.Bowl')
        self.hass.pool.block_till_done()
        self.assertEqual(1, len(calls))
    @patch('homeassistant.core.ServiceRegistry.call')
    def test_turn_on_to_not_block_for_domains_without_service(self, mock_call):
        """Test if turn_on is blocking domain with no service."""
        mock_service(self.hass, 'light', SERVICE_TURN_ON)
        # We can't test if our service call results in services being called
        # because by mocking out the call service method, we mock out all
        # So we mimick how the service registry calls services
        service_call = ha.ServiceCall('homeassistant', 'turn_on', {
            'entity_id': ['light.test', 'sensor.bla', 'light.bla']
        })
        self.hass.services._services['homeassistant']['turn_on'](service_call)
        # One call per domain: light (blocking) and sensor (non-blocking,
        # since sensor registered no turn_on service).
        self.assertEqual(2, mock_call.call_count)
        self.assertEqual(
            ('light', 'turn_on', {'entity_id': ['light.bla', 'light.test']},
             True),
            mock_call.call_args_list[0][0])
        self.assertEqual(
            ('sensor', 'turn_on', {'entity_id': ['sensor.bla']}, False),
            mock_call.call_args_list[1][0])
    def test_reload_core_conf(self):
        """Test reload core conf service."""
        ent = entity.Entity()
        ent.entity_id = 'test.entity'
        ent.hass = self.hass
        ent.update_ha_state()
        state = self.hass.states.get('test.entity')
        assert state is not None
        assert state.state == 'unknown'
        assert state.attributes == {}
        with TemporaryDirectory() as conf_dir:
            self.hass.config.config_dir = conf_dir
            conf_yaml = self.hass.config.path(config.YAML_CONFIG_FILE)
            with open(conf_yaml, 'a') as fp:
                fp.write(yaml.dump({
                    ha.DOMAIN: {
                        'latitude': 10,
                        'longitude': 20,
                        'customize': {
                            'test.Entity': {
                                'hello': 'world'
                            }
                        }
                    }
                }))
            comps.reload_core_config(self.hass)
            self.hass.pool.block_till_done()
            # Core location must reflect the freshly written YAML.
            assert 10 == self.hass.config.latitude
            assert 20 == self.hass.config.longitude
            # The customize block applies on the next state update.
            ent.update_ha_state()
            state = self.hass.states.get('test.entity')
            assert state is not None
            assert state.state == 'unknown'
            assert state.attributes.get('hello') == 'world'
    @patch('homeassistant.components._LOGGER.error')
    @patch('homeassistant.config.process_ha_core_config')
    def test_reload_core_with_wrong_conf(self, mock_process, mock_error):
        """Test reload core conf service."""
        with TemporaryDirectory() as conf_dir:
            self.hass.config.config_dir = conf_dir
            conf_yaml = self.hass.config.path(config.YAML_CONFIG_FILE)
            with open(conf_yaml, 'a') as fp:
                # A YAML list is not a valid config mapping.
                fp.write(yaml.dump(['invalid', 'config']))
            comps.reload_core_config(self.hass)
            self.hass.pool.block_till_done()
            # Invalid config: an error is logged and processing is skipped.
            assert mock_error.called
            assert mock_process.called is False
|
keybar/keybar | refs/heads/master | src/keybar/tests/helpers.py | 1 | import errno
import socket
import pytest
from django.db import connections
from django.test.testcases import LiveServerThread as LiveServerThreadBase
from pytest_django.live_server_helper import parse_addr
from tornado import ioloop
from urllib3.util.url import parse_url
from keybar.client import LocalClient
from keybar.server import get_server
class LiveServerThread(LiveServerThreadBase):
    """Live-server thread running a Tornado IOLoop instead of a WSGI server."""
    ports = range(8990, 9999)
    def run(self):
        # Executed in the server thread: bind the first free port from
        # self.possible_ports, signal readiness, then serve until terminate().
        if self.connections_override:
            # Override this thread's database connections with the ones
            # provided by the main thread.
            for alias, conn in self.connections_override.items():
                connections[alias] = conn
        try:
            self.server = get_server(debug=False)
            for index, port in enumerate(self.possible_ports):
                try:
                    self.server.listen(port, 'local.keybar.io')
                except socket.error as exc:
                    if (index + 1 < len(self.possible_ports) and exc.errno == errno.EADDRINUSE):
                        # This port is already in use, so we go on and try with
                        # the next one in the list.
                        continue
                    else:
                        # Either none of the given ports are free or the error
                        # is something else than "Address already in use". So
                        # we let that error bubble up to the main thread.
                        raise exc
                else:
                    # A free port was found.
                    self.port = port
                    break
            self.is_ready.set()
            self.loop = ioloop.IOLoop.instance()
            self.loop.start()
        except Exception as exc:
            # Surface the failure to the main thread via self.error and
            # unblock its is_ready.wait().
            self.terminate()
            self.error = exc
            self.is_ready.set()
    def terminate(self):
        # Only tear down if the loop was actually started.
        if hasattr(self, 'loop'):
            # Stop the WSGI server
            self.server.stop()
            self.loop.stop()
class LiveServer:
    """Owns a LiveServerThread; exposes its https url/domain for tests."""
    def __init__(self, addr):
        host, possible_ports = parse_addr(addr)
        self.thread = LiveServerThread(host, possible_ports, None)
        # Daemon thread: don't block interpreter shutdown if stop() is missed.
        self.thread.daemon = True
        self.thread.start()
        self.thread.is_ready.wait()
        # Re-raise any startup failure captured in the server thread.
        if self.thread.error:
            raise self.thread.error
    def stop(self):
        """Stop the server"""
        self.thread.terminate()
        self.thread.join()
    @property
    def url(self):
        # Always https: the Tornado server is TLS-only.
        return 'https://{0}'.format(self.domain)
    @property
    def domain(self):
        return '{0}:{1}'.format(self.thread.host, self.thread.port)
    def __str__(self):
        return self.url
    def __add__(self, other):
        # Allows `liveserver + '/path'` string concatenation in tests.
        return str(self) + other
    def __repr__(self):
        return '<LiveServer listening at {0}>'.format(self.url)
class LiveServerTest:
    """Test base class wiring the live-server fixture and an API client."""
    @pytest.fixture(autouse=True)
    def setup(self, settings, keybar_liveserver):
        self.liveserver = keybar_liveserver
        # XXX: HACK! Figure out how to use multiple setup-methods properly
        # with pytest
        getattr(self, '_setup', lambda: None)()
    def get_client(self, device_id, secret):
        """Return a LocalClient pointed at the live server's host and port."""
        client = LocalClient(device_id, secret)
        parsed_url = parse_url(self.liveserver.url)
        client.host = parsed_url.host
        client.port = parsed_url.port
        return client
|
triplekiller/code-snippet | refs/heads/master | python/decorator.py | 2 | #!/usr/bin/env python3
# Decorator: function wrappers that are run when the python interpreter loads
# the function, and can modify what the function receives and returns.
from time import sleep
from functools import wraps
import logging
logging.basicConfig()
log = logging.getLogger("retry")
def retry(f):
    """Decorator: retry *f* up to 5 times with linear backoff (10s, 20s, ...).

    Each failed attempt is logged with its traceback. If every attempt
    fails, a critical message is logged and ``None`` is returned (the last
    exception is deliberately not re-raised).
    """
    @wraps(f)
    def wrapped_f(*args, **kwargs):
        MAX_ATTEMPTS = 5
        for attempt in range(1, MAX_ATTEMPTS + 1):
            try:
                return f(*args, **kwargs)
            # Catch Exception rather than the original bare `except:` so
            # KeyboardInterrupt and SystemExit still propagate.
            except Exception:
                log.exception("Attempt %s/%s failed: %s", attempt, MAX_ATTEMPTS,
                              (args, kwargs))
                sleep(10 * attempt)
        log.critical("All %s attempts failed: %s", MAX_ATTEMPTS, (args, kwargs))
    return wrapped_f
counter = 0
@retry
def save_to_db(arg):
    """Demo function: fails on the first call, succeeds on the retry."""
    print("Write to database.")
    print("Automatically retried if exception is thrown.")
    global counter
    counter += 1
    # Raise on the first invocation so @retry has something to retry.
    if counter < 2:
        raise ValueError(arg)
if __name__ == '__main__':
    save_to_db("Some bad value")
|
sherazkasi/SabreSoftware | refs/heads/master | Lib/site-packages/numpy/distutils/command/build_scripts.py | 100 | """ Modified version of build_scripts that handles building scripts from functions.
"""
from distutils.command.build_scripts import build_scripts as old_build_scripts
from numpy.distutils import log
from numpy.distutils.misc_util import is_string
class build_scripts(old_build_scripts):
    """build_scripts command that also accepts callables generating scripts.

    Each non-string entry in ``scripts`` must be a callable taking the build
    directory and returning a script path, a sequence of script paths, or a
    false value (meaning "nothing to add").
    """

    def generate_scripts(self, scripts):
        """Expand callable entries in *scripts* into concrete script paths.

        String entries pass through unchanged; callables are invoked with the
        build directory and their results are appended.
        """
        new_scripts = []
        func_scripts = []
        for script in scripts:
            if is_string(script):
                new_scripts.append(script)
            else:
                func_scripts.append(script)
        if not func_scripts:
            return new_scripts

        build_dir = self.build_dir
        self.mkpath(build_dir)
        for func in func_scripts:
            script = func(build_dir)
            if not script:
                continue
            if is_string(script):
                log.info(" adding '%s' to scripts" % (script,))
                new_scripts.append(script)
            else:
                # Materialize once: the original iterated ``script`` twice
                # (a side-effect list comprehension for logging, then
                # ``list(script)`` for extend), which silently loses every
                # entry when the callable returns a one-shot iterator.
                generated = list(script)
                for s in generated:
                    log.info(" adding '%s' to scripts" % (s,))
                new_scripts.extend(generated)
        return new_scripts

    def run(self):
        """Expand script-generating callables, then run the normal build."""
        if not self.scripts:
            return
        self.scripts = self.generate_scripts(self.scripts)
        # setuptools' develop command requires distribution.scripts to be a
        # list of filenames, not functions, so sync the expanded list back.
        self.distribution.scripts = self.scripts
        return old_build_scripts.run(self)

    def get_source_files(self):
        """Return the script source files (delegates to numpy.distutils)."""
        from numpy.distutils.misc_util import get_script_files
        return get_script_files(self.scripts)
|
hydrospanner/DForurm | refs/heads/master | DForurm/env/Lib/site-packages/django/contrib/gis/db/backends/oracle/models.py | 111 | """
The GeometryColumns and SpatialRefSys models for the Oracle spatial
backend.
It should be noted that Oracle Spatial does not have database tables
named according to the OGC standard, so the closest analogs are used.
For example, the `USER_SDO_GEOM_METADATA` is used for the GeometryColumns
model and the `SDO_COORD_REF_SYS` is used for the SpatialRefSys model.
"""
from django.contrib.gis.db import models
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class OracleGeometryColumns(models.Model):
    "Maps to the Oracle USER_SDO_GEOM_METADATA table."
    # Feature table and geometry column described by this metadata row.
    table_name = models.CharField(max_length=32)
    column_name = models.CharField(max_length=1024)
    # NOTE(review): srid marked primary_key presumably because Django models
    # require one and the Oracle view has no single-column key -- confirm.
    srid = models.IntegerField(primary_key=True)
    # TODO: Add support for `diminfo` column (type MDSYS.SDO_DIM_ARRAY).

    class Meta:
        app_label = 'gis'
        db_table = 'USER_SDO_GEOM_METADATA'
        # Backed by an Oracle system view; Django must never migrate it.
        managed = False

    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the feature table
        name.
        """
        return 'table_name'

    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the feature
        geometry column.
        """
        return 'column_name'

    def __str__(self):
        return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srid)
class OracleSpatialRefSys(models.Model, SpatialRefSysMixin):
    "Maps to the Oracle MDSYS.CS_SRS table."
    # Coordinate system name, SRID and the authority that defined it.
    cs_name = models.CharField(max_length=68)
    srid = models.IntegerField(primary_key=True)
    auth_srid = models.IntegerField()
    auth_name = models.CharField(max_length=256)
    # WKT definition of the spatial reference system (used by the mixin).
    wktext = models.CharField(max_length=2046)
    # Optional geometry representing the bounds of this coordinate
    # system. By default, all are NULL in the table.
    cs_bounds = models.PolygonField(null=True)
    objects = models.GeoManager()

    class Meta:
        app_label = 'gis'
        db_table = 'CS_SRS'
        # Backed by an Oracle system table; Django must never migrate it.
        managed = False

    @property
    def wkt(self):
        # SpatialRefSysMixin expects a `wkt` property; Oracle stores the
        # text in the `wktext` column.
        return self.wktext
|
rezib/cloubed | refs/heads/master | cloubed/DomainEvent.py | 1 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2013 Rémi Palancher
#
# This file is part of Cloubed.
#
# Cloubed is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# Cloubed is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Cloubed. If not, see
# <http://www.gnu.org/licenses/>.
""" DomainEvent class of Cloubed """
import logging
class DomainEvent:
    """Represents a libvirt domain lifecycle event as a (type, detail) pair.

    An event can be built either from the integer codes delivered by libvirt
    (mapped to symbolic names through ``_domain_event_infos``) or directly
    from the symbolic type/detail strings.
    """

    # Maps libvirt integer event type -> [type name, {detail code: name}].
    _domain_event_infos = {
        0: ["DEFINED", {
               0: "DEFINED_ADDED",          # newly created config file
               1: "DEFINED_UPDATED",        # changed config file
               2: "DEFINED_LAST" }
           ],
        1: ["UNDEFINED", {
               0: "UNDEFINED_REMOVED",      # deleted the config file
               1: "UNDEFINED_LAST" }
           ],
        2: ["STARTED", {
               0: "STARTED_BOOTED",         # normal startup from boot
               1: "STARTED_MIGRATED",       # incoming migration from another host
               2: "STARTED_RESTORED",       # restored from a state file
               3: "STARTED_FROM_SNAPSHOT",  # restored from snapshot
               4: "STARTED_WAKEUP",         # started due to wakeup event
               5: "STARTED_LAST" }
           ],
        3: ["SUSPENDED", {
               0: "SUSPENDED_PAUSED",       # normal suspend due to admin pause
               1: "SUSPENDED_MIGRATED",     # suspended for offline migration
               2: "SUSPENDED_IOERROR",      # suspended due to a disk I/O error
               3: "SUSPENDED_WATCHDOG",     # suspended due to a watchdog firing
               4: "SUSPENDED_RESTORED",     # restored from paused state file
               5: "SUSPENDED_FROM_SNAPSHOT", # restored from paused snapshot
               6: "SUSPENDED_API_ERROR",    # suspended after failure during libvirt
                                            # API call
               7: "SUSPENDED_LAST" }
           ],
        4: ["RESUMED", {
               0: "RESUMED_UNPAUSED",       # normal resume due to admin unpause
               1: "RESUMED_MIGRATED",       # resumed for completion during migration
               2: "RESUMED_FROM_SNAPSHOT",  # resumed from snapshot
               3: "RESUMED_LAST" }
           ],
        5: ["STOPPED", {
               0: "STOPPED_SHUTDOWN",       # normal shutdown
               1: "STOPPED_DESTROYED",      # forced poweroff from host
               2: "STOPPED_CRASHED",        # guest crashed
               3: "STOPPED_MIGRATED",       # migrated off to another host
               4: "STOPPED_SAVED",          # saved to a state file
               5: "STOPPED_FAILED",         # host emulator/mgmt failed
               6: "STOPPED_FROM_SNAPSHOT",  # offline snapshot loaded
               7: "STOPPED_LAST" }
           ],
        6: ["SHUTDOWN", {
               0: "SHUTDOWN_FINISHED",      # guest finished shutdown sequence
               1: "SHUTDOWN_LAST" }
           ],
        7: ["PMSUSPENDED", {
               0: "PMSUSPENDED_MEMORY",     # guest was PM suspended to memory
               1: "PMSUSPENDED_DISK",       # guest was PM suspended to disk
               2: "PMSUSPENDED_LAST" }
           ],
        8: ["LAST", None ]
    }

    def __init__(self, event_type, event_detail):
        """Build an event from libvirt integer codes or from name strings.

        :raises TypeError: when the arguments are not both ints or both strs.
            (The original silently ``return``-ed from __init__, leaving an
            object without ``type``/``detail`` attributes and deferring the
            crash to first use.)
        :raises KeyError: when an integer code is unknown.
        """
        if isinstance(event_type, int) and isinstance(event_detail, int):
            infos = DomainEvent._domain_event_infos[event_type]
            self.type = infos[0]
            self.detail = infos[1][event_detail]
        elif isinstance(event_type, str) and isinstance(event_detail, str):
            self.type = event_type
            self.detail = event_detail
        else:
            raise TypeError("event_type and event_detail must both be ints "
                            "or both be strs")

    def __str__(self):
        return "{event_type}>{event_detail}" \
                   .format(event_type=self.type, event_detail=self.detail)

    def __eq__(self, other):
        """Two events are equal when both type and detail names match."""
        logging.debug("comparing events: {event1} {event2}" \
                          .format(event1=self, event2=other))
        if not isinstance(other, DomainEvent):
            # Let the other operand have a say instead of raising
            # AttributeError on foreign types.
            return NotImplemented
        return self.type == other.type and self.detail == other.detail

    @classmethod
    def get_event_info(cls, event_type, event_detail):
        """
        get_event_info: Returns both the type name and the detail of the
        DomainEvent
        """
        return (cls._domain_event_infos[event_type][0],
                cls._domain_event_infos[event_type][1][event_detail])
|
mozilla/make.mozilla.org | refs/heads/master | make_mozilla/core/fields.py | 1 | from django.core.exceptions import SuspiciousOperation
from django.core.files.base import ContentFile
from django.db import models
from django.db.models.fields import files
from PIL import Image
from os import path
import cStringIO
import make_mozilla
from south.modelsinspector import add_introspection_rules
# Teach South's migration introspection about the custom field below so it
# can be frozen in migrations without bespoke rules.
add_introspection_rules([], ['^make_mozilla\.core\.fields\.SizedImageField'])

# Filesystem root of the project (two directory levels above the
# make_mozilla package); used to open image files by storage-relative name.
ROOT = path.dirname(path.dirname(make_mozilla.__file__))
class SizedImageFieldFile(files.ImageFieldFile):
    """ImageFieldFile that manages pre-computed resized variants.

    For every named size declared on the owning SizedImageField, this wrapper
    exposes ``<name>_url``, ``<name>_width`` and ``<name>_height`` attributes,
    and generates/deletes the matching thumbnail files next to the original
    on save/delete.  (Python 2 code: uses cStringIO and old-style PIL APIs.)
    """

    def __init__(self, *args, **kwargs):
        super(SizedImageFieldFile, self).__init__(*args, **kwargs)
        # Mapping of variant name -> size spec (an int width, or a
        # (width, height) pair), copied from the field definition.
        self.sizes = self.field.sizes
        if self.sizes and self.name:
            path, extension = self.url.rsplit('.', 1)
            for name, size in self.sizes.items():
                try:
                    width, height = self.get_width_height(size)
                    try:
                        original_width, original_height = (self.width, self.height)
                    except SuspiciousOperation:
                        # NOTE(review): the storage apparently raises
                        # SuspiciousOperation for some paths; fall back to
                        # opening the file relative to the project ROOT --
                        # confirm this branch is still needed.
                        try:
                            original_width, original_height = Image.open('%s%s' % (ROOT, self.name)).size
                        except IOError:
                            # Dimensions unknown: False never equals a real
                            # width/height, so the variant URL is used below.
                            original_width = original_height = False
                    if width == original_width and height == original_height:
                        # Variant matches the original exactly; reuse its URL.
                        setattr(self, '%s_url' % name,
                                '%s.%s' % (path, extension))
                    else:
                        # Thumbnails are stored as <base>.<variant>.<ext>.
                        setattr(self, '%s_url' % name,
                                '%s.%s.%s' % (path, name, extension))
                    setattr(self, '%s_width' % name, int(width))
                    setattr(self, '%s_height' % name, int(height))
                except IOError:
                    # Missing/unreadable source image: skip this variant.
                    pass

    def generate_thumb(self, img, width, height, format):
        """Return a ContentFile holding *img* resized/cropped to width x height.

        *img* is an open file-like object; *format* is the file extension
        (``jpg`` is mapped to PIL's ``jpeg`` format name).
        """
        img.seek(0)
        image = Image.open(img)
        image_width, image_height = image.size
        # Keep the source metadata (e.g. JPEG info dict) for re-saving.
        meta = image.info
        if height is None:
            # Only a width was given: derive height from the aspect ratio.
            height = width/float(image_width) * image_height
        # Scale so the image covers the target box, then center-crop it.
        maxfactor = max(width/float(image_width), height/float(image_height))
        w = round(image_width * maxfactor)
        h = round(image_height * maxfactor)
        image = image.resize((w, h), Image.ANTIALIAS)
        x = round((w-width)/2.0)
        y = round((h-height)/2.0)
        image = image.crop((x, y, w-x, h-y))
        image.load()
        image = image.resize((width, height), Image.ANTIALIAS)
        image.thumbnail((width, height), Image.ANTIALIAS)
        io = cStringIO.StringIO()
        format = 'jpeg' if format.lower() == 'jpg' else format.lower()
        image.save(io, format, **meta)
        return ContentFile(io.getvalue())

    def get_width_height(self, size):
        """Resolve a size spec into a concrete (width, height) pair.

        *size* is either an int width (height derived from the original's
        aspect ratio; may come back None if the file is unreadable) or an
        indexable (width, height) pair.
        """
        try:
            width = int(size)
            try:
                height = width / float(self.width) * self.height
            except IOError:
                height = None
            except SuspiciousOperation:
                # Same fallback as __init__: read dimensions from disk.
                ow, oh = Image.open('%s%s' % (ROOT, self.name)).size
                height = width / float(ow) * oh
        except TypeError:
            # Not coercible to int: assume a (width, height) pair.
            width = size[0]
            height = size[1]
        return (width, height)

    def save(self, name, content, save=True):
        """Save the original, then generate and store each sized variant."""
        super(SizedImageFieldFile, self).save(name, content, save)
        if self.sizes:
            path, extension = self.name.rsplit('.', 1)
            for name, size in self.sizes.items():
                width, height = self.get_width_height(size)
                # Skip variants identical in size to the original upload.
                if width != self.width or height != self.height:
                    target = '%s.%s.%s' % (path, name, extension)
                    thumb = self.generate_thumb(content, width, height, extension)
                    self.storage.save(target, thumb)

    def delete(self, save=True):
        """Delete the original, then best-effort delete each sized variant."""
        super(SizedImageFieldFile, self).delete(save)
        if self.sizes:
            path, extension = self.name.rsplit('.', 1)
            for name in self.sizes:
                try:
                    self.storage.delete('%s.%s.%s' % (path, name, extension))
                except:
                    # Deliberate best-effort cleanup: a missing variant or a
                    # storage hiccup must not abort the delete.
                    pass
class SizedImageField(models.ImageField):
    """ImageField that additionally maintains named resized variants.

    Accepts a ``sizes`` keyword argument: a mapping of variant name to an int
    width or a (width, height) pair, consumed before delegating to ImageField.
    """
    attr_class = SizedImageFieldFile

    def __init__(self, *args, **kwargs):
        # Pull out our extra option so Django's ImageField never sees it.
        self.sizes = kwargs.pop('sizes', None)
        super(SizedImageField, self).__init__(*args, **kwargs)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.