repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
Anislav/Stream-Framework | refs/heads/master | feedly/tests/managers/redis.py | 5 | from feedly.feed_managers.base import Feedly
from feedly.feeds.base import UserBaseFeed
from feedly.feeds.redis import RedisFeed
from feedly.tests.managers.base import BaseFeedlyTest
import pytest
class RedisUserBaseFeed(UserBaseFeed, RedisFeed):
    # Combines the personal-feed behaviour of UserBaseFeed with Redis storage.
    pass
class RedisFeedly(Feedly):
    # Feed manager wired to the Redis-backed feed implementations.
    feed_classes = {
        'feed': RedisFeed
    }
    # Feed class used for each user's own (personal) feed.
    user_feed_class = RedisUserBaseFeed
@pytest.mark.usefixtures("redis_reset")
class RedisFeedlyTest(BaseFeedlyTest):
    # Runs the shared manager test suite against the Redis implementation;
    # the redis_reset fixture clears Redis state between tests.
    manager_class = RedisFeedly
|
emollient/WITRapp | refs/heads/master | witrapp/witrapp/views.py | 1 | from pyramid.view import view_config
from sqlalchemy.exc import DBAPIError
import urllib2
@view_config(route_name='home', renderer='templates/index.mako')
def home(request):
    # Fetch the live-stream playlist (M3U) from the WITR server and hand the
    # raw text to the template as 'response'.
    # NOTE(review): no timeout or error handling -- a network failure will
    # surface as an unhandled exception for this view.
    response = urllib2.urlopen('http://witr.rit.edu/static/live.m3u')
    response = response.read()
    print response
    return {'response': response}
@view_config(route_name='all_songs_play', renderer='witrapp:templates/allsongs.mako')
def all_songs_play(request):
    # Placeholder view: returns None, so the renderer receives no template
    # context.  TODO: implement.
    pass
|
blacklin/kbengine | refs/heads/master | kbe/res/scripts/common/Lib/lib2to3/fixes/fix_import.py | 136 | """Fixer for import statements.
If spam is being imported from the local directory, this import:
from spam import eggs
Becomes:
from .spam import eggs
And this import:
import spam
Becomes:
from . import spam
"""
# Local imports
from .. import fixer_base
from os.path import dirname, join, exists, sep
from ..fixer_util import FromImport, syms, token
def traverse_imports(names):
    """
    Walks over all the names imported in a dotted_as_names node.
    """
    stack = [names]
    while stack:
        current = stack.pop()
        node_type = current.type
        if node_type == token.NAME:
            yield current.value
        elif node_type == syms.dotted_name:
            yield "".join(child.value for child in current.children)
        elif node_type == syms.dotted_as_name:
            # Only the dotted name (first child) matters; the alias is dropped.
            stack.append(current.children[0])
        elif node_type == syms.dotted_as_names:
            # Every other child is a comma; [::-2] keeps the name nodes.
            stack.extend(current.children[::-2])
        else:
            raise AssertionError("unknown node type")
class FixImport(fixer_base.BaseFix):
    """Fixer that turns implicit sibling imports into explicit relative ones.

    ``import spam``          ->  ``from . import spam``
    ``from spam import x``   ->  ``from .spam import x``
    (only when ``spam`` appears to be a sibling module/package of the file
    being fixed -- see probably_a_local_import).
    """
    BM_compatible = True
    PATTERN = """
    import_from< 'from' imp=any 'import' ['('] any [')'] >
    |
    import_name< 'import' imp=any >
    """

    def start_tree(self, tree, name):
        super(FixImport, self).start_tree(tree, name)
        # Files already under absolute_import semantics need no fixing.
        self.skip = "absolute_import" in tree.future_features

    def transform(self, node, results):
        if self.skip:
            return
        imp = results['imp']
        if node.type == syms.import_from:
            # Some imps are top-level (eg: 'import ham')
            # some are first level (eg: 'import ham.eggs')
            # some are third level (eg: 'import ham.eggs as spam')
            # Hence, the loop
            while not hasattr(imp, 'value'):
                imp = imp.children[0]
            if self.probably_a_local_import(imp.value):
                imp.value = "." + imp.value
                imp.changed()
        else:
            have_local = False
            have_absolute = False
            for mod_name in traverse_imports(imp):
                if self.probably_a_local_import(mod_name):
                    have_local = True
                else:
                    have_absolute = True
            if have_absolute:
                if have_local:
                    # We won't handle both sibling and absolute imports in the
                    # same statement at the moment.
                    self.warning(node, "absolute and local imports together")
                return
            new = FromImport(".", [imp])
            new.prefix = node.prefix
            return new

    def probably_a_local_import(self, imp_name):
        # Heuristic: does a module/package with this name sit next to the
        # file being fixed?
        if imp_name.startswith("."):
            # Relative imports are certainly not local imports.
            return False
        imp_name = imp_name.split(".", 1)[0]
        base_path = dirname(self.filename)
        base_path = join(base_path, imp_name)
        # If there is no __init__.py next to the file its not in a package
        # so can't be a relative import.
        if not exists(join(dirname(base_path), "__init__.py")):
            return False
        # Check plain modules, packages (sep) and compiled extensions.
        for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd"]:
            if exists(base_path + ext):
                return True
        return False
|
chrisschuette/Nyx | refs/heads/master | third_party/gtest/test/gtest_break_on_failure_unittest.py | 2140 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.

# True iff this script runs on Windows (affects which tests are defined).
IS_WINDOWS = os.name == 'nt'

# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'

# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'

# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'

# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'

# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
    'gtest_break_on_failure_unittest_')

# Shorthands for the test-utils helpers used below.
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar

# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely.  Therefore they are incompatible with
# the premature-exit-file protocol by design.  Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""

  process = gtest_test_utils.Subprocess(command, env=environ)
  return 1 if process.terminated_by_signal else 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """

    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    has_seg_fault = Run(command)

    # Restore the default environment so later tests start from a clean state.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""

    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  # On Windows, exception catching and break-on-failure interact; this test
  # is therefore only meaningful (and only defined) there.
  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""

      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
# Standard gtest-python entry point.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
YijunZh/openplatform-python | refs/heads/master | guardianapi/__init__.py | 9 | from client import Client
|
joshloyal/scikit-learn | refs/heads/master | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
    """NumpyDocString subclass that renders each docstring section as
    Sphinx-flavoured reStructuredText.

    Parameters
    ----------
    docstring : str
        The raw docstring to parse.
    config : dict, optional
        Rendering options; only ``'use_plots'`` is read here.
    """

    def __init__(self, docstring, config=None):
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)

    # string conversion routines

    def _str_header(self, name, symbol='`'):
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        # Prefix every line of `doc` with `indent` spaces.
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out

    def _str_signature(self):
        # Signature rendering is deliberately suppressed (Sphinx emits the
        # signature itself); the previous unreachable fallback that rendered
        # ``self['Signature']`` has been removed as dead code.
        return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        """Render a parameter-style section (Parameters, Returns, ...)."""
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc, 8)
                out += ['']
        return out

    @property
    def _obj(self):
        # The documented object, whichever subclass attribute holds it.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None

    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')
            if prefix:
                prefix = '~%s.' % prefix
            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # Members that exist on the object go in the autosummary;
                # anything else falls back to a plain table.
                if not self._obj or hasattr(self._obj, param):
                    autosum += [" %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))
            if autosum:
                # GAEL: Toctree commented out below because it creates
                # hundreds of sphinx warnings
                # out += ['.. autosummary::', ' :toctree:', '']
                out += ['.. autosummary::', '']
                out += autosum
            if others:
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
                fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out

    def _str_section(self, name):
        """Render a free-form section under a rubric header."""
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            # Skip the base renderer's own header (first two lines).
            out += self._str_indent(see_also[2:])
        return out

    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out
        out += ['.. index:: %s' % idx.get('default', '')]
        # .items() instead of the Python-2-only .iteritems(): iteration
        # behaviour is identical and it works on both Python versions.
        for section, references in idx.items():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += [' single: %s' % (', '.join(references))]
            else:
                out += [' %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            import sphinx  # local import to avoid test dependency
            # NOTE(review): lexicographic version compare -- correct for
            # modern "1.x"+ versions, but technically wrong in general.
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex', '']
            else:
                out += ['.. latexonly::', '']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out

    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])
        # When plotting is enabled and the example imports matplotlib but has
        # no explicit plot directive, wrap the whole section in one.
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')

    def __str__(self, indent=0, func_role="obj"):
        """Assemble every section into the final reST string."""
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Methods',):
            out += self._str_member_list(param_list)
        out = self._str_indent(out, indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx-flavoured docstring renderer for functions."""

    def __init__(self, obj, doc=None, config=None):
        # None instead of a mutable default {}: avoids cross-call sharing.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx-flavoured docstring renderer for classes."""

    def __init__(self, obj, doc=None, func_doc=None, config=None):
        # None instead of a mutable default {}: avoids cross-call sharing.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): the func_doc argument is accepted but deliberately not
        # forwarded (ClassDoc always receives func_doc=None) -- confirm intent
        # before changing.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    # Renderer for plain objects: the docstring text is supplied directly and
    # the object is only retained for member lookups via ``_obj``.
    def __init__(self, obj, doc=None, config=None):
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config=None):
    """Return the appropriate Sphinx docstring renderer for *obj*.

    Parameters
    ----------
    obj : object
        The thing being documented.
    what : str, optional
        One of 'class', 'module', 'function', 'object'; inferred when None.
    doc : str, optional
        Docstring override; fetched via pydoc when needed.
    config : dict, optional
        Options forwarded to the renderer.
    """
    # None instead of a mutable default {}: avoids cross-call sharing.
    config = {} if config is None else config
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
|
saydulk/django | refs/heads/master | tests/admin_views/forms.py | 339 | from django import forms
from django.contrib.admin.forms import AdminAuthenticationForm
class CustomAdminAuthenticationForm(AdminAuthenticationForm):
    """Admin auth form used by the test suite to exercise custom form media
    and custom field-level validation errors."""

    class Media:
        css = {'all': ('path/to/media.css',)}

    def clean_username(self):
        """Reject the reserved username used to trigger a form error."""
        username = self.cleaned_data.get('username')
        if username != 'customform':
            return username
        raise forms.ValidationError('custom form error')
|
himleyb85/django | refs/heads/master | tests/template_tests/filter_tests/test_timeuntil.py | 161 | from __future__ import unicode_literals
from datetime import datetime, timedelta
from django.template.defaultfilters import timeuntil_filter
from django.test import SimpleTestCase
from django.test.utils import requires_tz_support
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimeuntilTests(TimezoneTestCase):
    """Template-level tests of the ``timeuntil`` filter, with and without an
    explicit comparison argument, across timezones and date-only values."""

    # Default compare with datetime.now()
    @setup({'timeuntil01': '{{ a|timeuntil }}'})
    def test_timeuntil01(self):
        output = self.engine.render_to_string('timeuntil01', {'a': datetime.now() + timedelta(minutes=2, seconds=10)})
        self.assertEqual(output, '2\xa0minutes')

    @setup({'timeuntil02': '{{ a|timeuntil }}'})
    def test_timeuntil02(self):
        output = self.engine.render_to_string('timeuntil02', {'a': (datetime.now() + timedelta(days=1, seconds=10))})
        self.assertEqual(output, '1\xa0day')

    @setup({'timeuntil03': '{{ a|timeuntil }}'})
    def test_timeuntil03(self):
        output = self.engine.render_to_string(
            'timeuntil03', {'a': (datetime.now() + timedelta(hours=8, minutes=10, seconds=10))}
        )
        self.assertEqual(output, '8\xa0hours, 10\xa0minutes')

    # Compare to a given parameter
    @setup({'timeuntil04': '{{ a|timeuntil:b }}'})
    def test_timeuntil04(self):
        output = self.engine.render_to_string(
            'timeuntil04',
            {'a': self.now - timedelta(days=1), 'b': self.now - timedelta(days=2)},
        )
        self.assertEqual(output, '1\xa0day')

    @setup({'timeuntil05': '{{ a|timeuntil:b }}'})
    def test_timeuntil05(self):
        output = self.engine.render_to_string(
            'timeuntil05',
            {'a': self.now - timedelta(days=2), 'b': self.now - timedelta(days=2, minutes=1)},
        )
        self.assertEqual(output, '1\xa0minute')

    # Regression for #7443: a moment in the past renders as "0 minutes",
    # never a negative interval.
    @setup({'timeuntil06': '{{ earlier|timeuntil }}'})
    def test_timeuntil06(self):
        output = self.engine.render_to_string('timeuntil06', {'earlier': self.now - timedelta(days=7)})
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil07': '{{ earlier|timeuntil:now }}'})
    def test_timeuntil07(self):
        output = self.engine.render_to_string(
            'timeuntil07', {'now': self.now, 'earlier': self.now - timedelta(days=7)}
        )
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil08': '{{ later|timeuntil }}'})
    def test_timeuntil08(self):
        output = self.engine.render_to_string('timeuntil08', {'later': self.now + timedelta(days=7, hours=1)})
        self.assertEqual(output, '1\xa0week')

    @setup({'timeuntil09': '{{ later|timeuntil:now }}'})
    def test_timeuntil09(self):
        output = self.engine.render_to_string('timeuntil09', {'now': self.now, 'later': self.now + timedelta(days=7)})
        self.assertEqual(output, '1\xa0week')

    # Ensures that differing timezones are calculated correctly.
    @requires_tz_support
    @setup({'timeuntil10': '{{ a|timeuntil }}'})
    def test_timeuntil10(self):
        output = self.engine.render_to_string('timeuntil10', {'a': self.now_tz})
        self.assertEqual(output, '0\xa0minutes')

    @requires_tz_support
    @setup({'timeuntil11': '{{ a|timeuntil }}'})
    def test_timeuntil11(self):
        output = self.engine.render_to_string('timeuntil11', {'a': self.now_tz_i})
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil12': '{{ a|timeuntil:b }}'})
    def test_timeuntil12(self):
        output = self.engine.render_to_string('timeuntil12', {'a': self.now_tz_i, 'b': self.now_tz})
        self.assertEqual(output, '0\xa0minutes')

    # Regression for #9065 (two date objects).
    @setup({'timeuntil13': '{{ a|timeuntil:b }}'})
    def test_timeuntil13(self):
        output = self.engine.render_to_string('timeuntil13', {'a': self.today, 'b': self.today})
        self.assertEqual(output, '0\xa0minutes')

    @setup({'timeuntil14': '{{ a|timeuntil:b }}'})
    def test_timeuntil14(self):
        output = self.engine.render_to_string('timeuntil14', {'a': self.today, 'b': self.today - timedelta(hours=24)})
        self.assertEqual(output, '1\xa0day')
class FunctionTests(SimpleTestCase):
    """Direct tests of the timeuntil filter function (no template engine)."""

    def test_until_now(self):
        # timedelta(1, 1) is one day and one second from now.
        self.assertEqual(timeuntil_filter(datetime.now() + timedelta(1, 1)), '1\xa0day')

    def test_explicit_date(self):
        self.assertEqual(timeuntil_filter(datetime(2005, 12, 30), datetime(2005, 12, 29)), '1\xa0day')
|
jzoldak/edx-platform | refs/heads/master | common/lib/capa/capa/tests/test_customrender.py | 37 | from lxml import etree
import unittest
import xml.sax.saxutils as saxutils
from capa.tests.helpers import test_capa_system
from capa import customrender
# just a handy shortcut
lookup_tag = customrender.registry.get_class_for_tag
def extract_context(xml):
    """
    Given an xml element corresponding to the output of test_capa_system.render_template, get back the
    original context
    """
    # The stub renderer emits repr(context) as the element text; eval() turns
    # it back into a Python object.  Safe only because the input is generated
    # by our own test helper -- never apply this to untrusted data.
    return eval(xml.text)
def quote_attr(s):
    """Return *s* escaped for use inside an XML attribute, with the outer
    quotes that ``saxutils.quoteattr`` adds stripped off."""
    quoted = saxutils.quoteattr(s)
    return quoted[1:-1]
class HelperTest(unittest.TestCase):
    '''
    Make sure that our helper function works!
    '''
    def check(self, d):
        # Render d through the stub template system, then verify that
        # extract_context() round-trips it back to an equal object.
        xml = etree.XML(test_capa_system().render_template('blah', d))
        self.assertEqual(d, extract_context(xml))

    def test_extract_context(self):
        self.check({})
        # NOTE(review): the literals below are *sets*, not dicts -- the
        # repr/eval round-trip works for sets too, but confirm this matches
        # what render_template callers actually pass.
        self.check({1, 2})
        self.check({'id', 'an id'})
        self.check({'with"quote', 'also"quote'})
class SolutionRenderTest(unittest.TestCase):
    '''
    Make sure solutions render properly.
    '''
    def test_rendering(self):
        solution = 'To compute unicorns, count them.'
        xml_str = """<solution id="solution_12">{s}</solution>""".format(s=solution)
        element = etree.fromstring(xml_str)
        renderer = lookup_tag('solution')(test_capa_system(), element)
        self.assertEqual(renderer.id, 'solution_12')
        # Our test_capa_system "renders" templates to a div with the repr of the context.
        xml = renderer.get_html()
        context = extract_context(xml)
        # Only the id reaches the template context; the solution body itself
        # is not part of it.
        self.assertEqual(context, {'id': 'solution_12'})
class MathRenderTest(unittest.TestCase):
    '''
    Make sure math renders properly.
    '''
    def check_parse(self, latex_in, mathjax_out):
        # Parse a <math> element and check the MathJax-ready string produced.
        xml_str = """<math>{tex}</math>""".format(tex=latex_in)
        element = etree.fromstring(xml_str)
        renderer = lookup_tag('math')(test_capa_system(), element)
        self.assertEqual(renderer.mathstr, mathjax_out)

    def test_parsing(self):
        self.check_parse('$abc$', '[mathjaxinline]abc[/mathjaxinline]')
        # An unterminated delimiter is passed through unchanged.
        self.check_parse('$abc', '$abc')
        self.check_parse(r'$\displaystyle 2+2$', '[mathjax] 2+2[/mathjax]')
# NOTE: not testing get_html yet because I don't understand why it's doing what it's doing.
|
IllusionRom-deprecated/android_platform_tools_idea | refs/heads/master | python/testData/refactoring/rename/renameInheritors.py | 83 | class A:
def f<caret>oo(self): pass
class B(A):
def foo(self): pass |
chris-chris/tensorflow | refs/heads/master | tensorflow/compiler/tests/momentum_test.py | 72 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class MomentumOptimizerTest(XLATestCase):
  """Tests for MomentumOptimizer under XLA compilation.

  Uses the modern unittest spellings (``assertEqual`` instead of the
  deprecated ``assertEquals`` alias, ``assertNotIn`` instead of
  ``assertFalse(x in y)``) for clearer failure messages.
  """

  def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
    # NumPy reference implementation of one Nesterov-momentum step, used to
    # cross-check the TF op's results.
    var += accum * lr * momentum
    accum = accum * momentum + g
    var -= lr * accum
    var -= accum * lr * momentum
    return var, accum

  def testBasic(self):
    for dtype in self.float_types:
      with self.test_session(), self.test_scope():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        mom_opt = momentum_lib.MomentumOptimizer(
            learning_rate=2.0, momentum=0.9)
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        self.assertNotIn(slot0, variables.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        self.assertNotIn(slot1, variables.trainable_variables())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators where 0. So we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([
                1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
            ]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([
                2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
                    (0.9 * 0.01 + 0.01) * 2.0)
            ]), var1.eval())

  def testNesterovMomentum(self):
    for dtype in self.float_types:
      with self.test_session(), self.test_scope():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        var0_np = np.array([1.0, 2.0], dtype=dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype)
        accum0_np = np.array([0.0, 0.0], dtype=dtype)
        accum1_np = np.array([0.0, 0.0], dtype=dtype)
        cost = 5 * var0 * var0 + 3 * var1
        global_step = resource_variable_ops.ResourceVariable(
            array_ops.zeros([], dtypes.int32), name="global_step")
        mom_op = momentum_lib.MomentumOptimizer(
            learning_rate=2.0, momentum=0.9, use_nesterov=True)
        opt_op = mom_op.minimize(cost, global_step, [var0, var1])
        variables.global_variables_initializer().run()
        # Run several steps and compare against the NumPy reference each time.
        for _ in range(1, 5):
          opt_op.run()
          var0_np, accum0_np = self._update_nesterov_momentum_numpy(
              var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
          var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
                                                                    accum1_np,
                                                                    3, 2.0, 0.9)
          self.assertAllClose(var0_np, var0.eval())
          self.assertAllClose(var1_np, var1.eval())

  def testTensorLearningRateAndMomentum(self):
    # Same scenario as testBasic, but learning rate and momentum are supplied
    # as tensors rather than Python floats.
    for dtype in self.float_types:
      with self.test_session(), self.test_scope():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        mom_opt = momentum_lib.MomentumOptimizer(
            learning_rate=constant_op.constant(2.0),
            momentum=constant_op.constant(0.9))
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        self.assertNotIn(slot0, variables.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        self.assertNotIn(slot1, variables.trainable_variables())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators where 0. So we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([
                1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
            ]), var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([
                2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
                    (0.9 * 0.01 + 0.01) * 2.0)
            ]), var1.eval())
# Standard TF test entry point.
if __name__ == "__main__":
  test.main()
|
ganga-devs/ganga | refs/heads/develop | ganga/GangaCore/test/GPI/Mergers/TestCustomMerger.py | 1 |
import os
import tempfile
import pytest
from GangaTest.Framework.utils import write_file
from GangaCore.GPIDev.Base.Proxy import addProxy
from GangaCore.GPIDev.Adapters.IPostProcessor import PostProcessException
from GangaCore.testlib.GangaUnitTest import GangaUnitTest
from GangaCore.testlib.monitoring import run_until_completed, run_until_state
from .CopySplitter import CopySplitter
CopySplitter = addProxy(CopySplitter)
class TestCustomMerger(GangaUnitTest):
    """Tests of CustomMerger: merging job outputs via a user-supplied module.

    Fix over the original: every ``tempfile.mktemp()`` + ``os.mkdir()`` pair
    (deprecated and racy -- the name can be taken between the two calls) is
    replaced by the atomic ``tempfile.mkdtemp()``.
    """

    def setUp(self):
        """Create two Executable jobs whose shell script writes out.txt and out2.txt."""
        super(TestCustomMerger, self).setUp()
        from GangaCore.GPI import Job, Executable, Local, File, LocalFile
        self.jobslice = []
        self.file_name = 'id_echo.sh'
        for _ in range(2):
            j = Job(application=Executable(), backend=Local(batchsize=10))
            scriptString = '''
#!/bin/sh
echo "Output from job $1." > out.txt
echo "Output from job $2." > out2.txt
'''
            # write string to tmpfile
            tmpdir = tempfile.mkdtemp()
            fileName = os.path.join(tmpdir, self.file_name)
            write_file(fileName, scriptString)
            j.application.exe = 'sh'
            j.application.args = [File(fileName), str(j.id), str(j.id * 10)]
            j.outputfiles = [LocalFile('out.txt'), LocalFile('out2.txt')]
            self.jobslice.append(j)

    def runJobSlice(self):
        """Submit every job and block until each one completes."""
        for j in self.jobslice:
            j.submit()
            assert run_until_completed(j), 'Timeout on job submission: job is still not finished'

    def tearDown(self):
        for j in self.jobslice:
            j.remove()
        super(TestCustomMerger, self).tearDown()

    def testSimpleCustomMerge(self):
        """A merge module that succeeds must produce the merged output files."""
        from GangaCore.GPI import CustomMerger
        self.runJobSlice()
        tmpdir = tempfile.mkdtemp()
        file_name = os.path.join(tmpdir, 'merge.py')
        with open(file_name, 'w') as module_file:
            module_file.write("""from __future__ import print_function
def mergefiles(file_list, output_file):
    '''Free script for merging files'''
    with open(output_file,'w') as out:
        for f in file_list:
            print(f, file=out)
    return True
""")
        cm = CustomMerger(module=file_name)
        cm.files = ['out.txt', 'out2.txt']
        assert cm.merge(self.jobslice, tmpdir), 'Merge should complete'
        assert os.path.exists(os.path.join(tmpdir, 'out.txt')), 'out.txt must exist'
        assert os.path.exists(os.path.join(tmpdir, 'out2.txt')), 'out2.txt must exist'

    def testFailJobOnMerge(self):
        """A merge module returning False must raise PostProcessException and,
        when attached as a postprocessor, fail the job."""
        from GangaCore.GPI import CustomMerger
        self.runJobSlice()
        tmpdir = tempfile.mkdtemp()
        file_name = os.path.join(tmpdir, 'merge.py')
        with open(file_name, 'w') as module_file:
            module_file.write("""def mergefiles(file_list, output_file):
    '''Free script for merging files'''
    return False
""")
        cm = CustomMerger(module=file_name)
        cm.files = ['out.txt', 'out2.txt']
        with pytest.raises(PostProcessException):
            cm.merge(self.jobslice, tmpdir)
        j = self.jobslice[0].copy()
        j.splitter = CopySplitter()
        j.postprocessors = cm
        j.submit()
        assert run_until_state(j, state='failed')
|
Z2PackDev/bands_inspect | refs/heads/master | bands_inspect/eigenvals.py | 2 | # -*- coding: utf-8 -*-
# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Defines the data container for eigenvalue data (bandstructures).
"""
import types
import numpy as np
from fsc.export import export
from fsc.hdf5_io import HDF5Enabled, subscribe_hdf5
from .kpoints import KpointsExplicit, KpointsBase
from .io import from_hdf5
@export
@subscribe_hdf5(
    'bands_inspect.eigenvals_data', extra_tags=('eigenvals_data', )
)
class EigenvalsData(HDF5Enabled, types.SimpleNamespace):
    """
    Container for band eigenvalues on a given set of k-points. The
    eigenvalues at each k-point are stored sorted in ascending order.

    :param kpoints: k-points at which the eigenvalues are defined.
    :type kpoints: list

    :param eigenvals: Eigenvalues for every k-point. The first axis runs
        over k-points, the second over the bands at a fixed k-point.
    :type eigenvals: 2D array
    """

    def __init__(self, *, kpoints, eigenvals):
        # A bare array of k-points is wrapped into the explicit container.
        if not isinstance(kpoints, KpointsBase):
            kpoints = KpointsExplicit(kpoints)
        sorted_eigenvals = np.sort(eigenvals)
        num_kpoints = len(kpoints.kpoints_explicit)
        if num_kpoints != len(sorted_eigenvals):
            raise ValueError(
                "Number of kpoints ({}) does not match the number of eigenvalue lists ({})"
                .format(num_kpoints, len(sorted_eigenvals))
            )
        self.kpoints = kpoints
        self.eigenvals = sorted_eigenvals

    def slice_bands(self, band_idx):
        """
        Create a new instance containing only the selected bands.

        :param band_idx: Indices of the bands to keep.
        :type band_idx: list
        """
        keep = sorted(band_idx)
        return type(self)(
            kpoints=self.kpoints, eigenvals=self.eigenvals.T[keep].T
        )

    @classmethod
    def from_eigenval_function(
        cls, *, kpoints, eigenval_function, listable=False
    ):
        """
        Build an instance by evaluating a function at the given k-points.

        :param kpoints: k-points for which the eigenvalues are to be calculated.
        :type kpoints: KpointsBase

        :param eigenval_function: Callable computing the eigenvalues.

        :param listable: If ``True``, the function is invoked once with the
            full list of k-points; otherwise it is invoked per k-point.
        :type listable: bool
        """
        if listable:
            computed = eigenval_function(kpoints.kpoints_explicit)
        else:
            computed = [
                eigenval_function(kpt) for kpt in kpoints.kpoints_explicit
            ]
        return cls(kpoints=kpoints, eigenvals=computed)

    def to_hdf5(self, hdf5_handle):
        # Serialize the k-points into a sub-group, the eigenvalues as a dataset.
        kpoints_group = hdf5_handle.create_group('kpoints_obj')
        self.kpoints.to_hdf5(kpoints_group)
        hdf5_handle['eigenvals'] = self.eigenvals

    @classmethod
    def from_hdf5(cls, hdf5_handle):
        loaded_kpoints = from_hdf5(hdf5_handle['kpoints_obj'])
        loaded_eigenvals = hdf5_handle['eigenvals'][()]
        return cls(kpoints=loaded_kpoints, eigenvals=loaded_eigenvals)

    def shift(self, value):
        """
        Create a new instance with all eigenvalues shifted by ``value``.

        :param value: Amount added to every eigenvalue.
        :type value: float
        """
        return type(self)(
            kpoints=self.kpoints, eigenvals=self.eigenvals + value
        )
|
alviano/wasp | refs/heads/master | tests/sat/Models/test2.SAT.dimacs.test.py | 5 | input = """
c soddisfacibile
p cnf 4 4
1 2 3 0
2 3 4 0
1 -2 -3 0
1 0
"""
output = """
sat
"""
|
lemonsoda/pythonlearning | refs/heads/master | pythonlearn/search.py | 1 | import os
def search(word, curdir):
    """Recursively print the path of every file under curdir whose name
    contains word.

    curdir may be relative or absolute; matches are printed as
    os.path.join(curdir, <name>) paths.
    """
    entries = os.listdir(curdir)
    # Bug fix: join entry names with curdir before testing them. Bare names
    # are resolved against the process working directory, which broke the
    # isfile/isdir checks (and hence recursion) for any directory other
    # than '.'.
    files = [x for x in entries if os.path.isfile(os.path.join(curdir, x))]
    dirs = [x for x in entries if os.path.isdir(os.path.join(curdir, x))]
    for filename in files:
        if word in filename:
            print(os.path.join(curdir, filename))
    for dirname in dirs:
        search(word, os.path.join(curdir, dirname))
search('e',os.path.curdir)
|
vladon/omim | refs/heads/master | 3party/protobuf/gtest/scripts/pump.py | 2471 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
  """Represents a position (line and column) in a text file."""

  def __init__(self, line=-1, column=-1):
    self.line = line
    self.column = column

  def __eq__(self, rhs):
    # Positions compare by (line, column) lexicographically.
    return (self.line, self.column) == (rhs.line, rhs.column)

  def __ne__(self, rhs):
    return not self == rhs

  def __lt__(self, rhs):
    return (self.line, self.column) < (rhs.line, rhs.column)

  def __le__(self, rhs):
    return self < rhs or self == rhs

  def __gt__(self, rhs):
    return rhs < self

  def __ge__(self, rhs):
    return rhs <= self

  def __str__(self):
    if self == Eof():
      return 'EOF'
    return '%s(%s)' % (self.line + 1, self.column)

  def __add__(self, offset):
    # Moving right within the same line.
    return Cursor(self.line, self.column + offset)

  def __sub__(self, offset):
    # Moving left within the same line.
    return Cursor(self.line, self.column - offset)

  def Clone(self):
    """Returns a copy of self."""
    return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
  """Returns the special cursor to denote the end-of-file."""
  return Cursor(line=-1, column=-1)
class Token:
  """Represents a token in a Pump source file."""

  def __init__(self, start=None, end=None, value=None, token_type=None):
    # Missing positions default to the end-of-file cursor.
    self.start = Eof() if start is None else start
    self.end = Eof() if end is None else end
    self.value = value
    self.token_type = token_type

  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)

  def Clone(self):
    """Returns a copy of self."""
    return Token(self.start.Clone(), self.end.Clone(),
                 self.value, self.token_type)
def StartsWith(lines, pos, string):
  """Returns True iff the given position in lines starts with 'string'."""
  # str.startswith with a start offset avoids slicing the line first.
  return lines[pos.line].startswith(string, pos.column)
def FindFirstInLine(line, token_table):
  """Finds the earliest token-table match in line.

  Returns (start, length, token_type) of the match that begins first,
  or None if no regex in token_table matches.  On a tie, the entry that
  appears earlier in token_table wins.
  """
  best = None
  for (regex, token_type) in token_table:
    m = regex.search(line)
    # Keep only a strictly earlier match, so ties prefer earlier entries.
    if m and (best is None or m.start() < best[0]):
      best = (m.start(), m.end() - m.start(), token_type)
  return best
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
# We failed to find str in lines
return None
def SubString(lines, start, end):
  """Returns the text between cursors start (inclusive) and end (exclusive)."""
  if end == Eof():
    # Normalize EOF to the concrete position just past the last character.
    end = Cursor(len(lines) - 1, len(lines[-1]))

  if start >= end:
    return ''

  if start.line == end.line:
    return lines[start.line][start.column:end.column]

  # Multi-line span: tail of the first line, whole middle lines,
  # head of the last line.
  pieces = [lines[start.line][start.column:]]
  pieces.extend(lines[start.line + 1:end.line])
  pieces.append(lines[end.line][:end.column])
  return ''.join(pieces)
def StripMetaComments(str):
  """Strip meta comments ($$ ...) from each line in the given string."""
  # A comment-only line at the very start of the string has no preceding
  # newline, so drop it (including its trailing newline) explicitly.
  without_leading_comment = re.sub(r'^\s*\$\$.*\n', '', str)
  # Everywhere else, \s* also swallows the newline preceding a
  # comment-only line, so this removes both trailing meta comments and
  # whole comment-only lines.
  return re.sub(r'\s*\$\$.*', '', without_leading_comment)
def MakeToken(lines, start, end, token_type):
  """Creates a new instance of Token."""
  value = SubString(lines, start, end)
  return Token(start, end, value, token_type)
def ParseToken(lines, pos, regex, token_type):
  """Parses a token of token_type that must begin exactly at pos.

  Aborts the program with an error message if regex does not match
  right at pos.
  """
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  # The match must be anchored at pos, i.e. m.start() == 0.
  if m and not m.start():
    return MakeToken(lines, pos, pos + m.end(), token_type)
  else:
    print 'ERROR: %s expected at %s.' % (token_type, pos)
    sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
  """Removes and returns the first element of a_list."""
  return a_list.pop(0)
def PushFront(a_list, elem):
  """Inserts elem at the front of a_list, in place."""
  a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
  """Pops and returns the first token; aborts if it is not of token_type.

  Args:
    a_list: list of Token objects; the first element is removed in place.
    token_type: if not None, the token_type the popped token must have.
  """
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    # A type mismatch is a syntax error in the .pump source: report and abort.
    print 'ERROR: %s expected at %s' % (token_type, token.start)
    print 'ERROR: %s found instead' % (token,)
    sys.exit(1)

  return token
def PeekToken(a_list):
  """Returns the first element of a_list without removing it, or None if empty."""
  return a_list[0] if a_list else None
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
  """Accumulates generated text and exposes the last (partial) line."""

  def __init__(self):
    self.string = ''

  def GetLastLine(self):
    """Returns the text after the final newline, or '' if there is none."""
    last_newline = self.string.rfind('\n')
    if last_newline < 0:
      return ''
    return self.string[last_newline + 1:]

  def Append(self, s):
    """Appends s to the accumulated output."""
    self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
print 'BAD'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
  """Returns True iff the line contains a // comment marker."""
  return cur_line.find('//') >= 0
def IsInPreprocessorDirective(prev_lines, cur_line):
  """Returns truthy iff cur_line is part of a preprocessor directive.

  That is the case when cur_line itself starts with '#', or when the
  previous line ends with a backslash line-continuation.  Note: callers
  rely only on truthiness; an empty prev_lines yields a falsy result.
  """
  starts_directive = cur_line.lstrip().startswith('#')
  continues_directive = prev_lines and prev_lines[-1].endswith('\\')
  return True if starts_directive else continues_directive
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line) or
# Don't break IWYU pragmas, either; that causes iwyu.py problems.
re.search(r'// IWYU pragma: ', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsSingleLineComment(line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapComment(line, output)
elif IsInPreprocessorDirective(output, line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapPreprocessorDirective(line, output)
elif IsMultiLineIWYUPragma(line):
output.append(line)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
|
islacoin/islacoin | refs/heads/master | share/qt/extract_strings_qt.py | 1294 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.
    """
    entries = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw_line in text.split('\n'):
        line = raw_line.rstrip('\r')
        if line.startswith('msgid '):
            # A new message begins; flush the previous one if complete.
            if reading_str:
                entries.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line; attach to whichever part is being read.
            if reading_id:
                current_id.append(line)
            if reading_str:
                current_str.append(line)
    if reading_str:
        # Flush the trailing message.
        entries.append((current_id, current_str))
    return entries
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
|
hvekriya/mojwmt | refs/heads/master | node_modules/grunt-sass/node_modules/node-sass/node_modules/node-gyp/gyp/tools/pretty_sln.py | 1831 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
  """Prints project after (recursively) printing its unbuilt dependencies.

  Args:
    project: name of the project to build.
    built: list of already-printed project names; appended to in place.
    projects: mapping of project name -> [path, clsid, raw path].
    deps: mapping of project name -> list of dependency project names.
  """
  # if all dependencies are done, we can build it, otherwise we try to build the
  # dependency.
  # This is not infinite-recursion proof.
  for dep in deps[project]:
    if dep not in built:
      BuildProject(dep, built, projects, deps)
  print project
  built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
# For each dependencies in this project
new_dep_array = []
for dep in dependencies[project]:
# Look for the project name matching this cldis
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
  """Entry point: parse the .sln named on the command line and report.

  Prints per-project dependencies and a valid build order; when
  --recursive is passed, also pretty-prints every referenced .vcproj.
  Returns a process exit code: 0 on success, 1 on bad usage.
  """
  # check if we have exactly 1 parameter.
  if len(sys.argv) < 2:
    print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
    return 1
  (projects, deps) = ParseSolution(sys.argv[1])
  PrintDependencies(projects, deps)
  PrintBuildOrder(projects, deps)
  if '--recursive' in sys.argv:
    PrintVCProj(projects)
  return 0
if __name__ == '__main__':
  sys.exit(main())
|
scottfrazer/hermes | refs/heads/develop | examples/xml/test.py | 1 | import hermes
# Compile the XML grammar file into a parser module at runtime.
with open('xml.hgr') as fp:
    xml_parser = hermes.compile(fp, module='xml_parser', debug=False)
# Tokenize a sample document.  NOTE(review): '<string>' is presumably the
# resource/source name used for token positions and error messages --
# confirm against the hermes API.
tokens = xml_parser.lex('<a x="1" y="2"><b><c>hello</c><d>world</d></b></a>', '<string>')
for token in tokens:
    print(token)
#tree = xml_parser.parse('<a x="1" y="2"><b><c>hello</c><d>world</d></b></a>')
#print(tree.dumps(indent=2, color=json_parser.term_color))
#print(tree.toAst().dumps(indent=2, b64_source=True, color=json_parser.term_color))
|
samdowd/drumm-farm | refs/heads/master | drumm_env/lib/python2.7/site-packages/phonenumbers/data/region_JM.py | 1 | """Auto-generated file, do not edit by hand. JM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Phone-number metadata for Jamaica (JM).  Jamaica belongs to the North
# American Numbering Plan: country code 1, international prefix 011, and
# national prefix '1'.  National numbers are 10 digits; 7-digit dialing is
# accepted as local-only (leading_digits '876' is the JM area code).
PHONE_METADATA_JM = PhoneMetadata(id='JM', country_code=1, international_prefix='011',
    general_desc=PhoneNumberDesc(national_number_pattern='[589]\\d{9}', possible_length=(10,), possible_length_local_only=(7,)),
    fixed_line=PhoneNumberDesc(national_number_pattern='876(?:5(?:0[12]|1[0-468]|2[35]|63)|6(?:0[1-3579]|1[027-9]|[23]\\d|40|5[06]|6[2-589]|7[05]|8[04]|9[4-9])|7(?:0[2-689]|[1-6]\\d|8[056]|9[45])|9(?:0[1-8]|1[02378]|[2-8]\\d|9[2-468]))\\d{4}', example_number='8765123456', possible_length=(10,), possible_length_local_only=(7,)),
    mobile=PhoneNumberDesc(national_number_pattern='876(?:2[14-9]\\d|[348]\\d{2}|5(?:0[3-9]|[2-57-9]\\d|6[0-24-9])|7(?:0[07]|7\\d|8[1-47-9]|9[0-36-9])|9(?:[01]9|9[0579]))\\d{4}', example_number='8762101234', possible_length=(10,), possible_length_local_only=(7,)),
    toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|33|44|55|66|77|88)[2-9]\\d{6}', example_number='8002123456', possible_length=(10,)),
    premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}', example_number='9002123456', possible_length=(10,)),
    personal_number=PhoneNumberDesc(national_number_pattern='5(?:00|22|33|44|66|77|88)[2-9]\\d{6}', example_number='5002345678', possible_length=(10,)),
    national_prefix='1',
    national_prefix_for_parsing='1',
    leading_digits='876')
|
arakashic/shadowsocks | refs/heads/master | shadowsocks/asyncdns.py | 655 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
# Seconds between periodic sweeps of the DNS LRU cache.
CACHE_SWEEP_INTERVAL = 30
# One DNS label: 1-63 chars of letters/digits/hyphen with no leading or
# trailing hyphen (RFC 1035 syntax), matched case-insensitively.
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
# Apply shadowsocks' socket-module monkey patches (see shadowsocks.common).
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# DNS record types (RFC 1035 / RFC 3596) and the query class we send.
QTYPE_ANY = 255
QTYPE_A = 1  # IPv4 address record
QTYPE_AAAA = 28  # IPv6 address record
QTYPE_CNAME = 5  # canonical-name alias
QTYPE_NS = 2  # authoritative name server
QCLASS_IN = 1  # Internet class
def build_address(address):
    """Encode a hostname into DNS wire format (length-prefixed labels).

    Returns the encoded bytes terminated by a zero octet, or None when
    any label exceeds the 63-byte limit imposed by RFC 1035.
    """
    encoded = []
    for label in address.strip(b'.').split(b'.'):
        size = len(label)
        if size > 63:
            # RFC 1035: a single label may be at most 63 octets.
            return None
        encoded.append(common.chr(size))
        encoded.append(label)
    # Zero-length root label terminates the name.
    encoded.append(b'\0')
    return b''.join(encoded)
def build_request(address, qtype):
    """Build a DNS query packet for `address` with record type `qtype`."""
    # Random 16-bit transaction id.
    req_id = os.urandom(2)
    # Flags byte 1 = 0x01 sets RD (recursion desired); QDCOUNT=1, all
    # other section counts zero (see the header diagram above).
    header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
    question = build_address(address)
    tail = struct.pack('!HH', qtype, QCLASS_IN)
    return req_id + header + question + tail
def parse_ip(addrtype, data, length, offset):
    """Decode the RDATA of a resource record starting at `offset`.

    A/AAAA records yield a printable IP string, CNAME/NS yield the
    decoded domain name, and any other type is returned as raw bytes.
    """
    if addrtype == QTYPE_A:
        return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
    if addrtype == QTYPE_AAAA:
        return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
    if addrtype in [QTYPE_CNAME, QTYPE_NS]:
        # Names may contain compression pointers, so parse against the
        # full message buffer rather than a slice.
        return parse_name(data, offset)[1]
    return data[offset:offset + length]
def parse_name(data, offset):
    """Decode a (possibly compressed) domain name at `offset`.

    Returns (bytes_consumed, name) where `name` is the dot-joined label
    bytes and `bytes_consumed` counts only the bytes at `offset` itself
    (a compression pointer consumes 2 bytes regardless of the length of
    the name it points at).
    """
    p = offset
    labels = []
    # Length octet of the first label.
    l = common.ord(data[p])
    while l > 0:
        if (l & (128 + 64)) == (128 + 64):
            # pointer
            # Top two bits set -> RFC 1035 compression pointer; low 14
            # bits give the offset of the rest of the name.
            pointer = struct.unpack('!H', data[p:p + 2])[0]
            pointer &= 0x3FFF
            r = parse_name(data, pointer)
            labels.append(r[1])
            p += 2
            # pointer is the end
            return p - offset, b'.'.join(labels)
        else:
            # Plain label: length octet followed by `l` data bytes.
            labels.append(data[p + 1:p + 1 + l])
            p += 1 + l
            l = common.ord(data[p])
    # +1 accounts for the terminating zero-length octet.
    return p - offset + 1, b'.'.join(labels)
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
    """Parse one resource record (or, with question=True, one question).

    Returns (bytes_consumed, record).  Answer records are 5-tuples
    (name, rdata, type, class, ttl); question entries carry no
    rdata/ttl and keep the historical 6-tuple shape
    (name, None, type, class, None, None).
    """
    nlen, name = parse_name(data, offset)
    fixed = offset + nlen
    if question:
        rtype, rclass = struct.unpack('!HH', data[fixed:fixed + 4])
        return nlen + 4, (name, None, rtype, rclass, None, None)
    rtype, rclass, ttl, rdlength = struct.unpack(
        '!HHiH', data[fixed:fixed + 10]
    )
    rdata = parse_ip(rtype, data, rdlength, fixed + 10)
    return nlen + 10 + rdlength, (name, rdata, rtype, rclass, ttl)
def parse_header(data):
    """Unpack the fixed 12-byte DNS message header.

    Returns (id, qr, tc, ra, rcode, qdcount, ancount, nscount, arcount)
    with the flag fields still masked into their in-byte positions
    (e.g. qr is 0 or 128), or None when `data` is shorter than a header.
    """
    if len(data) < 12:
        return None
    (res_id, flags1, flags2,
     qdcount, ancount, nscount, arcount) = struct.unpack('!HBBHHHH', data[:12])
    qr = flags1 & 128      # response bit
    tc = flags1 & 2        # truncated bit
    ra = flags2 & 128      # recursion-available bit
    rcode = flags2 & 15    # response code
    return (res_id, qr, tc, ra, rcode, qdcount, ancount, nscount, arcount)
def parse_response(data):
    """Parse a raw DNS reply into a DNSResponse, or None on any error.

    Only question and answer sections are retained; authority and
    additional records are parsed solely to advance the offset.
    """
    try:
        if len(data) < 12:
            return None
        header = parse_header(data)
        if not header:
            return None
        qdcount, ancount, nscount, arcount = header[5:9]
        questions = []
        answers = []
        offset = 12
        for _ in range(qdcount):
            consumed, record = parse_record(data, offset, True)
            offset += consumed
            if record:
                questions.append(record)
        for _ in range(ancount):
            consumed, record = parse_record(data, offset)
            offset += consumed
            if record:
                answers.append(record)
        # Skip authority and additional sections.
        for _ in range(nscount + arcount):
            consumed, record = parse_record(data, offset)
            offset += consumed
        response = DNSResponse()
        if questions:
            response.hostname = questions[0][0]
        for entry in questions:
            response.questions.append((entry[1], entry[2], entry[3]))
        for entry in answers:
            response.answers.append((entry[1], entry[2], entry[3]))
        return response
    except Exception as e:
        shell.print_exception(e)
        return None
def is_valid_hostname(hostname):
    """Return True if `hostname` (bytes) is a syntactically valid name.

    Enforces the 255-byte overall limit, allows one trailing dot
    (fully-qualified form), and validates each label against
    VALID_HOSTNAME (1-63 alphanumeric/hyphen chars, no edge hyphens).
    """
    if len(hostname) > 255:
        return False
    # Strip a single trailing dot.  BUGFIX: the old check compared
    # hostname[-1] (an int on Python 3, since indexing bytes yields an
    # integer) against b'.', which never matched, so FQDNs ending in '.'
    # were wrongly rejected; it also raised IndexError on b''.
    if hostname.endswith(b'.'):
        hostname = hostname[:-1]
    return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
class DNSResponse(object):
    """Parsed DNS reply: the queried hostname plus its record tuples.

    `questions` and `answers` are lists of (addr, type, class) tuples
    filled in by parse_response().
    """

    def __init__(self):
        self.hostname = None
        self.questions = []  # each: (addr, type, class)
        self.answers = []  # each: (addr, type, class)

    def __str__(self):
        return '{0}: {1}'.format(self.hostname, self.answers)
# Per-hostname resolution state: which address family is currently being
# queried (A records first, then AAAA as a fallback).
STATUS_IPV4 = 0
STATUS_IPV6 = 1
class DNSResolver(object):
    """Asynchronous DNS resolver driven by the shadowsocks event loop.

    Sends A queries first and falls back to AAAA; consults the system
    hosts file and an LRU cache before touching the network.  Callbacks
    are invoked as callback((hostname, ip), error).
    """
    def __init__(self, server_list=None):
        # server_list: optional list of DNS server IPs; when omitted the
        # servers are read from /etc/resolv.conf.
        self._loop = None
        self._hosts = {}  # hostname -> ip parsed from the hosts file
        self._hostname_status = {}  # hostname -> STATUS_IPV4/STATUS_IPV6
        self._hostname_to_cb = {}  # hostname -> list of pending callbacks
        self._cb_to_hostname = {}  # reverse map, used by remove_callback()
        self._cache = lru_cache.LRUCache(timeout=300)
        self._sock = None
        if server_list is None:
            self._servers = None
            self._parse_resolv()
        else:
            self._servers = server_list
        self._parse_hosts()
        # TODO monitor hosts change and reload hosts
        # TODO parse /etc/gai.conf and follow its rules
    def _parse_resolv(self):
        """Collect IPv4 nameserver addresses from /etc/resolv.conf."""
        self._servers = []
        try:
            with open('/etc/resolv.conf', 'rb') as f:
                content = f.readlines()
                for line in content:
                    line = line.strip()
                    if line:
                        if line.startswith(b'nameserver'):
                            parts = line.split()
                            if len(parts) >= 2:
                                server = parts[1]
                                # Only IPv4 servers are supported here.
                                if common.is_ip(server) == socket.AF_INET:
                                    if type(server) != str:
                                        server = server.decode('utf8')
                                    self._servers.append(server)
        except IOError:
            pass
        if not self._servers:
            # Fall back to Google public DNS.
            self._servers = ['8.8.4.4', '8.8.8.8']
    def _parse_hosts(self):
        """Load the system hosts file into self._hosts."""
        etc_path = '/etc/hosts'
        if 'WINDIR' in os.environ:
            etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
        try:
            with open(etc_path, 'rb') as f:
                for line in f.readlines():
                    line = line.strip()
                    parts = line.split()
                    if len(parts) >= 2:
                        ip = parts[0]
                        if common.is_ip(ip):
                            # One line may map several hostnames to the ip.
                            for i in range(1, len(parts)):
                                hostname = parts[i]
                                if hostname:
                                    self._hosts[hostname] = ip
        except IOError:
            self._hosts['localhost'] = '127.0.0.1'
    def add_to_loop(self, loop):
        """Register the resolver's UDP socket and periodic sweep on `loop`."""
        if self._loop:
            raise Exception('already add to loop')
        self._loop = loop
        # TODO when dns server is IPv6
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                   socket.SOL_UDP)
        self._sock.setblocking(False)
        loop.add(self._sock, eventloop.POLL_IN, self)
        loop.add_periodic(self.handle_periodic)
    def _call_callback(self, hostname, ip, error=None):
        """Fire and clear every callback registered for `hostname`."""
        callbacks = self._hostname_to_cb.get(hostname, [])
        for callback in callbacks:
            if callback in self._cb_to_hostname:
                del self._cb_to_hostname[callback]
            if ip or error:
                callback((hostname, ip), error)
            else:
                callback((hostname, None),
                         Exception('unknown hostname %s' % hostname))
        if hostname in self._hostname_to_cb:
            del self._hostname_to_cb[hostname]
        if hostname in self._hostname_status:
            del self._hostname_status[hostname]
    def _handle_data(self, data):
        """Process one reply; escalate A -> AAAA when no address came back."""
        response = parse_response(data)
        if response and response.hostname:
            hostname = response.hostname
            ip = None
            # Take the first A/AAAA answer of class IN.
            for answer in response.answers:
                if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
                        answer[2] == QCLASS_IN:
                    ip = answer[0]
                    break
            if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
                    == STATUS_IPV4:
                # The A query yielded nothing: retry with AAAA.
                self._hostname_status[hostname] = STATUS_IPV6
                self._send_req(hostname, QTYPE_AAAA)
            else:
                if ip:
                    self._cache[hostname] = ip
                    self._call_callback(hostname, ip)
                elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
                    # AAAA also failed: report resolution failure.
                    for question in response.questions:
                        if question[1] == QTYPE_AAAA:
                            self._call_callback(hostname, None)
                            break
    def handle_event(self, sock, fd, event):
        """Event-loop hook: read replies, or rebuild the socket on error."""
        if sock != self._sock:
            return
        if event & eventloop.POLL_ERR:
            logging.error('dns socket err')
            self._loop.remove(self._sock)
            self._sock.close()
            # TODO when dns server is IPv6
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                       socket.SOL_UDP)
            self._sock.setblocking(False)
            self._loop.add(self._sock, eventloop.POLL_IN, self)
        else:
            data, addr = sock.recvfrom(1024)
            # Drop datagrams that did not come from one of our servers.
            if addr[0] not in self._servers:
                logging.warn('received a packet other than our dns')
                return
            self._handle_data(data)
    def handle_periodic(self):
        """Periodic hook: expire stale cache entries."""
        self._cache.sweep()
    def remove_callback(self, callback):
        """Detach `callback` from its pending hostname, if any."""
        hostname = self._cb_to_hostname.get(callback)
        if hostname:
            del self._cb_to_hostname[callback]
            arr = self._hostname_to_cb.get(hostname, None)
            if arr:
                arr.remove(callback)
                if not arr:
                    # Last waiter gone: drop the per-hostname state too.
                    del self._hostname_to_cb[hostname]
                    if hostname in self._hostname_status:
                        del self._hostname_status[hostname]
    def _send_req(self, hostname, qtype):
        """Send one query for `hostname`/`qtype` to every configured server."""
        req = build_request(hostname, qtype)
        for server in self._servers:
            logging.debug('resolving %s with type %d using server %s',
                          hostname, qtype, server)
            self._sock.sendto(req, (server, 53))
    def resolve(self, hostname, callback):
        """Resolve `hostname`, invoking callback((hostname, ip), error).

        Answers synchronously for IP literals, hosts-file entries and
        cache hits; otherwise sends an A query and queues the callback.
        """
        if type(hostname) != bytes:
            hostname = hostname.encode('utf8')
        if not hostname:
            callback(None, Exception('empty hostname'))
        elif common.is_ip(hostname):
            callback((hostname, hostname), None)
        elif hostname in self._hosts:
            logging.debug('hit hosts: %s', hostname)
            ip = self._hosts[hostname]
            callback((hostname, ip), None)
        elif hostname in self._cache:
            logging.debug('hit cache: %s', hostname)
            ip = self._cache[hostname]
            callback((hostname, ip), None)
        else:
            if not is_valid_hostname(hostname):
                callback(None, Exception('invalid hostname: %s' % hostname))
                return
            arr = self._hostname_to_cb.get(hostname, None)
            if not arr:
                # First waiter for this hostname: start a new A query.
                self._hostname_status[hostname] = STATUS_IPV4
                self._send_req(hostname, QTYPE_A)
                self._hostname_to_cb[hostname] = [callback]
                self._cb_to_hostname[callback] = hostname
            else:
                arr.append(callback)
                # TODO send again only if waited too long
                self._send_req(hostname, QTYPE_A)
    def close(self):
        """Tear down: deregister from the loop and close the socket."""
        if self._sock:
            if self._loop:
                self._loop.remove_periodic(self.handle_periodic)
                self._loop.remove(self._sock)
            self._sock.close()
            self._sock = None
self._sock = None
def test():
    """Manual smoke test: resolve a batch of hostnames over live DNS.

    Requires network access; the loop is stopped after the ninth
    callback fires (one per resolve() call below).
    """
    dns_resolver = DNSResolver()
    loop = eventloop.EventLoop()
    dns_resolver.add_to_loop(loop)
    global counter
    counter = 0
    def make_callback():
        global counter
        def callback(result, error):
            global counter
            # TODO: what can we assert?
            print(result, error)
            counter += 1
            if counter == 9:
                dns_resolver.close()
                loop.stop()
        a_callback = callback
        return a_callback
    # Each call must hand out a fresh closure object.
    assert(make_callback() != make_callback())
    dns_resolver.resolve(b'google.com', make_callback())
    dns_resolver.resolve('google.com', make_callback())
    dns_resolver.resolve('example.com', make_callback())
    dns_resolver.resolve('ipv6.google.com', make_callback())
    dns_resolver.resolve('www.facebook.com', make_callback())
    dns_resolver.resolve('ns2.google.com', make_callback())
    dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback())
    # These two exercise the per-label (63) and total-length (255) limits.
    dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'long.hostname', make_callback())
    dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'long.hostname', make_callback())
    loop.run()
if __name__ == '__main__':
    test()
|
ojake/django | refs/heads/master | django/conf/locale/zh_Hans/formats.py | 1008 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y年n月j日'  # 2016年9月5日
TIME_FORMAT = 'H:i'  # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i'  # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月'  # 2016年9月
MONTH_DAY_FORMAT = 'm月j日'  # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日'  # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i'  # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1  # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# NOTE(review): the '%Y年%n月%j日' entries use Django's date-format letters
# (n, j) rather than valid strptime directives ('%n' does not exist in
# strptime; '%j' is day-of-year).  They are kept as-is to match upstream;
# confirm before relying on them for parsing.
DATE_INPUT_FORMATS = [
    '%Y/%m/%d',  # '2016/09/05'
    '%Y-%m-%d',  # '2016-09-05'
    '%Y年%n月%j日',  # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
    '%H:%M',  # '20:45'
    '%H:%M:%S',  # '20:45:29'
    '%H:%M:%S.%f',  # '20:45:29.000200'
]
DATETIME_INPUT_FORMATS = [
    '%Y/%m/%d %H:%M',  # '2016/09/05 20:45'
    '%Y-%m-%d %H:%M',  # '2016-09-05 20:45'
    '%Y年%n月%j日 %H:%M',  # '2016年9月5日 14:45'
    '%Y/%m/%d %H:%M:%S',  # '2016/09/05 20:45:29'
    '%Y-%m-%d %H:%M:%S',  # '2016-09-05 20:45:29'
    '%Y年%n月%j日 %H:%M:%S',  # '2016年9月5日 20:45:29'
    '%Y/%m/%d %H:%M:%S.%f',  # '2016/09/05 20:45:29.000200'
    '%Y-%m-%d %H:%M:%S.%f',  # '2016-09-05 20:45:29.000200'
    # BUGFIX: the time part previously read '%H:%n:%S.%f'; '%n' is not a
    # strptime directive and was inconsistent with every sibling format.
    '%Y年%n月%j日 %H:%M:%S.%f',  # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
NUMBER_GROUPING = 4
|
lanselin/pysal | refs/heads/master | pysal/spatial_dynamics/util.py | 11 | """
Utilities for the spatial dynamics module.
"""
__all__ = ['shuffle_matrix', 'get_lower']
import numpy as np
def shuffle_matrix(X, ids):
    """
    Apply one random permutation to both the rows and columns of a matrix.

    Parameters
    ----------
    X   : array
          (k, k), matrix to permute.
    ids : array
          range (k, ); shuffled in place as a side effect.

    Returns
    -------
    X : array
        (k, k) copy of X with rows and columns reordered by the same
        random permutation.

    Examples
    --------
    >>> X = np.arange(16)
    >>> X.shape = (4, 4)
    >>> np.random.seed(10)
    >>> shuffle_matrix(X, list(range(4)))
    array([[10,  8, 11,  9],
           [ 2,  0,  3,  1],
           [14, 12, 15, 13],
           [ 6,  4,  7,  5]])
    """
    # Shuffle the index vector once, then apply it to rows and columns.
    np.random.shuffle(ids)
    return X[ids][:, ids]
def get_lower(matrix):
    """
    Flattens the lower part of an n x n matrix into an n*(n-1)/2 x 1 vector.

    Parameters
    ----------
    matrix : array
             (n, n) numpy array, a distance matrix.

    Returns
    -------
    lowvec : array
             numpy array, the lower half of the distance matrix flattened into
             a vector of length n*(n-1)/2.

    Examples
    --------
    >>> import numpy as np
    >>> test = np.array([[0,1,2,3],[1,0,1,2],[2,1,0,1],[4,2,1,0]])
    >>> lower = get_lower(test)
    >>> lower
    array([[1],
           [2],
           [1],
           [4],
           [2],
           [1]])
    """
    n = matrix.shape[0]
    # Strictly-lower-triangular indices in row-major order, matching the
    # original nested-loop (i outer, j inner, i > j) traversal.
    rows, cols = np.tril_indices(n, k=-1)
    # BUGFIX: use integer division; n * (n - 1) / 2 is a float on
    # Python 3 and numpy.reshape rejects float dimensions.
    veclen = n * (n - 1) // 2
    lowvec = np.reshape(matrix[rows, cols], (veclen, 1))
    return lowvec
|
jaysonsantos/servo | refs/heads/master | components/script/dom/bindings/codegen/parser/tests/test_treatNonCallableAsNull.py | 170 | import WebIDL
def WebIDLTest(parser, harness):
    """Check [TreatNonCallableAsNull] handling: honored on a nullable
    callback-typed attribute, absent on a plain one, and rejected when
    applied to attributes/interfaces or combined with
    [TreatNonObjectAsNull]."""
    parser.parse("""
        [TreatNonCallableAsNull] callback Function = any(any... arguments);
        interface TestTreatNonCallableAsNull1 {
          attribute Function? onfoo;
          attribute Function onbar;
        };
    """)
    results = parser.finish()
    iface = results[1]
    # Member 0 (nullable Function?) should report True, member 1 False.
    for index, expected in ((0, True), (1, False)):
        attr = iface.members[index]
        harness.check(attr.type.treatNonCallableAsNull(), expected,
                      "Got the expected value")
    # Each of these snippets must fail to parse.  The bare except is
    # intentional: any raised error counts as the expected rejection.
    bad_snippets = ["""
        callback Function = any(any... arguments);
        interface TestTreatNonCallableAsNull2 {
          [TreatNonCallableAsNull] attribute Function onfoo;
        };
    """, """
        callback Function = any(any... arguments);
        [TreatNonCallableAsNull]
        interface TestTreatNonCallableAsNull3 {
          attribute Function onfoo;
        };
    """, """
        [TreatNonCallableAsNull, TreatNonObjectAsNull]
        callback Function = any(any... arguments);
    """]
    for snippet in bad_snippets:
        parser = parser.reset()
        threw = False
        try:
            parser.parse(snippet)
            results = parser.finish()
        except:
            threw = True
        harness.ok(threw, "Should have thrown.")
|
hainm/scipy | refs/heads/master | scipy/integrate/quadpack.py | 42 | # Author: Travis Oliphant 2001
# Author: Nathan Woods 2013 (nquad &c)
from __future__ import division, print_function, absolute_import
import sys
import warnings
from functools import partial
from . import _quadpack
import numpy
from numpy import Inf
__all__ = ['quad', 'dblquad', 'tplquad', 'nquad', 'quad_explain',
'IntegrationWarning']
error = _quadpack.error
class IntegrationWarning(UserWarning):
    """
    Warning on issues during integration.

    Emitted by `quad` when QUADPACK reports a non-fatal problem
    (subdivision limit reached, roundoff trouble, probable divergence,
    ...) but a result is still returned.
    """
    pass
def quad_explain(output=sys.stdout):
    """
    Print extra information about integrate.quad() parameters and returns.

    Parameters
    ----------
    output : instance with "write" method, optional
        Information about `quad` is passed to ``output.write()``.
        Default is ``sys.stdout``.

    Returns
    -------
    None
    """
    # The full explanation lives in quad's docstring; just emit it.
    output.write(quad.__doc__)
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
         limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
         limlst=50):
    """
    Compute a definite integral.

    Integrate func from `a` to `b` (possibly infinite interval) using a
    technique from the Fortran library QUADPACK.

    Parameters
    ----------
    func : function
        A Python function or method to integrate.  If `func` takes many
        arguments, it is integrated along the axis corresponding to the
        first argument.
        If the user desires improved integration performance, then f may
        instead be a ``ctypes`` function of the form:
            f(int n, double args[n]),
        where ``args`` is an array of function arguments and ``n`` is the
        length of ``args``. ``f.argtypes`` should be set to
        ``(c_int, c_double)``, and ``f.restype`` should be ``(c_double,)``.
    a : float
        Lower limit of integration (use -numpy.inf for -infinity).
    b : float
        Upper limit of integration (use numpy.inf for +infinity).
    args : tuple, optional
        Extra arguments to pass to `func`.
    full_output : int, optional
        Non-zero to return a dictionary of integration information.
        If non-zero, warning messages are also suppressed and the
        message is appended to the output tuple.

    Returns
    -------
    y : float
        The integral of func from `a` to `b`.
    abserr : float
        An estimate of the absolute error in the result.
    infodict : dict
        A dictionary containing additional information.
        Run scipy.integrate.quad_explain() for more information.
    message :
        A convergence message.
    explain :
        Appended only with 'cos' or 'sin' weighting and infinite
        integration limits, it contains an explanation of the codes in
        infodict['ierlst']

    Other Parameters
    ----------------
    epsabs : float or int, optional
        Absolute error tolerance.
    epsrel : float or int, optional
        Relative error tolerance.
    limit : float or int, optional
        An upper bound on the number of subintervals used in the adaptive
        algorithm.
    points : (sequence of floats,ints), optional
        A sequence of break points in the bounded integration interval
        where local difficulties of the integrand may occur (e.g.,
        singularities, discontinuities). The sequence does not have
        to be sorted.
    weight : float or int, optional
        String indicating weighting function. Full explanation for this
        and the remaining arguments can be found below.
    wvar : optional
        Variables for use with weighting functions.
    wopts : optional
        Optional input for reusing Chebyshev moments.
    maxp1 : float or int, optional
        An upper bound on the number of Chebyshev moments.
    limlst : int, optional
        Upper bound on the number of cycles (>=3) for use with a sinusoidal
        weighting and an infinite end-point.

    See Also
    --------
    dblquad : double integral
    tplquad : triple integral
    nquad : n-dimensional integrals (uses `quad` recursively)
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature
    odeint : ODE integrator
    ode : ODE integrator
    simps : integrator for sampled data
    romb : integrator for sampled data
    scipy.special : for coefficients and roots of orthogonal polynomials

    Notes
    -----
    **Extra information for quad() inputs and outputs**

    If full_output is non-zero, then the third output argument
    (infodict) is a dictionary with entries as tabulated below. For
    infinite limits, the range is transformed to (0,1) and the
    optional outputs are given with respect to this transformed range.
    Let M be the input argument limit and let K be infodict['last'].
    The entries are:

    'neval'
        The number of function evaluations.
    'last'
        The number, K, of subintervals produced in the subdivision process.
    'alist'
        A rank-1 array of length M, the first K elements of which are the
        left end points of the subintervals in the partition of the
        integration range.
    'blist'
        A rank-1 array of length M, the first K elements of which are the
        right end points of the subintervals.
    'rlist'
        A rank-1 array of length M, the first K elements of which are the
        integral approximations on the subintervals.
    'elist'
        A rank-1 array of length M, the first K elements of which are the
        moduli of the absolute error estimates on the subintervals.
    'iord'
        A rank-1 integer array of length M, the first L elements of
        which are pointers to the error estimates over the subintervals
        with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
        sequence ``infodict['iord']`` and let E be the sequence
        ``infodict['elist']``.  Then ``E[I[1]], ..., E[I[L]]`` forms a
        decreasing sequence.

    If the input argument points is provided (i.e. it is not None),
    the following additional outputs are placed in the output
    dictionary. Assume the points sequence is of length P.

    'pts'
        A rank-1 array of length P+2 containing the integration limits
        and the break points of the intervals in ascending order.
        This is an array giving the subintervals over which integration
        will occur.
    'level'
        A rank-1 integer array of length M (=limit), containing the
        subdivision levels of the subintervals, i.e., if (aa,bb) is a
        subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
        are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
        if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
    'ndin'
        A rank-1 integer array of length P+2.  After the first integration
        over the intervals (pts[1], pts[2]), the error estimates over some
        of the intervals may have been increased artificially in order to
        put their subdivision forward.  This array has ones in slots
        corresponding to the subintervals for which this happens.

    **Weighting the integrand**

    The input variables, *weight* and *wvar*, are used to weight the
    integrand by a select list of functions.  Different integration
    methods are used to compute the integral with these weighting
    functions.  The possible values of weight and the corresponding
    weighting functions are.

    ==========  ===================================  =====================
    ``weight``  Weight function used                 ``wvar``
    ==========  ===================================  =====================
    'cos'       cos(w*x)                             wvar = w
    'sin'       sin(w*x)                             wvar = w
    'alg'       g(x) = ((x-a)**alpha)*((b-x)**beta)  wvar = (alpha, beta)
    'alg-loga'  g(x)*log(x-a)                        wvar = (alpha, beta)
    'alg-logb'  g(x)*log(b-x)                        wvar = (alpha, beta)
    'alg-log'   g(x)*log(x-a)*log(b-x)               wvar = (alpha, beta)
    'cauchy'    1/(x-c)                              wvar = c
    ==========  ===================================  =====================

    wvar holds the parameter w, (alpha, beta), or c depending on the weight
    selected.  In these expressions, a and b are the integration limits.

    For the 'cos' and 'sin' weighting, additional inputs and outputs are
    available.

    For finite integration limits, the integration is performed using a
    Clenshaw-Curtis method which uses Chebyshev moments.  For repeated
    calculations, these moments are saved in the output dictionary:

    'momcom'
        The maximum level of Chebyshev moments that have been computed,
        i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
        computed for intervals of length ``|b-a| * 2**(-l)``,
        ``l=0,1,...,M_c``.
    'nnlog'
        A rank-1 integer array of length M(=limit), containing the
        subdivision levels of the subintervals, i.e., an element of this
        array is equal to l if the corresponding subinterval is
        ``|b-a|* 2**(-l)``.
    'chebmo'
        A rank-2 array of shape (25, maxp1) containing the computed
        Chebyshev moments.  These can be passed on to an integration
        over the same interval by passing this array as the second
        element of the sequence wopts and passing infodict['momcom'] as
        the first element.

    If one of the integration limits is infinite, then a Fourier integral is
    computed (assuming w neq 0).  If full_output is 1 and a numerical error
    is encountered, besides the error message attached to the output tuple,
    a dictionary is also appended to the output tuple which translates the
    error codes in the array ``info['ierlst']`` to English messages.  The
    output information dictionary contains the following entries instead of
    'last', 'alist', 'blist', 'rlist', and 'elist':

    'lst'
        The number of subintervals needed for the integration (call it ``K_f``).
    'rslst'
        A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
        contain the integral contribution over the interval
        ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
        and ``k=1,2,...,K_f``.
    'erlst'
        A rank-1 array of length ``M_f`` containing the error estimate
        corresponding to the interval in the same position in
        ``infodict['rslist']``.
    'ierlst'
        A rank-1 integer array of length ``M_f`` containing an error flag
        corresponding to the interval in the same position in
        ``infodict['rslist']``.  See the explanation dictionary (last entry
        in the output tuple) for the meaning of the codes.

    Examples
    --------
    Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result

    >>> from scipy import integrate
    >>> x2 = lambda x: x**2
    >>> integrate.quad(x2, 0, 4)
    (21.333333333333332, 2.3684757858670003e-13)
    >>> print(4**3 / 3.)  # analytical result
    21.3333333333

    Calculate :math:`\\int^\\infty_0 e^{-x} dx`

    >>> invexp = lambda x: np.exp(-x)
    >>> integrate.quad(invexp, 0, np.inf)
    (1.0, 5.842605999138044e-11)

    >>> f = lambda x,a : a*x
    >>> y, err = integrate.quad(f, 0, 1, args=(1,))
    >>> y
    0.5
    >>> y, err = integrate.quad(f, 0, 1, args=(3,))
    >>> y
    1.5

    Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
    y parameter as 1::

        testlib.c =>
            double func(int n, double args[n]){
                return args[0]*args[0] + args[1]*args[1];}
        compile to library testlib.*

    ::

       from scipy import integrate
       import ctypes
       lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
       lib.func.restype = ctypes.c_double
       lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
       integrate.quad(lib.func,0,1,(1))
       #(1.3333333333333333, 1.4802973661668752e-14)
       print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
       # 1.3333333333333333

    """
    if not isinstance(args, tuple):
        args = (args,)
    # Dispatch: plain adaptive quadrature vs. weighted-integrand routines.
    if (weight is None):
        retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
                       points)
    else:
        retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
                              limlst, limit, maxp1, weight, wvar, wopts)
    # QUADPACK returns an integer error flag as the last tuple element;
    # 0 means success.
    ier = retval[-1]
    if ier == 0:
        return retval[:-1]
    # Translate QUADPACK error codes into human-readable messages.
    msgs = {80: "A Python error occurred possibly while calling the function.",
             1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
             2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
             3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
             4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
             5: "The integral is probably divergent, or slowly convergent.",
             6: "The input is invalid.",
             7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
            'unknown': "Unknown error."}
    # Fourier-integral runs over infinite intervals use cycle-oriented
    # messages and an extra explanation table for info['ierlst'].
    if weight in ['cos','sin'] and (b == Inf or a == -Inf):
        msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
        msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
        msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
        explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
                   2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
                   3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
                   4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
                   5: "The integral over this cycle is probably divergent or slowly convergent."}
    try:
        msg = msgs[ier]
    except KeyError:
        msg = msgs['unknown']
    if ier in [1,2,3,4,5,7]:
        if full_output:
            # NOTE(review): the msgs/explain customization above triggers
            # on (b == Inf or a == -Inf) while this branch tests a == Inf;
            # the asymmetry looks unintended -- confirm against upstream.
            if weight in ['cos','sin'] and (b == Inf or a == Inf):
                return retval[:-1] + (msg, explain)
            else:
                return retval[:-1] + (msg,)
        else:
            warnings.warn(msg, IntegrationWarning)
            return retval[:-1]
    else:
        # ier 6 (invalid input) and unknown codes are hard errors.
        raise ValueError(msg)
def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
    """Dispatch one 1-D integral to the matching QUADPACK routine.

    Classifies the interval [a, b] as finite, semi-infinite, or doubly
    infinite, then calls _qagse (finite), _qagie (infinite), or _qagpe
    (finite with user break points).
    """
    # Encode the interval type the way the Fortran wrappers expect:
    #   0 = finite, 1 = [a, +inf), 2 = (-inf, +inf), -1 = (-inf, b].
    if b != Inf and a != -Inf:
        infbounds, bound = 0, None          # bound is unused for finite limits
    elif b == Inf and a != -Inf:
        infbounds, bound = 1, a
    elif b == Inf and a == -Inf:
        infbounds, bound = 2, 0             # bound ignored by _qagie here
    elif b != Inf and a == -Inf:
        infbounds, bound = -1, b
    else:
        raise RuntimeError("Infinity comparisons don't work for you.")

    if points is not None:
        # Break points only make sense on a finite interval.
        if infbounds != 0:
            raise ValueError("Infinity inputs cannot be used with break points.")
        npts = len(points)
        # _qagpe wants two extra slots at the end of the break-point array.
        the_points = numpy.zeros((npts + 2,), float)
        the_points[:npts] = points
        return _quadpack._qagpe(func, a, b, the_points, args, full_output,
                                epsabs, epsrel, limit)

    if infbounds == 0:
        return _quadpack._qagse(func, a, b, args, full_output,
                                epsabs, epsrel, limit)
    return _quadpack._qagie(func, bound, infbounds, args, full_output,
                            epsabs, epsrel, limit)
def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts):
    """Dispatch a weighted 1-D integral to the matching QUADPACK routine.

    Branches on the weight family:
      * 'cos'/'sin'  -> oscillatory weights: _qawoe on a finite interval,
        _qawfe on a semi-infinite one (with interval remapping for a == -Inf)
      * 'alg*'       -> algebraic/logarithmic weights: _qawse (finite only)
      * 'cauchy'     -> Cauchy principal value: _qawce (finite only)
    """
    if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
        raise ValueError("%s not a recognized weighting function." % weight)
    # Integer codes understood by the Fortran wrappers; 'cauchy' needs none.
    strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
    if weight in ['cos','sin']:
        integr = strdict[weight]
        if (b != Inf and a != -Inf): # finite limits
            if wopts is None: # no precomputed chebyshev moments
                return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
                                        epsabs, epsrel, limit, maxp1,1)
            else: # precomputed chebyshev moments
                # wopts = (momcom, chebmo) from a previous full_output run;
                # mode 2 tells _qawoe to reuse them.
                momcom = wopts[0]
                chebcom = wopts[1]
                return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
                                        epsabs, epsrel, limit, maxp1, 2, momcom, chebcom)
        elif (b == Inf and a != -Inf):
            return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
                                    epsabs,limlst,limit,maxp1)
        elif (b != Inf and a == -Inf): # remap function and interval
            # Substitute x -> -x so the integral over (-Inf, b] becomes one
            # over [-b, +Inf), which _qawfe can handle.
            if weight == 'cos':
                def thefunc(x,*myargs):
                    y = -x
                    func = myargs[0]
                    myargs = (y,) + myargs[1:]
                    return func(*myargs)
            else:
                # presumably the extra sign flip compensates for the odd
                # symmetry of the sine weight under x -> -x
                def thefunc(x,*myargs):
                    y = -x
                    func = myargs[0]
                    myargs = (y,) + myargs[1:]
                    return -func(*myargs)
            # Prepend the original integrand so the wrapper can find it.
            args = (func,) + args
            return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
                                    full_output, epsabs, limlst, limit, maxp1)
        else:
            raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
    else:
        if a in [-Inf,Inf] or b in [-Inf,Inf]:
            raise ValueError("Cannot integrate with this weight over an infinite interval.")
        if weight[:3] == 'alg':
            integr = strdict[weight]
            return _quadpack._qawse(func, a, b, wvar, integr, args,
                                    full_output, epsabs, epsrel, limit)
        else: # weight == 'cauchy'
            return _quadpack._qawce(func, a, b, wvar, args, full_output,
                                    epsabs, epsrel, limit)
def _infunc(x, func, gfun, hfun, more_args):
    """Inner integrand for dblquad: integrate ``func`` over y at fixed x,
    between the curves y = gfun(x) and y = hfun(x)."""
    y_low = gfun(x)
    y_high = hfun(x)
    return quad(func, y_low, y_high, args=(x,) + more_args)[0]
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
    """
    Compute a double integral.

    Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
    and ``y = gfun(x)..hfun(x)``.

    Parameters
    ----------
    func : callable
        A Python function or method of at least two variables: y must be the
        first argument and x the second argument.
    a, b : float
        The limits of integration in x: `a` < `b`
    gfun : callable
        The lower boundary curve in y which is a function taking a single
        floating point argument (x) and returning a floating point result: a
        lambda function can be useful here.
    hfun : callable
        The upper boundary curve in y (same requirements as `gfun`).
    args : sequence, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the inner 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See also
    --------
    quad : single integral
    tplquad : triple integral
    nquad : N-dimensional integrals
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature
    odeint : ODE integrator
    ode : ODE integrator
    simps : integrator for sampled data
    romb : integrator for sampled data
    scipy.special : for coefficients and roots of orthogonal polynomials

    """
    # The outer quad runs over x; `_infunc` computes the inner y-integral
    # for each x using the boundary curves gfun/hfun.
    inner_args = (func, gfun, hfun, args)
    return quad(_infunc, a, b, args=inner_args,
                epsabs=epsabs, epsrel=epsrel)
def _infunc2(y, x, func, qfun, rfun, more_args):
    """Inner integrand for tplquad: integrate ``func`` over z at fixed
    (x, y), between the surfaces z = qfun(x, y) and z = rfun(x, y)."""
    z_low = qfun(x, y)
    z_high = rfun(x, y)
    return quad(func, z_low, z_high, args=(y, x) + more_args)[0]
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
            epsrel=1.49e-8):
    """
    Compute a triple (definite) integral.

    Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
    ``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.

    Parameters
    ----------
    func : function
        A Python function or method of at least three variables in the
        order (z, y, x).
    a, b : float
        The limits of integration in x: `a` < `b`
    gfun : function
        The lower boundary curve in y which is a function taking a single
        floating point argument (x) and returning a floating point result:
        a lambda function can be useful here.
    hfun : function
        The upper boundary curve in y (same requirements as `gfun`).
    qfun : function
        The lower boundary surface in z.  It must be a function that takes
        two floats in the order (x, y) and returns a float.
    rfun : function
        The upper boundary surface in z. (Same requirements as `qfun`.)
    args : tuple, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the innermost 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad: Adaptive quadrature using QUADPACK
    quadrature: Adaptive Gaussian quadrature
    fixed_quad: Fixed-order Gaussian quadrature
    dblquad: Double integrals
    nquad : N-dimensional integrals
    romb: Integrators for sampled data
    simps: Integrators for sampled data
    ode: ODE integrators
    odeint: ODE integrators
    scipy.special: For coefficients and roots of orthogonal polynomials

    """
    # Reduce to a double integral over (x, y); `_infunc2` computes the
    # innermost z-integral for each (x, y).
    outer_args = (func, qfun, rfun, args)
    return dblquad(_infunc2, a, b, gfun, hfun, args=outer_args,
                   epsabs=epsabs, epsrel=epsrel)
def nquad(func, ranges, args=None, opts=None):
    """
    Integration over multiple variables.

    Wraps `quad` to enable integration over multiple variables.
    Various options allow improved integration of discontinuous functions, as
    well as the use of weighted integration, and generally finer control of the
    integration process.

    Parameters
    ----------
    func : callable
        The function to be integrated. Has arguments of ``x0, ... xn``,
        ``t0, tm``, where integration is carried out over ``x0, ... xn``, which
        must be floats.  Function signature should be
        ``func(x0, x1, ..., xn, t0, t1, ..., tm)``.  Integration is carried out
        in order.  That is, integration over ``x0`` is the innermost integral,
        and ``xn`` is the outermost.
        If performance is a concern, this function may be a ctypes function of
        the form::

            f(int n, double args[n])

        where ``n`` is the number of extra parameters and args is an array
        of doubles of the additional parameters.  This function may then
        be compiled to a dynamic/shared library then imported through
        ``ctypes``, setting the function's argtypes to ``(c_int, c_double)``,
        and the function's restype to ``(c_double)``.  Its pointer may then be
        passed into `nquad` normally.
        This allows the underlying Fortran library to evaluate the function in
        the innermost integration calls without callbacks to Python, and also
        speeds up the evaluation of the function itself.
    ranges : iterable object
        Each element of ranges may be either a sequence of 2 numbers, or else
        a callable that returns such a sequence.  ``ranges[0]`` corresponds to
        integration over x0, and so on.  If an element of ranges is a callable,
        then it will be called with all of the integration arguments available.
        e.g. if ``func = f(x0, x1, x2)``, then ``ranges[0]`` may be defined as
        either ``(a, b)`` or else as ``(a, b) = range0(x1, x2)``.
    args : iterable object, optional
        Additional arguments ``t0, ..., tn``, required by `func`.
    opts : iterable object or dict, optional
        Options to be passed to `quad`.  May be empty, a dict, or
        a sequence of dicts or functions that return a dict.  If empty, the
        default options from scipy.integrate.quad are used.  If a dict, the
        same options are used for all levels of integration.  If a sequence,
        then each element of the sequence corresponds to a particular
        integration. e.g.
        opts[0] corresponds to integration over x0, and so on. The available
        options together with their default values are:

          - epsabs = 1.49e-08
          - epsrel = 1.49e-08
          - limit  = 50
          - points = None
          - weight = None
          - wvar   = None
          - wopts  = None

        The ``full_output`` option from `quad` is unavailable, due to the
        complexity of handling the large amount of data such an option would
        return for this kind of nested integration.  For more information on
        these options, see `quad` and `quad_explain`.

    Returns
    -------
    result : float
        The result of the integration.
    abserr : float
        The maximum of the estimates of the absolute error in the various
        integration results.

    See Also
    --------
    quad : 1-dimensional numerical integration
    dblquad, tplquad : double and triple integrals
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature

    Examples
    --------
    >>> from scipy import integrate
    >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
    ...                 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
    >>> points = [[lambda x1,x2,x3 : 0.2*x3 + 0.5 + 0.25*x1], [], [], []]
    >>> def opts0(*args, **kwargs):
    ...     return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
    >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
    ...                 opts=[opts0,{},{},{}])
    (1.5267454070738633, 2.9437360001402324e-14)

    >>> scale = .1
    >>> def func2(x0, x1, x2, x3, t0, t1):
    ...     return x0*x1*x3**2 + np.sin(x2) + 1 + (1 if x0+t1*x1-t0>0 else 0)
    >>> def lim0(x1, x2, x3, t0, t1):
    ...     return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
    ...             scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
    >>> def lim1(x2, x3, t0, t1):
    ...     return [scale * (t0*x2 + t1*x3) - 1,
    ...             scale * (t0*x2 + t1*x3) + 1]
    >>> def lim2(x3, t0, t1):
    ...     return [scale * (x3 + t0**2*t1**3) - 1,
    ...             scale * (x3 + t0**2*t1**3) + 1]
    >>> def lim3(t0, t1):
    ...     return [scale * (t0+t1) - 1, scale * (t0+t1) + 1]
    >>> def opts0(x1, x2, x3, t0, t1):
    ...     return {'points' : [t0 - t1*x1]}
    >>> def opts1(x2, x3, t0, t1):
    ...     return {}
    >>> def opts2(x3, t0, t1):
    ...     return {}
    >>> def opts3(t0, t1):
    ...     return {}
    >>> integrate.nquad(func2, [lim0, lim1, lim2, lim3], args=(0,0),
    ...                 opts=[opts0, opts1, opts2, opts3])
    (25.066666666666666, 2.7829590483937256e-13)

    """
    depth = len(ranges)
    # Wrap constant ranges/options in callables so `_NQuad` can evaluate
    # every level uniformly with the outer integration variables.
    ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
    if args is None:
        args = ()
    if opts is None:
        opts = [dict([])] * depth
    if isinstance(opts, dict):
        # A single dict applies to every integration level.
        opts = [_OptFunc(opts)] * depth
    else:
        opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
    return _NQuad(func, ranges, opts).integrate(*args)
class _RangeFunc(object):
def __init__(self, range_):
self.range_ = range_
def __call__(self, *args):
"""Return stored value.
*args needed because range_ can be float or func, and is called with
variable number of parameters.
"""
return self.range_
class _OptFunc(object):
def __init__(self, opt):
self.opt = opt
def __call__(self, *args):
"""Return stored dict."""
return self.opt
class _NQuad(object):
    # Recursive driver behind `nquad`: each call to `integrate` performs one
    # 1-D `quad` whose integrand is either the user function (at the deepest
    # level) or the next-deeper partial integral.
    def __init__(self, func, ranges, opts):
        # Worst absolute-error estimate seen across all nested quad calls.
        self.abserr = 0
        self.func = func
        self.ranges = ranges
        self.opts = opts
        self.maxdepth = len(ranges)
    def integrate(self, *args, **kwargs):
        """Integrate over the variable at the current recursion depth.

        ``depth`` counts from 0 (outermost integral) and travels through
        kwargs so that the `partial` bound below keeps quad's positional
        calling convention for the integration variable.
        """
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise ValueError('unexpected kwargs')
        # Get the integration range and options for this depth.
        # Ranges/opts are indexed from the end: ranges[-1] (the outermost
        # variable) is integrated first.
        ind = -(depth + 1)
        fn_range = self.ranges[ind]
        low, high = fn_range(*args)
        fn_opt = self.opts[ind]
        opt = dict(fn_opt(*args))
        if 'points' in opt:
            # quad rejects break points outside [low, high]; drop them.
            opt['points'] = [x for x in opt['points'] if low <= x <= high]
        if depth + 1 == self.maxdepth:
            # Deepest level: integrate the user function directly.
            f = self.func
        else:
            # Otherwise integrate the next-deeper partial integral.
            f = partial(self.integrate, depth=depth+1)
        value, abserr = quad(f, low, high, args=args, **opt)
        self.abserr = max(self.abserr, abserr)
        if depth > 0:
            return value
        else:
            # Final result of n-D integration with error
            return value, self.abserr
|
wgertler/wgertler.github.io | refs/heads/master | articles/bht/classical-thermodynamics/multiplicity_graph.py | 1 | from matplotlib.pyplot import bar, xlabel, ylabel, title, show
from factorial import factorial
from multiplicity import choice
import sys
def mult_plotter(x, y):
    """Plot the multiplicity distribution of two coupled Einstein solids.

    Args:
        x: total number of energy quanta shared by the two solids
           (string or int; converted with int()).
        y: number of oscillators in each solid (string or int).

    Side effect: displays a bar chart of total multiplicity vs. q_a,
    where q_a is the number of quanta held by solid A.
    """
    q_total = int(x)
    n_osc = int(y)
    q_values = []    # q_a = 0 .. q_total
    total_mult = []  # Omega_A(q_a) * Omega_B(q_total - q_a)
    for q_a in range(q_total + 1):
        mult_a = choice(n_osc + q_a - 1, q_a)
        mult_b = choice(n_osc + (q_total - q_a) - 1, q_total - q_a)
        q_values.append(q_a)
        total_mult.append(mult_a * mult_b)
    # Bug fix: the original did `title = "multiplicity distribution"`,
    # which only bound a local variable (shadowing pyplot's title()) and
    # never set the plot title.  Call the function instead.
    title("multiplicity distribution")
    xlabel("$q_a$")
    ylabel("$\Omega_{total}$")
    bar(q_values, total_mult)
    show()
# Script entry point: argv[1] = total quanta, argv[2] = oscillators per solid.
mult_plotter(sys.argv[1],sys.argv[2])
|
g19-hs/personfinder | refs/heads/master | app/script_variant.py | 2 | import jautils
from unidecode import unidecode
import os.path
import re
def read_dictionary(file_name):
    """
    Reads dictionary file.
    Args:
        file_name: file name.
                   format: kanji + '\t' + yomigana
    Return:
        {kanji: yomigana, ...}
        An empty dict when the file is missing or unreadable.
    """
    dictionary = {}
    try:
        if os.path.exists(file_name):
            with open(file_name, 'r') as f:
                for line in f:
                    # Strip the trailing newline and split on the first tab.
                    # (Python 2 source: decode() turns the bytes into unicode.)
                    kanji, hiragana = line[:-1].split('\t')
                    dictionary[kanji.decode('utf-8')] = hiragana.decode('utf-8')
    except IOError:
        # Bug fix: this used to `return None`, but every caller does
        # `word in DICTIONARY`, which raises TypeError on None.  Degrade
        # gracefully to an empty dictionary instead.
        return {}
    return dictionary
# Kanji -> yomigana lookup tables, loaded once at import time.  The data
# files are expected to be found relative to the working directory.
JAPANESE_NAME_DICTIONARY = read_dictionary('japanese_name_dict.txt')
JAPANESE_LOCATION_DICTIONARY = read_dictionary('jp_location_dict.txt')
def romanize_japanese_name_by_name_dict(word):
    """
    Romanize a japanese name using the name dictionary.

    Words that are empty or missing from the dictionary are returned
    unchanged.
    """
    if not word:
        return word
    if word not in JAPANESE_NAME_DICTIONARY:
        return word
    yomigana = JAPANESE_NAME_DICTIONARY[word]
    return jautils.hiragana_to_romaji(yomigana)
def romanize_japanese_location(word):
    """
    This method romanizes a japanese location name by using the location
    dictionary.
    If word isn't found in dictionary, this method doesn't
    apply romanize.
    """
    if not word:
        return word
    if word in JAPANESE_LOCATION_DICTIONARY:
        yomigana = JAPANESE_LOCATION_DICTIONARY[word]
        return jautils.hiragana_to_romaji(yomigana)
    return word
def romanize_word(word):
    """
    This method romanizes all languages by unidecode.
    If word is hiragana or katakana, it is romanized by jautils.
    Args:
        word: should be script varianted
    Returns:
        script varianted word
    """
    if not word:
        return word
    # CJK unified ideographs (U+3400..U+9FFF): try the name and location
    # dictionaries first so known kanji get a proper reading instead of
    # unidecode's character-by-character guess.
    # (Python 2 source: ur'' is a raw unicode literal.)
    if re.match(ur'([\u3400-\u9fff])', word):
        word = romanize_japanese_name_by_name_dict(word)
        word = romanize_japanese_location(word)
    # Kana input: normalize to hiragana, then convert to romaji.
    if jautils.should_normalize(word):
        hiragana_word = jautils.normalize(word)
        return jautils.hiragana_to_romaji(hiragana_word)
    # Fallback for everything else: ASCII transliteration via unidecode.
    romanized_word = unidecode(word)
    return romanized_word.strip()
def romanize_text(query_txt):
    """
    Apply romanization to each whitespace-separated word in query_txt.

    Uses unidecode and jautils (via romanize_word) for the script variants.

    Args:
        query_txt: Search query
    Returns:
        script varianted query_txt (except kanji)
    """
    romanized_words = [romanize_word(token) for token in query_txt.split(' ')]
    return ' '.join(romanized_words)
|
apocalypsebg/odoo | refs/heads/8.0 | addons/account/res_currency.py | 340 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010 OpenERP s.a. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
"""Inherit res.currency to handle accounting date values when converting currencies"""
class res_currency_account(osv.osv):
_inherit = "res.currency"
def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None):
if context is None:
context = {}
rate = super(res_currency_account, self)._get_conversion_rate(cr, uid, from_currency, to_currency, context=context)
#process the case where the account doesn't work with an outgoing currency rate method 'at date' but 'average'
account = context.get('res.currency.compute.account')
account_invert = context.get('res.currency.compute.account_invert')
if account and account.currency_mode == 'average' and account.currency_id:
query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
cr.execute('select sum(debit-credit),sum(amount_currency) from account_move_line l ' \
'where l.currency_id=%s and l.account_id=%s and '+query, (account.currency_id.id,account.id,))
tot1,tot2 = cr.fetchone()
if tot2 and not account_invert:
rate = float(tot1)/float(tot2)
elif tot1 and account_invert:
rate = float(tot2)/float(tot1)
return rate
|
wesm/statsmodels | refs/heads/master | scikits/statsmodels/base/data.py | 1 | """
Base tools for handling various kinds of data structures, attaching metadata to
results, and doing data cleaning
"""
import numpy as np
from pandas import DataFrame, Series, TimeSeries
from scikits.timeseries import time_series
from scikits.statsmodels.tools.decorators import (resettable_cache,
cache_readonly, cache_writable)
import scikits.statsmodels.tools.data as data_util
class ModelData(object):
    """
    Class responsible for handling input data and extracting metadata into the
    appropriate form
    """
    def __init__(self, endog, exog=None, **kwds):
        # Keep the raw inputs: names and row labels are extracted lazily
        # from them by the cached properties below.
        self._orig_endog = endog
        self._orig_exog = exog
        self.endog, self.exog = self._convert_endog_exog(endog, exog)
        self._check_integrity()
        self._cache = resettable_cache()
    def _convert_endog_exog(self, endog, exog):
        # for consistent outputs if endog is (n,1)
        yarr = self._get_yarr(endog)
        xarr = None
        if exog is not None:
            xarr = self._get_xarr(exog)
            if xarr.ndim == 1:
                # Promote 1-d exog to a single-column 2-d array.
                xarr = xarr[:, None]
            if xarr.ndim != 2:
                raise ValueError("exog is not 1d or 2d")
        return yarr, xarr
    @cache_writable()
    def ynames(self):
        # Endog name(s): a single string for univariate models, a list for
        # multivariate (e.g. VAR) models.
        endog = self._orig_endog
        ynames = self._get_names(endog)
        if not ynames:
            ynames = _make_endog_names(endog)
        if len(ynames) == 1:
            return ynames[0]
        else:
            return list(ynames)
    @cache_writable()
    def xnames(self):
        # Exog column names, or None when there is no exog.
        exog = self._orig_exog
        if exog is not None:
            xnames = self._get_names(exog)
            if not xnames:
                xnames = _make_exog_names(exog)
            return list(xnames)
        return None
    @cache_readonly
    def row_labels(self):
        # Prefer exog labels; fall back to endog labels.
        exog = self._orig_exog
        if exog is not None:
            row_labels = self._get_row_labels(exog)
        else:
            endog = self._orig_endog
            row_labels = self._get_row_labels(endog)
        return row_labels
    def _get_row_labels(self, arr):
        # Plain ndarrays carry no labels; label-aware subclasses override.
        return None
    def _get_names(self, arr):
        # Extract column/series names from pandas objects or structured
        # ndarray dtypes; None when the input is unlabeled.
        if isinstance(arr, DataFrame):
            return list(arr.columns)
        elif isinstance(arr, Series):
            return [arr.name]
        else:
            try:
                return arr.dtype.names
            except AttributeError:
                pass
        return None
    def _get_yarr(self, endog):
        # Convert endog to a squeezed plain ndarray.
        if data_util.is_structured_ndarray(endog):
            endog = data_util.struct_to_ndarray(endog)
        return np.asarray(endog).squeeze()
    def _get_xarr(self, exog):
        # Convert exog to a plain ndarray (shape handled by the caller).
        if data_util.is_structured_ndarray(exog):
            exog = data_util.struct_to_ndarray(exog)
        return np.asarray(exog)
    def _check_integrity(self):
        # endog and exog must describe the same observations.
        if self.exog is not None:
            if len(self.exog) != len(self.endog):
                raise ValueError("endog and exog matrices are different sizes")
    def wrap_output(self, obj, how='columns'):
        # Dispatch on `how` to reattach the right metadata to a raw result;
        # unknown values return the object unchanged.
        if how == 'columns':
            return self.attach_columns(obj)
        elif how == 'rows':
            return self.attach_rows(obj)
        elif how == 'cov':
            return self.attach_cov(obj)
        elif how == 'dates':
            return self.attach_dates(obj)
        elif how == 'columns_eq':
            return self.attach_columns_eq(obj)
        elif how == 'cov_eq':
            return self.attach_cov_eq(obj)
        else:
            return obj
    # The attach_* hooks are no-ops here; label-aware subclasses override
    # them to reattach names/labels/dates to raw results.
    def attach_columns(self, result):
        return result
    def attach_columns_eq(self, result):
        return result
    def attach_cov(self, result):
        return result
    def attach_cov_eq(self, result):
        return result
    def attach_rows(self, result):
        return result
    def attach_dates(self, result):
        return result
class PandasData(ModelData):
    """
    Data handling class which knows how to reattach pandas metadata to model
    results
    """
    def _get_row_labels(self, arr):
        # pandas objects carry their row labels on the index.
        return arr.index
    def attach_columns(self, result):
        # 1-d results become a Series indexed by the exog names; 2-d
        # results (e.g. confidence intervals) become a DataFrame with one
        # row per exog name.
        if result.squeeze().ndim == 1:
            return Series(result, index=self.xnames)
        else: # for e.g., confidence intervals
            return DataFrame(result, index=self.xnames)
    def attach_columns_eq(self, result):
        # Equation-system results: rows = exog names, cols = endog names.
        return DataFrame(result, index=self.xnames, columns=self.ynames)
    def attach_cov(self, result):
        # Parameter covariance: square, labeled by exog names on both axes.
        return DataFrame(result, index=self.xnames, columns=self.xnames)
    def attach_cov_eq(self, result):
        return DataFrame(result, index=self.ynames, columns=self.ynames)
    def attach_rows(self, result):
        # assumes if len(row_labels) > len(result) it's bc it was truncated
        # at the front, for AR lags, for example
        if result.squeeze().ndim == 1:
            return Series(result, index=self.row_labels[-len(result):])
        else: # this is for VAR results, may not be general enough
            return DataFrame(result, index=self.row_labels[-len(result):],
                             columns=self.ynames)
    def attach_dates(self, result):
        # NOTE(review): `predict_dates` is not defined in this class -- it
        # is presumably set by prediction code elsewhere; confirm before use.
        return TimeSeries(result, index=self.predict_dates)
class TimeSeriesData(ModelData):
    """
    Data handling class which returns scikits.timeseries model results
    """

    def _get_row_labels(self, arr):
        # scikits.timeseries objects carry their row labels as dates.
        return arr.dates

    #def attach_columns(self, result):
    #    return recarray?
    #def attach_cov(self, result):
    #    return recarray?

    def attach_rows(self, result):
        # Trailing labels only: earlier rows may have been dropped (lags).
        labels = self.row_labels[-len(result):]
        return time_series(result, dates=labels)

    def attach_dates(self, result):
        return time_series(result, dates=self.predict_dates)
# Module handle for the optional `la` (larry) package; populated on first use.
_la = None
def _lazy_import_larry():
    # Deferred import keeps `la` an optional dependency: it is only
    # imported when LarryData is actually instantiated.
    global _la
    import la
    _la = la
class LarryData(ModelData):
    """
    Data handling class which knows how to reattach larry (``la``) metadata
    to model results
    """
    def __init__(self, endog, exog=None, **kwds):
        _lazy_import_larry()
        super(LarryData, self).__init__(endog, exog=exog, **kwds)
    def _get_yarr(self, endog):
        # larry stores its data in the `x` attribute.
        try:
            return endog.x
        except AttributeError:
            return np.asarray(endog).squeeze()
    def _get_xarr(self, exog):
        try:
            return exog.x
        except AttributeError:
            return np.asarray(exog)
    def _get_names(self, exog):
        # Column names are the labels of the second axis, when present.
        try:
            return exog.label[1]
        except Exception:
            pass
        return None
    def _get_row_labels(self, arr):
        return arr.label[0]
    def attach_columns(self, result):
        if result.ndim == 1:
            return _la.larry(result, [self.xnames])
        else:
            # Bug fix: was `results.shape` (NameError) and `_la.larray`
            # (the la module exposes `larry`, not `larray`).
            shape = result.shape
            return _la.larry(result, [self.xnames, list(range(shape[1]))])
    def attach_columns_eq(self, result):
        # Bug fix: was `_la.larray(result, [self.xnames], [self.xnames])`.
        # larry takes a single list of per-axis label lists; the second
        # list was being passed as larry's `dtype` argument.  Mirror
        # PandasData.attach_columns_eq: rows = xnames, cols = ynames.
        return _la.larry(result, [self.xnames, self.ynames])
    def attach_cov(self, result):
        # Bug fix: labels for both axes go in one list-of-lists (the old
        # second argument landed in larry's `dtype` parameter).
        return _la.larry(result, [self.xnames, self.xnames])
    def attach_cov_eq(self, result):
        # Bug fix: `_la.larray` does not exist; same label fix as above.
        return _la.larry(result, [self.ynames, self.ynames])
    def attach_rows(self, result):
        return _la.larry(result, [self.row_labels[-len(result):]])
    def attach_dates(self, result):
        # Bug fix: `_la.larray` -> `_la.larry`.
        return _la.larry(result, [self.predict_dates])
def _is_structured_array(data):
return isinstance(data, np.ndarray) and data.dtype.names is not None
def _make_endog_names(endog):
if endog.ndim == 1 or endog.shape[1] == 1:
ynames = ['y']
else: # for VAR
ynames = ['y%d' % (i+1) for i in range(endog.shape[1])]
return ynames
def _make_exog_names(exog):
exog_var = exog.var(0)
if (exog_var == 0).any():
# assumes one constant in first or last position
# avoid exception if more than one constant
const_idx = exog_var.argmin()
if const_idx == exog.shape[1] - 1:
exog_names = ['x%d' % i for i in range(1,exog.shape[1])]
exog_names += ['const']
else:
exog_names = ['x%d' % i for i in range(exog.shape[1])]
exog_names[const_idx] = 'const'
else:
exog_names = ['x%d' % i for i in range(exog.shape[1])]
return exog_names
def handle_data(endog, exog):
    """
    Given endog/exog inputs, build and return the matching ModelData
    subclass instance (pandas, larry, scikits.timeseries, or plain ndarray).
    """
    if _is_using_pandas(endog, exog):
        data_klass = PandasData
    elif _is_using_larry(endog, exog):
        data_klass = LarryData
    elif _is_using_timeseries(endog, exog):
        data_klass = TimeSeriesData
    # keep this check last: plain ndarrays are the most generic case
    elif _is_using_ndarray(endog, exog):
        data_klass = ModelData
    else:
        raise ValueError('unrecognized data structures: %s / %s' %
                         (type(endog), type(exog)))
    return data_klass(endog, exog=exog)
def _is_using_ndarray(endog, exog):
return (isinstance(endog, np.ndarray) and
(isinstance(exog, np.ndarray) or exog is None))
def _is_using_pandas(endog, exog):
from pandas import Series, DataFrame, WidePanel
klasses = (Series, DataFrame, WidePanel)
return (isinstance(endog, klasses) or isinstance(exog, klasses))
def _is_using_larry(endog, exog):
try:
import la
return isinstance(endog, la.larry) or isinstance(exog, la.larry)
except ImportError:
return False
def _is_using_timeseries(endog, exog):
from scikits.timeseries import TimeSeries as tsTimeSeries
return isinstance(endog, tsTimeSeries) or isinstance(exog, tsTimeSeries)
|
g0v/nowin_core | refs/heads/master | nowin_core/source/command.py | 2 | from twisted.protocols.basic import LineReceiver
from nowin_core.source import channel
class CommandReceiver(channel.ChannelReceiver):
    """Channel receiver speaking a line-based "<cmd>: <data>" protocol.

    Bytes arriving on ``cmd_channel`` are framed into lines by a twisted
    ``LineReceiver``; each complete line is split on its first colon and
    dispatched to ``commandReceived``.
    """

    def __init__(self):
        # LineReceiver is used purely for its line buffering; its line
        # callback is redirected to our own handler.
        receiver = LineReceiver()
        receiver.lineReceived = self.lineReceived
        self.lineReceiver = receiver

    def lineReceived(self, line):
        # Only the first colon separates the command from its payload.
        cmd, payload = line.split(':', 1)
        self.commandReceived(cmd.strip(), payload.strip())

    def commandReceived(self, cmd, data):
        """Subclass hook: called with the parsed command name and payload."""
        raise NotImplementedError

    def sendCommand(self, cmd, data):
        # Emit a CRLF-terminated command line on the command channel.
        return self.send(self.cmd_channel, '%s: %s\r\n' % (cmd, data))

    def channeReceived(self, channel, type, data):
        # Only the command channel carries line-based commands.
        if channel != self.cmd_channel:
            return
        self.lineReceiver.dataReceived(data)
|
dongweiming/wechat-admin | refs/heads/master | wechat/celeryconfig.py | 1 | from config import REDIS_URL
# Broker and result backend share the same Redis instance.
BROKER_URL = REDIS_URL
CELERY_RESULT_BACKEND = REDIS_URL
# Tasks are serialized with msgpack (compact); results with JSON.
CELERY_TASK_SERIALIZER = 'msgpack'
CELERY_RESULT_SERIALIZER = 'json'
# Keep task results for one day (in seconds).
CELERY_TASK_RESULT_EXPIRES = 60 * 60 * 24
# Only accept the serializers we actually produce.
CELERY_ACCEPT_CONTENT = ['json', 'msgpack']
|
tinkerthaler/odoo | refs/heads/8.0 | addons/auth_signup/res_users.py | 90 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from datetime import datetime, timedelta
import random
from urlparse import urljoin
import werkzeug
from openerp.addons.base.ir.ir_mail_server import MailDeliveryException
from openerp.osv import osv, fields
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT, ustr
from ast import literal_eval
from openerp.tools.translate import _
class SignupError(Exception):
    """Raised when a signup step fails (e.g. an invalid or expired token)."""
    pass
def random_token():
    """Generate a hard-to-guess signup/reset token.

    :return: a 20-character alphanumeric string
    """
    # the token has an entropy of about 120 bits (6 bits/char * 20 chars)
    chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    # Security fix: signup tokens are credentials, so draw from the OS
    # CSPRNG via SystemRandom instead of the seedable (and therefore
    # predictable) default Mersenne Twister generator.
    rng = random.SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(20))
def now(**kwargs):
    """Return the current datetime shifted by ``timedelta(**kwargs)``,
    formatted as a server-format datetime string."""
    moment = datetime.now() + timedelta(**kwargs)
    return moment.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
class res_partner(osv.Model):
    """Extend partners with signup tokens used for invitation, signup and
    password-reset URLs."""
    _inherit = 'res.partner'

    def _get_signup_valid(self, cr, uid, ids, name, arg, context=None):
        # A token is valid when it exists and is not past its expiration
        # datetime (no expiration means it never expires).
        dt = now()
        res = {}
        for partner in self.browse(cr, uid, ids, context):
            res[partner.id] = bool(partner.signup_token) and \
                                (not partner.signup_expiration or dt <= partner.signup_expiration)
        return res

    def _get_signup_url_for_action(self, cr, uid, ids, action=None, view_type=None, menu_id=None, res_id=None, model=None, context=None):
        """ generate a signup url for the given partner ids and action, possibly overriding
            the url state components (menu_id, id, view_type) """
        if context is None:
            context = {}
        res = dict.fromkeys(ids, False)
        base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        for partner in self.browse(cr, uid, ids, context):
            # when required, make sure the partner has a valid signup token
            if context.get('signup_valid') and not partner.user_ids:
                self.signup_prepare(cr, uid, [partner.id], context=context)
            route = 'login'
            # the parameters to encode for the query
            query = dict(db=cr.dbname)
            signup_type = context.get('signup_force_type_in_url', partner.signup_type or '')
            if signup_type:
                route = 'reset_password' if signup_type == 'reset' else signup_type
            if partner.signup_token and signup_type:
                query['token'] = partner.signup_token
            elif partner.user_ids:
                # no token, but an existing user: link to the login page
                query['login'] = partner.user_ids[0].login
            else:
                continue # no signup token, no user, thus no signup url!
            # optional URL state components go in the '#' fragment
            fragment = dict()
            if action:
                fragment['action'] = action
            if view_type:
                fragment['view_type'] = view_type
            if menu_id:
                fragment['menu_id'] = menu_id
            if model:
                fragment['model'] = model
            if res_id:
                fragment['id'] = res_id
            if fragment:
                query['redirect'] = '/web#' + werkzeug.url_encode(fragment)
            res[partner.id] = urljoin(base_url, "/web/%s?%s" % (route, werkzeug.url_encode(query)))
        return res

    def _get_signup_url(self, cr, uid, ids, name, arg, context=None):
        """ proxy for function field towards actual implementation """
        return self._get_signup_url_for_action(cr, uid, ids, context=context)

    _columns = {
        'signup_token': fields.char('Signup Token', copy=False),
        'signup_type': fields.char('Signup Token Type', copy=False),
        'signup_expiration': fields.datetime('Signup Expiration', copy=False),
        'signup_valid': fields.function(_get_signup_valid, type='boolean', string='Signup Token is Valid'),
        'signup_url': fields.function(_get_signup_url, type='char', string='Signup URL'),
    }

    def action_signup_prepare(self, cr, uid, ids, context=None):
        return self.signup_prepare(cr, uid, ids, context=context)

    def signup_cancel(self, cr, uid, ids, context=None):
        # Clear any pending token so outstanding signup links stop working.
        return self.write(cr, uid, ids, {'signup_token': False, 'signup_type': False, 'signup_expiration': False}, context=context)

    def signup_prepare(self, cr, uid, ids, signup_type="signup", expiration=False, context=None):
        """ generate a new token for the partners with the given validity, if necessary
            :param expiration: the expiration datetime of the token (string, optional)
        """
        for partner in self.browse(cr, uid, ids, context):
            if expiration or not partner.signup_valid:
                # loop until the generated token is globally unique
                token = random_token()
                while self._signup_retrieve_partner(cr, uid, token, context=context):
                    token = random_token()
                partner.write({'signup_token': token, 'signup_type': signup_type, 'signup_expiration': expiration})
        return True

    def _signup_retrieve_partner(self, cr, uid, token,
            check_validity=False, raise_exception=False, context=None):
        """ find the partner corresponding to a token, and possibly check its validity
            :param token: the token to resolve
            :param check_validity: if True, also check validity
            :param raise_exception: if True, raise exception instead of returning False
            :return: partner (browse record) or False (if raise_exception is False)
        """
        partner_ids = self.search(cr, uid, [('signup_token', '=', token)], context=context)
        if not partner_ids:
            if raise_exception:
                raise SignupError("Signup token '%s' is not valid" % token)
            return False
        partner = self.browse(cr, uid, partner_ids[0], context)
        if check_validity and not partner.signup_valid:
            if raise_exception:
                raise SignupError("Signup token '%s' is no longer valid" % token)
            return False
        return partner

    def signup_retrieve_info(self, cr, uid, token, context=None):
        """ retrieve the user info about the token
            :return: a dictionary with the user information:
                - 'db': the name of the database
                - 'token': the token, if token is valid
                - 'name': the name of the partner, if token is valid
                - 'login': the user login, if the user already exists
                - 'email': the partner email, if the user does not exist
        """
        # Bug fix: propagate the caller's context (this call was hard-coded
        # to context=None, silently dropping e.g. the language used for
        # translated error messages).
        partner = self._signup_retrieve_partner(cr, uid, token, raise_exception=True, context=context)
        res = {'db': cr.dbname}
        if partner.signup_valid:
            res['token'] = token
            res['name'] = partner.name
        if partner.user_ids:
            res['login'] = partner.user_ids[0].login
        else:
            res['email'] = partner.email or ''
        return res
class res_users(osv.Model):
    """Extend res.users with a signup/activation state, token-based signup,
    and password-reset-by-email workflows (OpenERP v7 osv API, Python 2)."""
    _inherit = 'res.users'

    def _get_state(self, cr, uid, ids, name, arg, context=None):
        # Function-field getter: 'active' once the user has logged in at
        # least once (login_date set), 'new' otherwise.
        res = {}
        for user in self.browse(cr, uid, ids, context):
            res[user.id] = ('active' if user.login_date else 'new')
        return res

    _columns = {
        'state': fields.function(_get_state, string='Status', type='selection',
                    selection=[('new', 'Never Connected'), ('active', 'Activated')]),
    }

    def signup(self, cr, uid, values, token=None, context=None):
        """ signup a user, to either:
            - create a new user (no token), or
            - create a user for a partner (with token, but no user for partner), or
            - change the password of a user (with token, and existing user).

            :param values: a dictionary with field values that are written on user
            :param token: signup token (optional)
            :return: (dbname, login, password) for the signed up user
        """
        if token:
            # signup with a token: find the corresponding partner id
            res_partner = self.pool.get('res.partner')
            # NOTE(review): the caller's context is dropped here
            # (``context=None``) -- presumably unintended; confirm before
            # changing, as upstream shipped it this way.
            partner = res_partner._signup_retrieve_partner(
                cr, uid, token, check_validity=True, raise_exception=True, context=None)
            # invalidate signup token so it cannot be replayed
            partner.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})
            partner_user = partner.user_ids and partner.user_ids[0] or False

            # avoid overwriting existing (presumably correct) values with geolocation data
            if partner.country_id or partner.zip or partner.city:
                values.pop('city', None)
                values.pop('country_id', None)
            if partner.lang:
                values.pop('lang', None)

            if partner_user:
                # user exists, modify it according to values
                values.pop('login', None)
                values.pop('name', None)
                partner_user.write(values)
                return (cr.dbname, partner_user.login, values.get('password'))
            else:
                # user does not exist: sign up invited user
                values.update({
                    'name': partner.name,
                    'partner_id': partner.id,
                    'email': values.get('email') or values.get('login'),
                })
                if partner.company_id:
                    # (6, 0, ids) replaces the whole many2many with these ids
                    values['company_id'] = partner.company_id.id
                    values['company_ids'] = [(6, 0, [partner.company_id.id])]
                self._signup_create_user(cr, uid, values, context=context)
        else:
            # no token, sign up an external user
            values['email'] = values.get('email') or values.get('login')
            self._signup_create_user(cr, uid, values, context=context)

        return (cr.dbname, values.get('login'), values.get('password'))

    def _signup_create_user(self, cr, uid, values, context=None):
        """ create a new user from the template user """
        ir_config_parameter = self.pool.get('ir.config_parameter')
        template_user_id = literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.template_user_id', 'False'))
        assert template_user_id and self.exists(cr, uid, template_user_id, context=context), 'Signup: invalid template user'

        # check that uninvited users may sign up
        if 'partner_id' not in values:
            if not literal_eval(ir_config_parameter.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')):
                raise SignupError('Signup is not allowed for uninvited users')

        assert values.get('login'), "Signup: no login given for new user"
        assert values.get('partner_id') or values.get('name'), "Signup: no name or partner given for new user"

        # create a copy of the template user (attached to a specific partner_id if given)
        values['active'] = True
        # no_reset_password keeps create() from immediately emailing the user
        context = dict(context or {}, no_reset_password=True)
        try:
            # savepoint: roll back only the failed copy, not the whole txn
            with cr.savepoint():
                return self.copy(cr, uid, template_user_id, values, context=context)
        except Exception, e:
            # copy may failed if asked login is not available.
            raise SignupError(ustr(e))

    def reset_password(self, cr, uid, login, context=None):
        """ retrieve the user corresponding to login (login or email),
            and reset their password
        """
        user_ids = self.search(cr, uid, [('login', '=', login)], context=context)
        if not user_ids:
            # fall back to matching on the email address
            user_ids = self.search(cr, uid, [('email', '=', login)], context=context)
        if len(user_ids) != 1:
            raise Exception('Reset password: invalid username or email')
        return self.action_reset_password(cr, uid, user_ids, context=context)

    def action_reset_password(self, cr, uid, ids, context=None):
        """ create signup token for each user, and send their signup url by email """
        # prepare reset password signup: tokens valid for one day
        res_partner = self.pool.get('res.partner')
        partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context)]
        res_partner.signup_prepare(cr, uid, partner_ids, signup_type="reset", expiration=now(days=+1), context=context)

        if not context:
            context = {}

        # send email to users with their signup url; a freshly created user
        # gets the 'set password' template, others the 'reset password' one
        template = False
        if context.get('create_user'):
            try:
                # get_object() raises ValueError if record does not exist
                template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'set_password_email')
            except ValueError:
                pass
        if not bool(template):
            template = self.pool.get('ir.model.data').get_object(cr, uid, 'auth_signup', 'reset_password_email')
        assert template._name == 'email.template'

        for user in self.browse(cr, uid, ids, context):
            if not user.email:
                raise osv.except_osv(_("Cannot send email: user has no email address."), user.name)
            self.pool.get('email.template').send_mail(cr, uid, template.id, user.id, force_send=True, raise_exception=True, context=context)

    def create(self, cr, uid, values, context=None):
        # overridden to automatically invite user to sign up
        if context is None:
            context = {}
        user_id = super(res_users, self).create(cr, uid, values, context=context)
        user = self.browse(cr, uid, user_id, context=context)
        if user.email and not context.get('no_reset_password'):
            context = dict(context, create_user=True)
            try:
                self.action_reset_password(cr, uid, [user.id], context=context)
            except MailDeliveryException:
                # invitation email failed: drop the signup token again
                self.pool.get('res.partner').signup_cancel(cr, uid, [user.partner_id.id], context=context)
        return user_id
|
iradul/qtwebkit | refs/heads/pjs | Tools/Scripts/webkitpy/port/win_unittest.py | 115 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
import unittest2 as unittest
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.port import port_testcase
from webkitpy.port.win import WinPort
from webkitpy.tool.mocktool import MockOptions
class WinPortTest(port_testcase.PortTestCase):
    """Unit tests for the Apple Windows (win) WebKit port object."""
    os_name = 'win'
    os_version = 'xp'
    port_name = 'win-xp'
    port_maker = WinPort

    def test_show_results_html_file(self):
        port = self.make_port()
        port._executive = MockExecutive(should_log=True)
        capture = OutputCapture()
        capture.capture_output()
        port.show_results_html_file('test.html')
        _, _, logs = capture.restore_output()
        # We can't know for sure what path will be produced by cygpath, but we can assert about
        # everything else.
        self.assertTrue(logs.startswith("MOCK run_command: ['Tools/Scripts/run-safari', '--release', '"))
        self.assertTrue(logs.endswith("test.html'], cwd=/mock-checkout\n"))

    def _assert_search_path(self, expected_search_paths, version, use_webkit2=False):
        # Helper: the baseline search path for |version| must match the
        # expected fallback chain (relative names resolved to absolute paths).
        port = self.make_port(port_name='win', os_version=version, options=MockOptions(webkit_test_runner=use_webkit2))
        absolute_search_paths = map(port._webkit_baseline_path, expected_search_paths)
        self.assertEqual(port.baseline_search_path(), absolute_search_paths)

    def test_baseline_search_path(self):
        self._assert_search_path(['win-xp', 'win-vista', 'win-7sp0', 'win', 'mac-lion', 'mac'], 'xp')
        self._assert_search_path(['win-vista', 'win-7sp0', 'win', 'mac-lion', 'mac'], 'vista')
        self._assert_search_path(['win-7sp0', 'win', 'mac-lion', 'mac'], '7sp0')
        # WebKit2 runs prepend the -wk2 variants to the chain.
        self._assert_search_path(['win-wk2', 'win-xp', 'win-vista', 'win-7sp0', 'win', 'mac-wk2', 'mac-lion', 'mac'], 'xp', use_webkit2=True)
        self._assert_search_path(['win-wk2', 'win-vista', 'win-7sp0', 'win', 'mac-wk2', 'mac-lion', 'mac'], 'vista', use_webkit2=True)
        self._assert_search_path(['win-wk2', 'win-7sp0', 'win', 'mac-wk2', 'mac-lion', 'mac'], '7sp0', use_webkit2=True)

    def _assert_version(self, port_name, expected_version):
        # Helper: building a port on a host of |expected_version| must
        # report that same version back.
        host = MockSystemHost(os_name='win', os_version=expected_version)
        port = WinPort(host, port_name=port_name)
        self.assertEqual(port.version(), expected_version)

    def test_versions(self):
        self._assert_version('win-xp', 'xp')
        self._assert_version('win-vista', 'vista')
        self._assert_version('win-7sp0', '7sp0')
        # Unknown versions are rejected with an assertion.
        self.assertRaises(AssertionError, self._assert_version, 'win-me', 'xp')

    def test_compare_text(self):
        expected = "EDITING DELEGATE: webViewDidChangeSelection:WebViewDidChangeSelectionNotification\nfoo\nEDITING DELEGATE: webViewDidChangeSelection:WebViewDidChangeSelectionNotification\n"
        port = self.make_port()
        self.assertFalse(port.do_text_results_differ(expected, "foo\n"))
        self.assertTrue(port.do_text_results_differ(expected, "foo"))
        self.assertTrue(port.do_text_results_differ(expected, "bar"))

        # This hack doesn't exist in WK2.
        port._options = MockOptions(webkit_test_runner=True)
        self.assertTrue(port.do_text_results_differ(expected, "foo\n"))

    def test_operating_system(self):
        self.assertEqual('win', self.make_port().operating_system())

    def test_runtime_feature_list(self):
        port = self.make_port()
        port._executive.run_command = lambda command, cwd=None, error_handler=None: "Nonsense"
        # runtime_features_list returns None when its results are meaningless (it couldn't run DRT or parse the output, etc.)
        self.assertEqual(port._runtime_feature_list(), None)
        port._executive.run_command = lambda command, cwd=None, error_handler=None: "SupportedFeatures:foo bar"
        self.assertEqual(port._runtime_feature_list(), ['foo', 'bar'])

    def test_expectations_files(self):
        self.assertEqual(len(self.make_port().expectations_files()), 3)
        self.assertEqual(len(self.make_port(options=MockOptions(webkit_test_runner=True, configuration='Release')).expectations_files()), 5)

    def test_get_crash_log(self):
        # Win crash logs are tested elsewhere, so here we just make sure we don't crash.
        def fake_time_cb():
            times = [0, 20, 40]
            return lambda: times.pop(0)
        port = self.make_port(port_name='win')
        port._get_crash_log('DumpRenderTree', 1234, '', '', 0,
            time_fn=fake_time_cb(), sleep_fn=lambda delay: None)
|
hlange/LogSoCR | refs/heads/master | .waf/waflib/extras/run_r_script.py | 51 | #!/usr/bin/env python
# encoding: utf-8
# Hans-Martin von Gaudecker, 2012
"""
Run a R script in the directory specified by **ctx.bldnode**.
For error-catching purposes, keep an own log-file that is destroyed if the
task finished without error. If not, it will show up as rscript_[index].log
in the bldnode directory.
Usage::
ctx(features='run_r_script',
source='some_script.r',
target=['some_table.tex', 'some_figure.eps'],
deps='some_data.csv')
"""
import os, sys
from waflib import Task, TaskGen, Logs
R_COMMANDS = ['RTerm', 'R', 'r']
def configure(ctx):
    """Locate the R executable and store the batch-mode flags in the env."""
    ctx.find_program(R_COMMANDS, var='RCMD', errmsg = """\n
No R executable found!\n\n
If R is needed:\n
1) Check the settings of your system path.
2) Note we are looking for R executables called: %s
If yours has a different name, please report to hmgaudecker [at] gmail\n
Else:\n
Do not load the 'run_r_script' tool in the main wscript.\n\n""" % R_COMMANDS)
    # --slave suppresses R's startup banner and echoes in batch output.
    ctx.env.RFLAGS = 'CMD BATCH --slave'
class run_r_script_base(Task.Task):
    """Run a R script."""
    # Invocation: R CMD BATCH --slave <script> <logfile>.
    # NOTE(review): shell=True presumably so the embedded quotes around the
    # paths are honored -- confirm before changing.
    run_str = '"${RCMD}" ${RFLAGS} "${SRC[0].abspath()}" "${LOGFILEPATH}"'
    shell = True
class run_r_script(run_r_script_base):
    """Erase the R overall log file if everything went okay, else raise an
    error and print its 10 last lines.
    """
    def run(self):
        ret = run_r_script_base.run(self)
        logfile = self.env.LOGFILEPATH
        if ret:
            mode = 'r'
            if sys.version_info.major >= 3:
                mode = 'rb'
            with open(logfile, mode=mode) as f:
                tail = f.readlines()[-10:]
            if sys.version_info.major >= 3:
                # BUG FIX: in binary mode the lines are bytes, so the former
                # '\n'.join(tail) raised TypeError instead of reporting the
                # R error; decode each line first ('replace' keeps it safe
                # for arbitrary log encodings).
                tail = [line.decode(errors='replace') for line in tail]
            Logs.error("""Running R on %r returned the error %r\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""",
                self.inputs[0], ret, logfile, '\n'.join(tail))
        else:
            # success: remove the per-task log so it does not clutter bldnode
            os.remove(logfile)
        return ret
@TaskGen.feature('run_r_script')
@TaskGen.before_method('process_source')
def apply_run_r_script(tg):
    """Task generator customising the options etc. to call R in batch
    mode for running a R script.
    """
    # Resolve the script and the declared outputs to waf nodes.
    script = tg.path.find_resource(tg.source)
    targets = [tg.path.find_or_declare(tgt) for tgt in tg.to_list(tg.target)]

    task = tg.create_task('run_r_script', src=script, tgt=targets)
    # Per-task log file in the build directory: <scriptname>_<idx>.log
    task.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s_%d.log' % (os.path.splitext(script.name)[0], tg.idx))

    # dependencies (if the attribute 'deps' changes, trigger a recompilation)
    for dep in tg.to_list(getattr(tg, 'deps', [])):
        dep_node = tg.path.find_resource(dep)
        if not dep_node:
            tg.bld.fatal('Could not find dependency %r for running %r' % (dep, script.abspath()))
        task.dep_nodes.append(dep_node)
    Logs.debug('deps: found dependencies %r for running %r', task.dep_nodes, script.abspath())

    # Bypass the execution of process_source by setting the source to an empty list
    tg.source = []
|
hroncok/freeipa | refs/heads/master | ipatests/test_integration/test_dnssec.py | 5 | #
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
import dns.dnssec
import dns.resolver
import dns.name
import time
from ipatests.test_integration.base import IntegrationTest
from ipatests.test_integration import tasks
from ipaplatform.paths import paths
test_zone = "dnssec.test."
test_zone_repl = "dnssec-replica.test."
root_zone = "."
example_test_zone = "example.test."
def resolve_with_dnssec(nameserver, query, log, rtype="SOA"):
res = dns.resolver.Resolver()
res.nameservers = [nameserver]
res.lifetime = 10 # wait max 10 seconds for reply
# enable Authenticated Data + Checking Disabled flags
res.set_flags(dns.flags.AD | dns.flags.CD)
# enable EDNS v0 + enable DNSSEC-Ok flag
res.use_edns(0, dns.flags.DO, 0)
ans = res.query(query, rtype)
return ans
def is_record_signed(nameserver, query, log, rtype="SOA"):
try:
ans = resolve_with_dnssec(nameserver, query, log, rtype=rtype)
ans.response.find_rrset(ans.response.answer, dns.name.from_text(query),
dns.rdataclass.IN, dns.rdatatype.RRSIG,
dns.rdatatype.from_text(rtype))
except KeyError:
return False
except dns.exception.DNSException:
return False
return True
def wait_until_record_is_signed(nameserver, record, log, rtype="SOA",
timeout=100):
"""
Returns True if record is signed, or False on timeout
:param nameserver: nameserver to query
:param record: query
:param log: logger
:param rtype: record type
:param timeout:
:return: True if records is signed, False if timeout
"""
log.info("Waiting for signed %s record of %s from server %s (timeout %s "
"sec)", rtype, record, nameserver, timeout)
wait_until = time.time() + timeout
while time.time() < wait_until:
if is_record_signed(nameserver, record, log, rtype=rtype):
return True
time.sleep(1)
return False
class TestInstallDNSSECLast(IntegrationTest):
"""Simple DNSSEC test
Install a server and a replica with DNS, then reinstall server
as DNSSEC master
"""
num_replicas = 1
topology = 'star'
@classmethod
def install(cls, mh):
tasks.install_master(cls.master, setup_dns=True)
tasks.install_replica(cls.master, cls.replicas[0], setup_dns=True)
def test_install_dnssec_master(self):
"""Both master and replica have DNS installed"""
args = [
"ipa-dns-install",
"--dnssec-master",
"--forwarder", self.master.config.dns_forwarder,
"-p", self.master.config.dirman_password,
"-U",
]
self.master.run_command(args)
def test_if_zone_is_signed_master(self):
# add zone with enabled DNSSEC signing on master
args = [
"ipa",
"dnszone-add", test_zone,
"--dnssec", "true",
]
self.master.run_command(args)
# test master
assert wait_until_record_is_signed(
self.master.ip, test_zone, self.log, timeout=100
), "Zone %s is not signed (master)" % test_zone
# test replica
assert wait_until_record_is_signed(
self.replicas[0].ip, test_zone, self.log, timeout=200
), "DNS zone %s is not signed (replica)" % test_zone
def test_if_zone_is_signed_replica(self):
# add zone with enabled DNSSEC signing on replica
args = [
"ipa",
"dnszone-add", test_zone_repl,
"--dnssec", "true",
]
self.replicas[0].run_command(args)
# test replica
assert wait_until_record_is_signed(
self.replicas[0].ip, test_zone_repl, self.log, timeout=300
), "Zone %s is not signed (replica)" % test_zone_repl
# we do not need to wait, on master zones should be singed faster
# than on replicas
assert wait_until_record_is_signed(
self.master.ip, test_zone_repl, self.log, timeout=5
), "DNS zone %s is not signed (master)" % test_zone
class TestInstallDNSSECFirst(IntegrationTest):
"""Simple DNSSEC test
Install the server with DNSSEC and then install the replica with DNS
"""
num_replicas = 1
topology = 'star'
@classmethod
def install(cls, mh):
tasks.install_master(cls.master, setup_dns=False)
args = [
"ipa-dns-install",
"--dnssec-master",
"--forwarder", cls.master.config.dns_forwarder,
"-p", cls.master.config.dirman_password,
"-U",
]
cls.master.run_command(args)
tasks.install_replica(cls.master, cls.replicas[0], setup_dns=True)
# backup trusted key
tasks.backup_file(cls.master, paths.DNSSEC_TRUSTED_KEY)
tasks.backup_file(cls.replicas[0], paths.DNSSEC_TRUSTED_KEY)
@classmethod
def uninstall(cls, mh):
# restore trusted key
tasks.restore_files(cls.master)
tasks.restore_files(cls.replicas[0])
super(TestInstallDNSSECFirst, cls).uninstall(mh)
def test_sign_root_zone(self):
args = [
"ipa", "dnszone-add", root_zone, "--dnssec", "true"
]
self.master.run_command(args)
# make BIND happy, and delegate zone which contains A record of master
args = [
"ipa", "dnsrecord-add", root_zone, self.master.domain.name,
"--ns-rec=" + self.master.hostname
]
self.master.run_command(args)
# test master
assert wait_until_record_is_signed(
self.master.ip, root_zone, self.log, timeout=100
), "Zone %s is not signed (master)" % root_zone
# test replica
assert wait_until_record_is_signed(
self.replicas[0].ip, root_zone, self.log, timeout=300
), "Zone %s is not signed (replica)" % root_zone
def test_chain_of_trust(self):
"""
Validate signed DNS records, using our own signed root zone
:return:
"""
# add test zone
args = [
"ipa", "dnszone-add", example_test_zone, "--dnssec", "true"
]
self.master.run_command(args)
# wait until zone is signed
assert wait_until_record_is_signed(
self.master.ip, example_test_zone, self.log, timeout=100
), "Zone %s is not signed (master)" % example_test_zone
# GET DNSKEY records from zone
ans = resolve_with_dnssec(self.master.ip, example_test_zone, self.log,
rtype="DNSKEY")
dnskey_rrset = ans.response.get_rrset(
ans.response.answer,
dns.name.from_text(example_test_zone),
dns.rdataclass.IN,
dns.rdatatype.DNSKEY)
assert dnskey_rrset, "No DNSKEY records received"
self.log.debug("DNSKEY records returned: %s", dnskey_rrset.to_text())
# generate DS records
ds_records = []
for key_rdata in dnskey_rrset:
if key_rdata.flags != 257:
continue # it is not KSK
ds_records.append(dns.dnssec.make_ds(example_test_zone, key_rdata,
'sha256'))
assert ds_records, ("No KSK returned from the %s zone" %
example_test_zone)
self.log.debug("DS records for %s created: %r", example_test_zone,
ds_records)
# add DS records to root zone
args = [
"ipa", "dnsrecord-add", root_zone, example_test_zone,
# DS record requires to coexists with NS
"--ns-rec", self.master.hostname,
]
for ds in ds_records:
args.append("--ds-rec")
args.append(ds.to_text())
self.master.run_command(args)
# extract DSKEY from root zone
ans = resolve_with_dnssec(self.master.ip, root_zone, self.log,
rtype="DNSKEY")
dnskey_rrset = ans.response.get_rrset(ans.response.answer,
dns.name.from_text(root_zone),
dns.rdataclass.IN,
dns.rdatatype.DNSKEY)
assert dnskey_rrset, "No DNSKEY records received"
self.log.debug("DNSKEY records returned: %s", dnskey_rrset.to_text())
# export trust keys for root zone
root_key_rdatas = []
for key_rdata in dnskey_rrset:
if key_rdata.flags != 257:
continue # it is not KSK
root_key_rdatas.append(key_rdata)
assert root_key_rdatas, "No KSK returned from the root zone"
root_keys_rrset = dns.rrset.from_rdata_list(dnskey_rrset.name,
dnskey_rrset.ttl,
root_key_rdatas)
self.log.debug("Root zone trusted key: %s", root_keys_rrset.to_text())
# set trusted key for our root zone
self.master.put_file_contents(paths.DNSSEC_TRUSTED_KEY,
root_keys_rrset.to_text() + '\n')
self.replicas[0].put_file_contents(paths.DNSSEC_TRUSTED_KEY,
root_keys_rrset.to_text() + '\n')
# verify signatures
args = [
"drill", "@localhost", "-k",
paths.DNSSEC_TRUSTED_KEY, "-S",
example_test_zone, "SOA"
]
# test if signature chains are valid
self.master.run_command(args)
self.replicas[0].run_command(args)
|
emakarov/django-parakeet | refs/heads/master | djparakeet/routing.py | 1 | from channels import route
from .consumers import ws_message, ws_connect, ws_disconnect
# Map django-channels (v1) websocket events to the chat consumer callbacks.
websocket_routing = [
    route("websocket.receive", ws_message),
    route("websocket.connect", ws_connect),
    route("websocket.disconnect", ws_disconnect),
]
|
mrquim/repository.mrquim | refs/heads/master | plugin.video.netflix/resources/lib/UniversalAnalytics/__init__.py | 19 | import Tracker |
alberto-antonietti/nest-simulator | refs/heads/master | pynest/nest/tests/test_getconnections.py | 3 | # -*- coding: utf-8 -*-
#
# test_getconnections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
GetConnections
"""
import unittest
import nest
@nest.ll_api.check_stack
class GetConnectionsTestCase(unittest.TestCase):
    """Find connections and test if values can be set."""

    def test_GetConnections(self):
        """GetConnections"""
        nest.ResetKernel()

        a = nest.Create("iaf_psc_alpha", 3)
        nest.Connect(a, a)  # all-to-all: 3 x 3 = 9 connections

        c1 = nest.GetConnections(a)
        c2 = nest.GetConnections(a, synapse_model="static_synapse")
        # Only static synapses exist, so filtering by model is a no-op.
        self.assertEqual(c1, c2)

        weights = (2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0)
        d1 = tuple({"weight": w} for w in weights)

        c3 = nest.GetConnections(a, a)
        nest.SetStatus(c3, d1)
        s1 = nest.GetStatus(c3, "weight")
        self.assertEqual(s1, weights)

        c4 = nest.GetConnections()
        self.assertEqual(c1, c4)

        # Same round-trip through the SynapseCollection set()/get() API.
        weights = (11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0)
        d1 = tuple({"weight": w} for w in weights)

        c5 = nest.GetConnections(a, a)
        c5.set(d1)
        s2 = c5.get('weight')
        # get() returns a list, unlike GetStatus which returned a tuple.
        self.assertEqual(s2, list(weights))

        c6 = nest.GetConnections()
        self.assertEqual(c1, c6)

    def test_GetConnectionsTargetModels(self):
        """GetConnections iterating models for target"""
        for model in nest.Models():
            alpha = nest.Create('iaf_psc_alpha')
            try:
                other = nest.Create(model)
                nest.Connect(alpha, other)
            except nest.kernel.NESTError:
                # If we can't create a node with this model, or connect
                # to a node of this model, we ignore it.
                continue
            conns = nest.GetConnections(alpha, other)
            self.assertEqual(
                len(conns), 1,
                'Failed to get connection with target model {}'.format(model))

    def test_GetConnectionsSourceModels(self):
        """GetConnections iterating models for source"""
        for model in nest.Models():
            alpha = nest.Create('iaf_psc_alpha')
            try:
                other = nest.Create(model)
                nest.Connect(other, alpha)
            except nest.kernel.NESTError:
                # If we can't create a node with this model, or connect
                # to a node of this model, we ignore it.
                continue
            conns = nest.GetConnections(other, alpha)
            self.assertEqual(
                len(conns), 1,
                'Failed to get connection with source model {}'.format(model))
def suite():
    """Return the test suite for this module.

    Uses TestLoader.loadTestsFromTestCase instead of unittest.makeSuite,
    which is deprecated since Python 3.11 and removed in 3.13; the default
    method prefix 'test' matches the old explicit argument.
    """
    return unittest.TestLoader().loadTestsFromTestCase(GetConnectionsTestCase)
def run():
    """Execute the module's test suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())
if __name__ == "__main__":
    # Allow running this test module directly, outside the NEST test harness.
    run()
|
jhseu/tensorflow | refs/heads/master | tensorflow/python/autograph/pyct/testing/decorators.py | 24 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module with test decorators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
def wrapping_decorator(f):
  """Decorator that forwards all calls to `f`, preserving its metadata.

  Uses functools.wraps, so the returned callable keeps f's __name__,
  __doc__, etc.
  """

  @functools.wraps(f)
  def wrapper(*call_args, **call_kwargs):
    return f(*call_args, **call_kwargs)

  return wrapper
def standalone_decorator(f):
  """Decorator that forwards all calls to `f` WITHOUT functools.wraps.

  The returned callable therefore reports __name__ 'standalone_wrapper'.
  """

  def standalone_wrapper(*call_args, **call_kwargs):
    return f(*call_args, **call_kwargs)

  return standalone_wrapper
def functional_decorator():
  """Decorator factory: call it to obtain a plain forwarding decorator.

  Neither level uses functools.wraps, so the decorated callable reports
  __name__ 'functional_wrapper'.
  """

  def decorator(f):
    def functional_wrapper(*call_args, **call_kwargs):
      return f(*call_args, **call_kwargs)
    return functional_wrapper

  return decorator
|
PennartLoettring/Poettrix | refs/heads/master | rootfs/usr/lib/python3.4/xmlrpc/client.py | 69 | #
# XML-RPC CLIENT LIBRARY
# $Id$
#
# an XML-RPC client interface for Python.
#
# the marshalling and response parser code can also be used to
# implement XML-RPC servers.
#
# Notes:
# this version is designed to work with Python 2.1 or newer.
#
# History:
# 1999-01-14 fl Created
# 1999-01-15 fl Changed dateTime to use localtime
# 1999-01-16 fl Added Binary/base64 element, default to RPC2 service
# 1999-01-19 fl Fixed array data element (from Skip Montanaro)
# 1999-01-21 fl Fixed dateTime constructor, etc.
# 1999-02-02 fl Added fault handling, handle empty sequences, etc.
# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro)
# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8)
# 2000-11-28 fl Changed boolean to check the truth value of its argument
# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches
# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1)
# 2001-03-28 fl Make sure response tuple is a singleton
# 2001-03-29 fl Don't require empty params element (from Nicholas Riley)
# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2)
# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from Paul Prescod)
# 2001-09-03 fl Allow Transport subclass to override getparser
# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup)
# 2001-10-01 fl Remove containers from memo cache when done with them
# 2001-10-01 fl Use faster escape method (80% dumps speedup)
# 2001-10-02 fl More dumps microtuning
# 2001-10-04 fl Make sure import expat gets a parser (from Guido van Rossum)
# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow
# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems)
# 2001-11-12 fl Use repr() to marshal doubles (from Paul Felix)
# 2002-03-17 fl Avoid buffered read when possible (from James Rucker)
# 2002-04-07 fl Added pythondoc comments
# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers
# 2002-05-15 fl Added error constants (from Andrew Kuchling)
# 2002-06-27 fl Merged with Python CVS version
# 2002-10-22 fl Added basic authentication (based on code from Phillip Eby)
# 2003-01-22 sm Add support for the bool type
# 2003-02-27 gvr Remove apply calls
# 2003-04-24 sm Use cStringIO if available
# 2003-04-25 ak Add support for nil
# 2003-06-15 gn Add support for time.struct_time
# 2003-07-12 gp Correct marshalling of Faults
# 2003-10-31 mvl Add multicall support
# 2004-08-20 mvl Bump minimum supported Python version to 2.1
#
# Copyright (c) 1999-2002 by Secret Labs AB.
# Copyright (c) 1999-2002 by Fredrik Lundh.
#
# info@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The XML-RPC client interface is
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
An XML-RPC client interface for Python.
The marshalling and response parser code can also be used to
implement XML-RPC servers.
Exported exceptions:
Error Base class for client errors
ProtocolError Indicates an HTTP protocol error
ResponseError Indicates a broken response package
Fault Indicates an XML-RPC fault package
Exported classes:
ServerProxy Represents a logical connection to an XML-RPC server
MultiCall Executor of boxcared xmlrpc requests
DateTime dateTime wrapper for an ISO 8601 string or time tuple or
localtime integer value to generate a "dateTime.iso8601"
XML-RPC value
Binary binary data wrapper
Marshaller Generate an XML-RPC params chunk from a Python data structure
Unmarshaller Unmarshal an XML-RPC response from incoming XML event message
Transport Handles an HTTP transaction to an XML-RPC server
SafeTransport Handles an HTTPS transaction to an XML-RPC server
Exported constants:
(none)
Exported functions:
getparser Create instance of the fastest available parser & attach
to an unmarshalling object
dumps Convert an argument tuple or a Fault instance to an XML-RPC
request (or response, if the methodresponse option is used).
loads Convert an XML-RPC packet to unmarshalled data plus a method
name (None if not present).
"""
import base64
import sys
import time
from datetime import datetime
import http.client
import urllib.parse
from xml.parsers import expat
import errno
from io import BytesIO
try:
import gzip
except ImportError:
gzip = None #python can be built without zlib/gzip support
# --------------------------------------------------------------------
# Internal stuff
def escape(s):
    """Escape the XML special characters ``&``, ``<`` and ``>`` in *s*."""
    # '&' must be replaced first so the entities produced by the other
    # substitutions are not themselves re-escaped.
    return s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
# used in User-Agent header sent
# NOTE(review): sys.version[:3] breaks for two-digit minor versions
# (e.g. "3.10" -> "3.1") -- confirm against the targeted Python range.
__version__ = sys.version[:3]

# xmlrpc integer limits: XML-RPC's <int> is a signed 32-bit value.
MAXINT =  2**31-1
MININT = -2**31

# --------------------------------------------------------------------
# Error constants (from Dan Libby's specification at
# http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php)

# Ranges of errors
PARSE_ERROR       = -32700
SERVER_ERROR      = -32600
APPLICATION_ERROR = -32500
SYSTEM_ERROR      = -32400
TRANSPORT_ERROR   = -32300

# Specific errors
NOT_WELLFORMED_ERROR  = -32700
UNSUPPORTED_ENCODING  = -32701
INVALID_ENCODING_CHAR = -32702
INVALID_XMLRPC        = -32600
METHOD_NOT_FOUND      = -32601
INVALID_METHOD_PARAMS = -32602
INTERNAL_ERROR        = -32603
# --------------------------------------------------------------------
# Exceptions
##
# Base class for all kinds of client-side errors.
class Error(Exception):
    """Base class for client errors.

    All exceptions raised by this module derive from Error, so callers
    can catch this one type to handle any client-side XML-RPC failure.
    """
    def __str__(self):
        # Delegate to repr() so subclasses only need to define __repr__.
        return repr(self)
##
# Indicates an HTTP-level protocol error. This is raised by the HTTP
# transport layer, if the server returns an error code other than 200
# (OK).
#
# @param url The target URL.
# @param errcode The HTTP error code.
# @param errmsg The HTTP error message.
# @param headers The HTTP header dictionary.
class ProtocolError(Error):
    """Indicates an HTTP protocol error.

    Raised by the transport layer when the server answers with an HTTP
    status other than 200 OK; carries the URL, status code, reason and
    response headers of the failed request.
    """

    def __init__(self, url, errcode, errmsg, headers):
        super().__init__()
        self.url, self.errcode = url, errcode
        self.errmsg, self.headers = errmsg, headers

    def __repr__(self):
        return "<ProtocolError for %s: %s %s>" % (
            self.url, self.errcode, self.errmsg)
##
# Indicates a broken XML-RPC response package. This exception is
# raised by the unmarshalling layer, if the XML-RPC response is
# malformed.
class ResponseError(Error):
    """Indicates a broken response package.

    Raised by the unmarshalling layer when the XML-RPC response is
    malformed (e.g. no <params>/<fault> section was seen or an
    array/struct was left unterminated).
    """
    pass
##
# Indicates an XML-RPC fault response package. This exception is
# raised by the unmarshalling layer, if the XML-RPC response contains
# a fault string. This exception can also be used as a class, to
# generate a fault XML-RPC message.
#
# @param faultCode The XML-RPC fault code.
# @param faultString The XML-RPC fault string.
class Fault(Error):
    """Indicates an XML-RPC fault package.

    Raised when a response contains a fault struct; can also be
    instantiated directly to generate a fault message.
    """

    def __init__(self, faultCode, faultString, **extra):
        # Extra keyword arguments are accepted (and discarded) so fault
        # structs carrying additional members still unpack cleanly.
        super().__init__()
        self.faultCode = faultCode
        self.faultString = faultString

    def __repr__(self):
        return "<Fault %s: %r>" % (self.faultCode, self.faultString)
# --------------------------------------------------------------------
# Special values

##
# Backwards compatibility: older code imported these wrapper names for
# XML-RPC booleans; the builtin bool has been sufficient since 2.3.
boolean = Boolean = bool
##
# Wrapper for XML-RPC DateTime values. This converts a time value to
# the format used by XML-RPC.
# <p>
# The value can be given as a datetime object, as a string in the
# format "yyyymmddThh:mm:ss", as a 9-item time tuple (as returned by
# time.localtime()), or an integer value (as returned by time.time()).
# The wrapper uses time.localtime() to convert an integer to a time
# tuple.
#
# @param value The time, given as a datetime object, an ISO 8601 string,
# a time tuple, or an integer time value.
# Issue #13305: strftime's handling of years below 1000 differs across
# platforms, so probe once at import time and bind whichever formatter
# reliably yields a zero-padded yyyymmddThh:mm:ss string.
_day0 = datetime(1, 1, 1)
if _day0.strftime('%Y') == '0001':      # Mac OS X
    def _iso8601_format(value):
        # Render *value* (a datetime) as an ISO 8601 dateTime string.
        return value.strftime("%Y%m%dT%H:%M:%S")
elif _day0.strftime('%4Y') == '0001':   # Linux
    def _iso8601_format(value):
        # Same as above, but this platform needs an explicit width flag.
        return value.strftime("%4Y%m%dT%H:%M:%S")
else:
    def _iso8601_format(value):
        # Fallback: pad manually to the full 17-character width.
        return value.strftime("%Y%m%dT%H:%M:%S").zfill(17)
del _day0
def _strftime(value):
if isinstance(value, datetime):
return _iso8601_format(value)
if not isinstance(value, (tuple, time.struct_time)):
if value == 0:
value = time.time()
value = time.localtime(value)
return "%04d%02d%02dT%02d:%02d:%02d" % value[:6]
class DateTime:
    """DateTime wrapper for an ISO 8601 string or time tuple or
    localtime integer value to generate 'dateTime.iso8601' XML-RPC
    value.
    """

    def __init__(self, value=0):
        # Strings are stored verbatim; anything else is rendered into
        # the yyyymmddThh:mm:ss format by _strftime().
        if isinstance(value, str):
            self.value = value
        else:
            self.value = _strftime(value)

    def make_comparable(self, other):
        """Return a (self, other) pair coerced to comparable values.

        The order of the isinstance checks is significant: DateTime and
        datetime compare via the ISO string, plain strings compare
        directly, and any other object exposing timetuple() compares by
        time tuple.  Raises TypeError for anything else.
        """
        if isinstance(other, DateTime):
            s = self.value
            o = other.value
        elif isinstance(other, datetime):
            s = self.value
            o = _iso8601_format(other)
        elif isinstance(other, str):
            s = self.value
            o = other
        elif hasattr(other, "timetuple"):
            s = self.timetuple()
            o = other.timetuple()
        else:
            otype = (hasattr(other, "__class__")
                     and other.__class__.__name__
                     or type(other))
            raise TypeError("Can't compare %s and %s" %
                            (self.__class__.__name__, otype))
        return s, o

    def __lt__(self, other):
        s, o = self.make_comparable(other)
        return s < o

    def __le__(self, other):
        s, o = self.make_comparable(other)
        return s <= o

    def __gt__(self, other):
        s, o = self.make_comparable(other)
        return s > o

    def __ge__(self, other):
        s, o = self.make_comparable(other)
        return s >= o

    def __eq__(self, other):
        s, o = self.make_comparable(other)
        return s == o

    def __ne__(self, other):
        s, o = self.make_comparable(other)
        return s != o

    def timetuple(self):
        # Parse the stored ISO string back into a time.struct_time.
        return time.strptime(self.value, "%Y%m%dT%H:%M:%S")

    ##
    # Get date/time value.
    #
    # @return Date/time value, as an ISO 8601 string.

    def __str__(self):
        return self.value

    def __repr__(self):
        return "<DateTime %r at %x>" % (self.value, id(self))

    def decode(self, data):
        # Accept XML element character data; strip surrounding noise.
        self.value = str(data).strip()

    def encode(self, out):
        # Emit this value as a <dateTime.iso8601> XML-RPC element.
        out.write("<value><dateTime.iso8601>")
        out.write(self.value)
        out.write("</dateTime.iso8601></value>\n")
def _datetime(data):
    """Decode XML element contents into a DateTime wrapper instance."""
    wrapper = DateTime()
    wrapper.decode(data)
    return wrapper
def _datetime_type(data):
return datetime.strptime(data, "%Y%m%dT%H:%M:%S")
##
# Wrapper for binary data. This can be used to transport any kind
# of binary data over XML-RPC, using BASE64 encoding.
#
# @param data An 8-bit string containing arbitrary data.
class Binary:
    """Wrapper for binary data, transported over XML-RPC as base64."""

    def __init__(self, data=None):
        """Wrap *data*; bytes/bytearray are copied, None means empty."""
        if data is None:
            self.data = b""
            return
        if not isinstance(data, (bytes, bytearray)):
            raise TypeError("expected bytes or bytearray, not %s" %
                            data.__class__.__name__)
        # Copy defensively: a bytearray caller could mutate it later.
        self.data = bytes(data)

    ##
    # Get buffer contents.
    #
    # @return Buffer contents, as an 8-bit string.

    def __str__(self):
        return str(self.data, "latin-1")  # XXX encoding?!

    def __eq__(self, other):
        payload = other.data if isinstance(other, Binary) else other
        return self.data == payload

    def __ne__(self, other):
        payload = other.data if isinstance(other, Binary) else other
        return self.data != payload

    def decode(self, data):
        """Replace the payload with the base64-decoded *data* (bytes)."""
        self.data = base64.decodebytes(data)

    def encode(self, out):
        """Write this payload to *out* as a <base64> XML-RPC value."""
        out.write("<value><base64>\n")
        out.write(base64.encodebytes(self.data).decode('ascii'))
        out.write("</base64></value>\n")
def _binary(data):
    # Decode base64-encoded XML element contents into a Binary wrapper.
    value = Binary()
    value.decode(data)
    return value

# Wrapper classes whose encode() method Marshaller.dump_instance defers to.
WRAPPERS = (DateTime, Binary)
# --------------------------------------------------------------------
# XML parsers
class ExpatParser:
    # Fast expat parser for Python 2.0 and later.  Wires the expat
    # start/end/character-data callbacks straight into the target
    # (an Unmarshaller-like object exposing start/end/data/xml).
    def __init__(self, target):
        self._parser = parser = expat.ParserCreate(None, None)
        self._target = target
        parser.StartElementHandler = target.start
        parser.EndElementHandler = target.end
        parser.CharacterDataHandler = target.data
        encoding = None
        target.xml(encoding, None)

    def feed(self, data):
        # Push a chunk of request/response data into the parser.
        self._parser.Parse(data, 0)

    def close(self):
        self._parser.Parse("", 1) # end of data
        del self._target, self._parser # get rid of circular references
# --------------------------------------------------------------------
# XML-RPC marshalling and unmarshalling code
##
# XML-RPC marshaller.
#
# @param encoding Default encoding for 8-bit strings. The default
# value is None (interpreted as UTF-8).
# @see dumps
class Marshaller:
    """Generate an XML-RPC params chunk from a Python data structure.

    Create a Marshaller instance for each set of parameters, and use
    the "dumps" method to convert your data (represented as a tuple)
    to an XML-RPC params chunk. To write a fault response, pass a
    Fault instance instead. You may prefer to use the "dumps" module
    function for this purpose.
    """

    # by the way, if you don't understand what's going on in here,
    # that's perfectly ok.

    def __init__(self, encoding=None, allow_none=False):
        # memo holds id()s of containers being marshalled, to detect
        # recursive sequences/dictionaries.
        self.memo = {}
        self.data = None
        self.encoding = encoding
        self.allow_none = allow_none

    # Maps Python types to the matching dump_* method.  It is populated
    # by the class-body statements following each method definition.
    dispatch = {}

    def dumps(self, values):
        """Marshal *values* (a tuple or a Fault) and return the XML chunk."""
        out = []
        write = out.append
        dump = self.__dump
        if isinstance(values, Fault):
            # fault instance
            write("<fault>\n")
            dump({'faultCode': values.faultCode,
                  'faultString': values.faultString},
                 write)
            write("</fault>\n")
        else:
            # parameter block
            # FIXME: the xml-rpc specification allows us to leave out
            # the entire <params> block if there are no parameters.
            # however, changing this may break older code (including
            # old versions of xmlrpclib.py), so this is better left as
            # is for now. See @XMLRPC3 for more information. /F
            write("<params>\n")
            for v in values:
                write("<param>\n")
                dump(v, write)
                write("</param>\n")
            write("</params>\n")
        result = "".join(out)
        return result

    def __dump(self, value, write):
        # Dispatch on the exact type; unknown instances fall back to
        # being marshalled as structs of their __dict__.
        try:
            f = self.dispatch[type(value)]
        except KeyError:
            # check if this object can be marshalled as a structure
            if not hasattr(value, '__dict__'):
                raise TypeError("cannot marshal %s objects" % type(value))
            # check if this class is a sub-class of a basic type,
            # because we don't know how to marshal these types
            # (e.g. a string sub-class)
            for type_ in type(value).__mro__:
                if type_ in self.dispatch.keys():
                    raise TypeError("cannot marshal %s objects" % type(value))
            # XXX(twouters): using "_arbitrary_instance" as key as a quick-fix
            # for the p3yk merge, this should probably be fixed more neatly.
            f = self.dispatch["_arbitrary_instance"]
        f(self, value, write)

    def dump_nil(self, value, write):
        if not self.allow_none:
            raise TypeError("cannot marshal None unless allow_none is enabled")
        write("<value><nil/></value>")
    dispatch[type(None)] = dump_nil

    def dump_bool(self, value, write):
        write("<value><boolean>")
        write(value and "1" or "0")
        write("</boolean></value>\n")
    dispatch[bool] = dump_bool

    def dump_long(self, value, write):
        # XML-RPC <int> is signed 32-bit; reject anything outside it.
        if value > MAXINT or value < MININT:
            raise OverflowError("int exceeds XML-RPC limits")
        write("<value><int>")
        write(str(int(value)))
        write("</int></value>\n")
    dispatch[int] = dump_long

    # backward compatible
    dump_int = dump_long

    def dump_double(self, value, write):
        write("<value><double>")
        write(repr(value))
        write("</double></value>\n")
    dispatch[float] = dump_double

    def dump_unicode(self, value, write, escape=escape):
        write("<value><string>")
        write(escape(value))
        write("</string></value>\n")
    dispatch[str] = dump_unicode

    def dump_bytes(self, value, write):
        write("<value><base64>\n")
        encoded = base64.encodebytes(value)
        write(encoded.decode('ascii'))
        write("</base64></value>\n")
    dispatch[bytes] = dump_bytes
    dispatch[bytearray] = dump_bytes

    def dump_array(self, value, write):
        i = id(value)
        if i in self.memo:
            raise TypeError("cannot marshal recursive sequences")
        self.memo[i] = None
        dump = self.__dump
        write("<value><array><data>\n")
        for v in value:
            dump(v, write)
        write("</data></array></value>\n")
        del self.memo[i]
    dispatch[tuple] = dump_array
    dispatch[list] = dump_array

    def dump_struct(self, value, write, escape=escape):
        i = id(value)
        if i in self.memo:
            raise TypeError("cannot marshal recursive dictionaries")
        self.memo[i] = None
        dump = self.__dump
        write("<value><struct>\n")
        for k, v in value.items():
            write("<member>\n")
            if not isinstance(k, str):
                raise TypeError("dictionary key must be string")
            write("<name>%s</name>\n" % escape(k))
            dump(v, write)
            write("</member>\n")
        write("</struct></value>\n")
        del self.memo[i]
    dispatch[dict] = dump_struct

    def dump_datetime(self, value, write):
        write("<value><dateTime.iso8601>")
        write(_strftime(value))
        write("</dateTime.iso8601></value>\n")
    dispatch[datetime] = dump_datetime

    def dump_instance(self, value, write):
        # check for special wrappers
        if value.__class__ in WRAPPERS:
            # Wrappers write themselves; expose write() so their
            # encode(self) can emit through this marshaller.
            self.write = write
            value.encode(self)
            del self.write
        else:
            # store instance attributes as a struct (really?)
            self.dump_struct(value.__dict__, write)
    dispatch[DateTime] = dump_instance
    dispatch[Binary] = dump_instance
    # XXX(twouters): using "_arbitrary_instance" as key as a quick-fix
    # for the p3yk merge, this should probably be fixed more neatly.
    dispatch["_arbitrary_instance"] = dump_instance
##
# XML-RPC unmarshaller.
#
# @see loads
class Unmarshaller:
    """Unmarshal an XML-RPC response, based on incoming XML event
    messages (start, data, end). Call close() to get the resulting
    data structure.

    Note that this reader is fairly tolerant, and gladly accepts bogus
    XML-RPC data without complaining (but not bogus XML).
    """

    # and again, if you don't understand what's going on in here,
    # that's perfectly ok.

    def __init__(self, use_datetime=False, use_builtin_types=False):
        self._type = None           # "params", "fault" or "methodName"
        self._stack = []            # decoded values (and struct member names)
        self._marks = []            # stack offsets where arrays/structs begin
        self._data = []             # character data of the current element
        self._methodname = None
        self._encoding = "utf-8"
        self.append = self._stack.append
        self._use_datetime = use_builtin_types or use_datetime
        self._use_bytes = use_builtin_types

    def close(self):
        """Return the response tuple, raising Fault for fault packets."""
        # return response tuple and target method
        if self._type is None or self._marks:
            raise ResponseError()
        if self._type == "fault":
            raise Fault(**self._stack[0])
        return tuple(self._stack)

    def getmethodname(self):
        # Method name of a methodCall packet, or None for responses.
        return self._methodname

    #
    # event handlers

    def xml(self, encoding, standalone):
        self._encoding = encoding
        # FIXME: assert standalone == 1 ???

    def start(self, tag, attrs):
        # prepare to handle this element
        if tag == "array" or tag == "struct":
            self._marks.append(len(self._stack))
        self._data = []
        # Remember whether we are directly inside a <value> element so a
        # value with no typed child can be treated as a string (end_value).
        self._value = (tag == "value")

    def data(self, text):
        self._data.append(text)

    def end(self, tag):
        # call the appropriate end tag handler
        try:
            f = self.dispatch[tag]
        except KeyError:
            pass # unknown tag ?
        else:
            return f(self, "".join(self._data))

    #
    # accelerator support

    def end_dispatch(self, tag, data):
        # dispatch data
        try:
            f = self.dispatch[tag]
        except KeyError:
            pass # unknown tag ?
        else:
            return f(self, data)

    #
    # element decoders

    # Maps tag names to end_* handlers; populated by the class-body
    # statements following each method definition.
    dispatch = {}

    def end_nil(self, data):
        self.append(None)
        self._value = 0
    dispatch["nil"] = end_nil

    def end_boolean(self, data):
        if data == "0":
            self.append(False)
        elif data == "1":
            self.append(True)
        else:
            raise TypeError("bad boolean value")
        self._value = 0
    dispatch["boolean"] = end_boolean

    def end_int(self, data):
        self.append(int(data))
        self._value = 0
    dispatch["i4"] = end_int
    dispatch["i8"] = end_int
    dispatch["int"] = end_int

    def end_double(self, data):
        self.append(float(data))
        self._value = 0
    dispatch["double"] = end_double

    def end_string(self, data):
        # NOTE(review): expat delivers str here, on which .decode() would
        # fail -- this decode path presumably serves byte-feeding
        # accelerators; confirm before relying on it.
        if self._encoding:
            data = data.decode(self._encoding)
        self.append(data)
        self._value = 0
    dispatch["string"] = end_string
    dispatch["name"] = end_string # struct keys are always strings

    def end_array(self, data):
        mark = self._marks.pop()
        # map arrays to Python lists
        self._stack[mark:] = [self._stack[mark:]]
        self._value = 0
    dispatch["array"] = end_array

    def end_struct(self, data):
        mark = self._marks.pop()
        # map structs to Python dictionaries; since the mark, the stack
        # holds alternating name/value entries.
        dict = {}
        items = self._stack[mark:]
        for i in range(0, len(items), 2):
            dict[items[i]] = items[i+1]
        self._stack[mark:] = [dict]
        self._value = 0
    dispatch["struct"] = end_struct

    def end_base64(self, data):
        value = Binary()
        value.decode(data.encode("ascii"))
        if self._use_bytes:
            value = value.data
        self.append(value)
        self._value = 0
    dispatch["base64"] = end_base64

    def end_dateTime(self, data):
        value = DateTime()
        value.decode(data)
        if self._use_datetime:
            value = _datetime_type(data)
        self.append(value)
    dispatch["dateTime.iso8601"] = end_dateTime

    def end_value(self, data):
        # if we stumble upon a value element with no internal
        # elements, treat it as a string element
        if self._value:
            self.end_string(data)
    dispatch["value"] = end_value

    def end_params(self, data):
        self._type = "params"
    dispatch["params"] = end_params

    def end_fault(self, data):
        self._type = "fault"
    dispatch["fault"] = end_fault

    def end_methodName(self, data):
        # NOTE(review): same str/bytes decode concern as end_string.
        if self._encoding:
            data = data.decode(self._encoding)
        self._methodname = data
        self._type = "methodName" # no params
    dispatch["methodName"] = end_methodName
## Multicall support
#
class _MultiCallMethod:
# some lesser magic to store calls made to a MultiCall object
# for batch execution
def __init__(self, call_list, name):
self.__call_list = call_list
self.__name = name
def __getattr__(self, name):
return _MultiCallMethod(self.__call_list, "%s.%s" % (self.__name, name))
def __call__(self, *args):
self.__call_list.append((self.__name, args))
class MultiCallIterator:
    """Iterates over the results of a multicall. Exceptions are
    raised in response to xmlrpc faults.

    Each item in the underlying results list is either a one-element
    list (a successful call, unwrapped on access) or a fault struct,
    in which case a Fault exception is raised.
    """

    def __init__(self, results):
        self.results = results

    def __getitem__(self, i):
        item = self.results[i]
        # isinstance() rather than exact type comparison, so dict/list
        # subclasses produced by a customized unmarshaller are handled too.
        if isinstance(item, dict):
            raise Fault(item['faultCode'], item['faultString'])
        elif isinstance(item, list):
            return item[0]
        else:
            raise ValueError("unexpected type in multicall result")
class MultiCall:
    """Boxcar several method calls into one system.multicall request.

    server should be a ServerProxy object.  Attribute access records
    calls using normal method-call syntax, e.g.::

        multicall = MultiCall(server_proxy)
        multicall.add(2, 3)
        multicall.get_address("Guido")

    Invoking the MultiCall object itself executes everything and
    returns an iterator over the results::

        add_result, address = multicall()
    """

    def __init__(self, server):
        self.__server = server
        self.__call_list = []

    def __repr__(self):
        return "<MultiCall at %x>" % id(self)

    __str__ = __repr__

    def __getattr__(self, name):
        return _MultiCallMethod(self.__call_list, name)

    def __call__(self):
        payload = [{'methodName': name, 'params': args}
                   for name, args in self.__call_list]
        return MultiCallIterator(self.__server.system.multicall(payload))
# --------------------------------------------------------------------
# convenience functions

# Optional accelerator hooks.  An extension module may rebind these;
# while they are None, the pure-Python Marshaller/Unmarshaller and the
# expat-based parser are used instead.
FastMarshaller = FastParser = FastUnmarshaller = None
##
# Create a parser object, and connect it to an unmarshalling instance.
# This function picks the fastest available XML parser.
#
# return A (parser, unmarshaller) tuple.
def getparser(use_datetime=False, use_builtin_types=False):
    """getparser() -> parser, unmarshaller

    Create an instance of the fastest available parser, and attach it
    to an unmarshalling object. Return both objects.
    """
    # Prefer the accelerator hooks when both are installed; otherwise
    # fall back to the pure-Python Unmarshaller (with FastParser or
    # ExpatParser feeding it).
    if FastParser and FastUnmarshaller:
        if use_builtin_types:
            # datetime.datetime and bytes for dateTime/base64 values.
            mkdatetime = _datetime_type
            mkbytes = base64.decodebytes
        elif use_datetime:
            mkdatetime = _datetime_type
            mkbytes = _binary
        else:
            # Module wrapper types (DateTime / Binary).
            mkdatetime = _datetime
            mkbytes = _binary
        target = FastUnmarshaller(True, False, mkbytes, mkdatetime, Fault)
        parser = FastParser(target)
    else:
        target = Unmarshaller(use_datetime=use_datetime, use_builtin_types=use_builtin_types)
        if FastParser:
            parser = FastParser(target)
        else:
            parser = ExpatParser(target)
    return parser, target
##
# Convert a Python tuple or a Fault instance to an XML-RPC packet.
#
# @def dumps(params, **options)
# @param params A tuple or Fault instance.
# @keyparam methodname If given, create a methodCall request for
# this method name.
# @keyparam methodresponse If given, create a methodResponse packet.
# If used with a tuple, the tuple must be a singleton (that is,
# it must contain exactly one element).
# @keyparam encoding The packet encoding.
# @return A string containing marshalled data.
def dumps(params, methodname=None, methodresponse=None, encoding=None,
          allow_none=False):
    """data [,options] -> marshalled data

    Convert an argument tuple or a Fault instance to an XML-RPC
    request (or response, if the methodresponse option is used).

    In addition to the data object, the following options can be given
    as keyword arguments:

        methodname: the method name for a methodCall packet

        methodresponse: true to create a methodResponse packet.
        If this option is used with a tuple, the tuple must be
        a singleton (i.e. it can contain only one element).

        encoding: the packet encoding (default is UTF-8)

    All byte strings in the data structure are assumed to use the
    packet encoding.  Unicode strings are automatically converted,
    where necessary.
    """

    assert isinstance(params, (tuple, Fault)), "argument must be tuple or Fault instance"
    if isinstance(params, Fault):
        # A Fault is always serialized as a response.
        methodresponse = 1
    elif methodresponse and isinstance(params, tuple):
        assert len(params) == 1, "response tuple must be a singleton"

    if not encoding:
        encoding = "utf-8"

    if FastMarshaller:
        m = FastMarshaller(encoding)
    else:
        m = Marshaller(encoding, allow_none)

    data = m.dumps(params)

    if encoding != "utf-8":
        xmlheader = "<?xml version='1.0' encoding='%s'?>\n" % str(encoding)
    else:
        xmlheader = "<?xml version='1.0'?>\n" # utf-8 is default

    # standard XML-RPC wrappings
    if methodname:
        # a method call
        if not isinstance(methodname, str):
            # NOTE(review): .encode() produces bytes here, which cannot be
            # joined with the surrounding str pieces below -- confirm
            # whether non-str method names are still meant to be supported.
            methodname = methodname.encode(encoding)
        data = (
            xmlheader,
            "<methodCall>\n"
            "<methodName>", methodname, "</methodName>\n",
            data,
            "</methodCall>\n"
            )
    elif methodresponse:
        # a method response, or a fault structure
        data = (
            xmlheader,
            "<methodResponse>\n",
            data,
            "</methodResponse>\n"
            )
    else:
        return data # return as is
    return "".join(data)
##
# Convert an XML-RPC packet to a Python object. If the XML-RPC packet
# represents a fault condition, this function raises a Fault exception.
#
# @param data An XML-RPC packet, given as an 8-bit string.
# @return A tuple containing the unpacked data, and the method name
# (None if not present).
# @see Fault
def loads(data, use_datetime=False, use_builtin_types=False):
    """data -> unmarshalled data, method name

    Convert an XML-RPC packet to unmarshalled data plus a method
    name (None if not present).

    If the XML-RPC packet represents a fault condition, this function
    raises a Fault exception.
    """
    parser, unmarshaller = getparser(use_datetime=use_datetime,
                                     use_builtin_types=use_builtin_types)
    parser.feed(data)
    parser.close()
    return unmarshaller.close(), unmarshaller.getmethodname()
##
# Encode a string using the gzip content encoding such as specified by the
# Content-Encoding: gzip
# in the HTTP header, as described in RFC 1952
#
# @param data the unencoded data
# @return the encoded data
def gzip_encode(data):
    """data -> gzip encoded data

    Encode *data* (bytes) using the gzip content encoding as described
    in RFC 1952, at the lowest/fastest compression level.

    Raises NotImplementedError if gzip support is unavailable.
    """
    if not gzip:
        raise NotImplementedError
    # gzip.compress replaces the hand-rolled GzipFile/BytesIO dance and
    # cannot leave file objects unclosed on error.
    return gzip.compress(data, 1)
##
# Decode a string using the gzip content encoding such as specified by the
# Content-Encoding: gzip
# in the HTTP header, as described in RFC 1952
#
# @param data The encoded data
# @return the unencoded data
# @raises ValueError if data is not correctly coded.
def gzip_decode(data):
    """gzip encoded data -> unencoded data

    Decode *data* using the gzip content encoding as described in
    RFC 1952.

    Raises NotImplementedError if gzip support is unavailable and
    ValueError if *data* is not correctly coded.
    """
    if not gzip:
        raise NotImplementedError
    # NOTE(review): decompression is unbounded -- a small hostile request
    # can expand enormously ("zip bomb"); callers handling untrusted
    # input should enforce a size limit.
    try:
        # gzip.decompress replaces the original BytesIO/GzipFile pair,
        # which leaked both objects when read() raised.
        return gzip.decompress(data)
    except OSError:
        raise ValueError("invalid data")
##
# Return a decoded file-like object for the gzip encoding
# as described in RFC 1952.
#
# @param response A stream supporting a read() method
# @return a file-like object that the decoded data can be read() from
class GzipDecodedResponse(gzip.GzipFile if gzip else object):
    """a file-like object to decode a response encoded with the gzip
    method, as described in RFC 1952.
    """
    def __init__(self, response):
        #response doesn't support tell() and read(), required by
        #GzipFile
        if not gzip:
            raise NotImplementedError
        # Buffer the entire body so GzipFile gets a seekable file object.
        self.io = BytesIO(response.read())
        gzip.GzipFile.__init__(self, mode="rb", fileobj=self.io)

    def close(self):
        # Close the GzipFile first, then the underlying buffer.
        gzip.GzipFile.close(self)
        self.io.close()
# --------------------------------------------------------------------
# request dispatcher
class _Method:
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
##
# Standard transport class for XML-RPC over HTTP.
# <p>
# You can create custom transports by subclassing this method, and
# overriding selected methods.
class Transport:
    """Handles an HTTP transaction to an XML-RPC server."""

    # client identifier (may be overridden)
    user_agent = "Python-xmlrpc/%s" % __version__

    #if true, we'll request gzip encoding
    accept_gzip_encoding = True

    # if positive, encode request using gzip if it exceeds this threshold
    # note that many server will get confused, so only use it if you know
    # that they can decode such a request
    encode_threshold = None #None = don't encode

    def __init__(self, use_datetime=False, use_builtin_types=False):
        self._use_datetime = use_datetime
        self._use_builtin_types = use_builtin_types
        # (host, connection) pair of the cached keep-alive connection.
        self._connection = (None, None)
        self._extra_headers = []

    ##
    # Send a complete request, and parse the response.
    # Retry request if a cached connection has disconnected.
    #
    # @param host Target host.
    # @param handler Target RPC handler.
    # @param request_body XML-RPC request body.
    # @param verbose Debugging flag.
    # @return Parsed response.

    def request(self, host, handler, request_body, verbose=False):
        #retry request once if cached connection has gone cold
        for i in (0, 1):
            try:
                return self.single_request(host, handler, request_body, verbose)
            except OSError as e:
                # Only retry on errors typical of a stale keep-alive
                # connection, and only on the first attempt.
                if i or e.errno not in (errno.ECONNRESET, errno.ECONNABORTED,
                                        errno.EPIPE):
                    raise
            except http.client.BadStatusLine: #close after we sent request
                if i:
                    raise

    def single_request(self, host, handler, request_body, verbose=False):
        # issue XML-RPC request
        try:
            http_conn = self.send_request(host, handler, request_body, verbose)
            resp = http_conn.getresponse()
            if resp.status == 200:
                self.verbose = verbose
                return self.parse_response(resp)
        except Fault:
            raise
        except Exception:
            #All unexpected errors leave connection in
            # a strange state, so we clear it.
            self.close()
            raise

        #We got an error response.
        #Discard any response data and raise exception
        if resp.getheader("content-length", ""):
            resp.read()
        raise ProtocolError(
            host + handler,
            resp.status, resp.reason,
            dict(resp.getheaders())
            )

    ##
    # Create parser.
    #
    # @return A 2-tuple containing a parser and a unmarshaller.

    def getparser(self):
        # get parser and unmarshaller
        return getparser(use_datetime=self._use_datetime,
                         use_builtin_types=self._use_builtin_types)

    ##
    # Get authorization info from host parameter
    # Host may be a string, or a (host, x509-dict) tuple; if a string,
    # it is checked for a "user:pw@host" format, and a "Basic
    # Authentication" header is added if appropriate.
    #
    # @param host Host descriptor (URL or (URL, x509 info) tuple).
    # @return A 3-tuple containing (actual host, extra headers,
    #     x509 info).  The header and x509 fields may be None.

    def get_host_info(self, host):
        x509 = {}
        if isinstance(host, tuple):
            host, x509 = host

        auth, host = urllib.parse.splituser(host)

        if auth:
            # Turn the "user:pw" credentials into a Basic auth header.
            auth = urllib.parse.unquote_to_bytes(auth)
            auth = base64.encodebytes(auth).decode("utf-8")
            auth = "".join(auth.split()) # get rid of whitespace
            extra_headers = [
                ("Authorization", "Basic " + auth)
                ]
        else:
            extra_headers = []

        return host, extra_headers, x509

    ##
    # Connect to server.
    #
    # @param host Target host.
    # @return An HTTPConnection object

    def make_connection(self, host):
        #return an existing connection if possible.  This allows
        #HTTP/1.1 keep-alive.
        if self._connection and host == self._connection[0]:
            return self._connection[1]
        # create a HTTP connection object from a host descriptor
        chost, self._extra_headers, x509 = self.get_host_info(host)
        self._connection = host, http.client.HTTPConnection(chost)
        return self._connection[1]

    ##
    # Clear any cached connection object.
    # Used in the event of socket errors.
    #
    def close(self):
        if self._connection[1]:
            self._connection[1].close()
            self._connection = (None, None)

    ##
    # Send HTTP request.
    #
    # @param host Host descriptor (URL or (URL, x509 info) tuple).
    # @param handler Target RPC handler (a path relative to host)
    # @param request_body The XML-RPC request body
    # @param debug Enable debugging if debug is true.
    # @return An HTTPConnection.

    def send_request(self, host, handler, request_body, debug):
        connection = self.make_connection(host)
        headers = self._extra_headers[:]
        if debug:
            connection.set_debuglevel(1)
        if self.accept_gzip_encoding and gzip:
            connection.putrequest("POST", handler, skip_accept_encoding=True)
            headers.append(("Accept-Encoding", "gzip"))
        else:
            connection.putrequest("POST", handler)
        headers.append(("Content-Type", "text/xml"))
        headers.append(("User-Agent", self.user_agent))
        self.send_headers(connection, headers)
        self.send_content(connection, request_body)
        return connection

    ##
    # Send request headers.
    # This function provides a useful hook for subclassing
    #
    # @param connection httpConnection.
    # @param headers list of key,value pairs for HTTP headers

    def send_headers(self, connection, headers):
        for key, val in headers:
            connection.putheader(key, val)

    ##
    # Send request body.
    # This function provides a useful hook for subclassing
    #
    # @param connection httpConnection.
    # @param request_body XML-RPC request body.

    def send_content(self, connection, request_body):
        #optionally encode the request
        if (self.encode_threshold is not None and
            self.encode_threshold < len(request_body) and
            gzip):
            connection.putheader("Content-Encoding", "gzip")
            request_body = gzip_encode(request_body)

        connection.putheader("Content-Length", str(len(request_body)))
        connection.endheaders(request_body)

    ##
    # Parse response.
    #
    # @param file Stream.
    # @return Response tuple and target method.

    def parse_response(self, response):
        # read response data from httpresponse, and parse it
        # Check for new http response object, otherwise it is a file object.
        if hasattr(response, 'getheader'):
            if response.getheader("Content-Encoding", "") == "gzip":
                stream = GzipDecodedResponse(response)
            else:
                stream = response
        else:
            stream = response

        p, u = self.getparser()

        while 1:
            data = stream.read(1024)
            if not data:
                break
            if self.verbose:
                print("body:", repr(data))
            p.feed(data)

        if stream is not response:
            stream.close()
        p.close()

        return u.close()
##
# Standard transport class for XML-RPC over HTTPS.
class SafeTransport(Transport):
    """Handles an HTTPS transaction to an XML-RPC server."""

    # FIXME: mostly untested

    def make_connection(self, host):
        # Return the cached keep-alive connection when the host matches.
        if self._connection and host == self._connection[0]:
            return self._connection[1]

        if not hasattr(http.client, "HTTPSConnection"):
            raise NotImplementedError(
            "your version of http.client doesn't support HTTPS")
        # create a HTTPS connection object from a host descriptor
        # host may be a string, or a (host, x509-dict) tuple
        chost, self._extra_headers, x509 = self.get_host_info(host)
        self._connection = host, http.client.HTTPSConnection(chost,
            None, **(x509 or {}))
        return self._connection[1]
##
# Standard server proxy. This class establishes a virtual connection
# to an XML-RPC server.
# <p>
# This class is available as ServerProxy and Server. New code should
# use ServerProxy, to avoid confusion.
#
# @def ServerProxy(uri, **options)
# @param uri The connection point on the server.
# @keyparam transport A transport factory, compatible with the
# standard transport class.
# @keyparam encoding The default encoding used for 8-bit strings
# (default is UTF-8).
# @keyparam verbose Use a true value to enable debugging output.
# (printed to standard output).
# @see Transport
class ServerProxy:
    """uri [,options] -> a logical connection to an XML-RPC server

    uri is the connection point on the server, given as
    scheme://host/target.

    The standard implementation always supports the "http" scheme.  If
    SSL socket support is available (Python 2.0), it also supports
    "https".

    If the target part and the slash preceding it are both omitted,
    "/RPC2" is assumed.

    The following options can be given as keyword arguments:

        transport: a transport factory
        encoding: the request encoding (default is UTF-8)

    All 8-bit strings passed to the server proxy are assumed to use
    the given encoding.
    """

    def __init__(self, uri, transport=None, encoding=None, verbose=False,
                 allow_none=False, use_datetime=False, use_builtin_types=False):
        # establish a "logical" server connection

        # get the url
        type, uri = urllib.parse.splittype(uri)
        if type not in ("http", "https"):
            raise OSError("unsupported XML-RPC protocol")
        self.__host, self.__handler = urllib.parse.splithost(uri)
        if not self.__handler:
            self.__handler = "/RPC2"

        if transport is None:
            # Pick the transport matching the URL scheme.
            if type == "https":
                handler = SafeTransport
            else:
                handler = Transport
            transport = handler(use_datetime=use_datetime,
                                use_builtin_types=use_builtin_types)
        self.__transport = transport

        self.__encoding = encoding or 'utf-8'
        self.__verbose = verbose
        self.__allow_none = allow_none

    def __close(self):
        # Shut down the underlying transport; reachable via proxy("close").
        self.__transport.close()

    def __request(self, methodname, params):
        # call a method on the remote server
        request = dumps(params, methodname, encoding=self.__encoding,
                        allow_none=self.__allow_none).encode(self.__encoding)

        response = self.__transport.request(
            self.__host,
            self.__handler,
            request,
            verbose=self.__verbose
            )

        # A single-item response tuple is unwrapped for convenience.
        if len(response) == 1:
            response = response[0]

        return response

    def __repr__(self):
        return (
            "<ServerProxy for %s%s>" %
            (self.__host, self.__handler)
            )

    __str__ = __repr__

    def __getattr__(self, name):
        # magic method dispatcher
        return _Method(self.__request, name)

    # note: to call a remote object with an non-standard name, use
    # result getattr(server, "strange-python-name")(args)

    def __call__(self, attr):
        """A workaround to get special attributes on the ServerProxy
           without interfering with the magic __getattr__
        """
        if attr == "close":
            return self.__close
        elif attr == "transport":
            return self.__transport
        raise AttributeError("Attribute %r not found" % (attr,))
# compatibility
Server = ServerProxy
# --------------------------------------------------------------------
# test code
if __name__ == "__main__":
    # simple test program (from the XML-RPC specification)
    # local server, available from Lib/xmlrpc/server.py
    server = ServerProxy("http://localhost:8000")
    try:
        print(server.currentTime.getCurrentTime())
    except Error as v:
        print("ERROR", v)
    # MultiCall batches several method calls into a single request;
    # iterating the call result yields each method's result in order.
    multi = MultiCall(server)
    multi.getData()
    multi.pow(2,9)
    multi.add(1,2)
    try:
        for response in multi():
            print(response)
    except Error as v:
        print("ERROR", v)
|
laiqiqi886/kbengine | refs/heads/master | kbe/src/lib/python/Lib/email/charset.py | 90 | # Copyright (C) 2001-2007 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: email-sig@python.org
__all__ = [
'Charset',
'add_alias',
'add_charset',
'add_codec',
]
from functools import partial
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
# Flags for types of header encodings
QP = 1 # Quoted-Printable
BASE64 = 2 # Base64
SHORTEST = 3 # the shorter of QP and base64, but only for headers
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
RFC2047_CHROME_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
# Marker charset for text whose bytes have an unknown encoding; such
# text is round-tripped through the codec layer with the
# surrogateescape handler (see _encode below).
UNKNOWN8BIT = 'unknown-8bit'
EMPTYSTRING = ''
# Defaults
# Maps canonical charset name -> (header encoding, body encoding,
# output conversion charset); a None output conversion means "stay in
# the input charset".
CHARSETS = {
    # input header enc body enc output conv
    'iso-8859-1': (QP, QP, None),
    'iso-8859-2': (QP, QP, None),
    'iso-8859-3': (QP, QP, None),
    'iso-8859-4': (QP, QP, None),
    # iso-8859-5 is Cyrillic, and not especially used
    # iso-8859-6 is Arabic, also not particularly used
    # iso-8859-7 is Greek, QP will not make it readable
    # iso-8859-8 is Hebrew, QP will not make it readable
    'iso-8859-9': (QP, QP, None),
    'iso-8859-10': (QP, QP, None),
    # iso-8859-11 is Thai, QP will not make it readable
    'iso-8859-13': (QP, QP, None),
    'iso-8859-14': (QP, QP, None),
    'iso-8859-15': (QP, QP, None),
    'iso-8859-16': (QP, QP, None),
    'windows-1252':(QP, QP, None),
    'viscii': (QP, QP, None),
    'us-ascii': (None, None, None),
    'big5': (BASE64, BASE64, None),
    'gb2312': (BASE64, BASE64, None),
    'euc-jp': (BASE64, None, 'iso-2022-jp'),
    'shift_jis': (BASE64, None, 'iso-2022-jp'),
    'iso-2022-jp': (BASE64, None, None),
    'koi8-r': (BASE64, BASE64, None),
    'utf-8': (SHORTEST, BASE64, 'utf-8'),
    }
# Aliases for other commonly-used names for character sets. Map
# them to the real ones used in email.
ALIASES = {
    'latin_1': 'iso-8859-1',
    'latin-1': 'iso-8859-1',
    'latin_2': 'iso-8859-2',
    'latin-2': 'iso-8859-2',
    'latin_3': 'iso-8859-3',
    'latin-3': 'iso-8859-3',
    'latin_4': 'iso-8859-4',
    'latin-4': 'iso-8859-4',
    'latin_5': 'iso-8859-9',
    'latin-5': 'iso-8859-9',
    'latin_6': 'iso-8859-10',
    'latin-6': 'iso-8859-10',
    'latin_7': 'iso-8859-13',
    'latin-7': 'iso-8859-13',
    'latin_8': 'iso-8859-14',
    'latin-8': 'iso-8859-14',
    'latin_9': 'iso-8859-15',
    'latin-9': 'iso-8859-15',
    'latin_10':'iso-8859-16',
    'latin-10':'iso-8859-16',
    'cp949': 'ks_c_5601-1987',
    'euc_jp': 'euc-jp',
    'euc_kr': 'euc-kr',
    'ascii': 'us-ascii',
    }
# Map charsets to their Unicode codec strings.
CODEC_MAP = {
    'gb2312': 'eucgb2312_cn',
    'big5': 'big5_tw',
    # Hack: We don't want *any* conversion for stuff marked us-ascii, as all
    # sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
    # Let that stuff pass through without conversion to/from Unicode.
    'us-ascii': None,
    }
# Convenience functions for extending the above mappings
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
    """Register the email properties of a character set.

    charset must be the canonical name of a character set.

    header_enc and body_enc describe how headers and bodies in this
    character set are to be encoded: Charset.QP for quoted-printable,
    Charset.BASE64 for base64, Charset.SHORTEST (headers only -- pick
    the shorter of QP and base64 per encoded word), or None for no
    encoding at all.  The default is no encoding.

    output_charset names the character set the output should be
    converted to (via Unicode) when Charset.convert() is called; by
    default output stays in the input character set.

    Both the input and output character sets must have Unicode codec
    entries in the module's charset-to-codec mapping; use
    add_codec(charset, codecname) to teach the module about codecs it
    does not already know.  See the codecs module's documentation for
    more information.
    """
    # SHORTEST only makes sense for headers, where each encoded word may
    # independently choose its encoding.
    if body_enc == SHORTEST:
        raise ValueError('SHORTEST not allowed for body_enc')
    properties = (header_enc, body_enc, output_charset)
    CHARSETS[charset] = properties
def add_alias(alias, canonical):
    """Record *alias* as an alternative spelling of *canonical*.

    alias is the alias name, e.g. latin-1
    canonical is the character set's canonical name, e.g. iso-8859-1
    """
    ALIASES[alias] = canonical
def add_codec(charset, codecname):
    """Teach the module which Python codec maps *charset* to/from Unicode.

    charset is the canonical name of a character set; codecname is the
    name of a Python codec, as appropriate for the second argument to
    the unicode() built-in, or to the encode() method of a Unicode
    string.
    """
    CODEC_MAP[charset] = codecname
# Convenience function for encoding strings, taking into account
# that they might be unknown-8bit (ie: have surrogate-escaped bytes)
def _encode(string, codec):
    """Encode *string* with *codec*, passing surrogate-escaped bytes through."""
    if codec == UNKNOWN8BIT:
        # unknown-8bit text carries its raw bytes as lone surrogates;
        # surrogateescape re-emits those original bytes unchanged.
        return string.encode('ascii', 'surrogateescape')
    return string.encode(codec)
class Charset:
    """Map character sets to their email properties.

    This class provides information about the requirements imposed on email
    for a specific character set. It also provides convenience routines for
    converting between character sets, given the availability of the
    applicable codecs. Given a character set, it will do its best to provide
    information on how to use that character set in an email in an
    RFC-compliant way.

    Certain character sets must be encoded with quoted-printable or base64
    when used in email headers or bodies. Certain character sets must be
    converted outright, and are not allowed in email. Instances of this
    module expose the following information about a character set:

    input_charset: The initial character set specified. Common aliases
        are converted to their `official' email names (e.g. latin_1
        is converted to iso-8859-1). Defaults to 7-bit us-ascii.

    header_encoding: If the character set must be encoded before it can be
        used in an email header, this attribute will be set to
        Charset.QP (for quoted-printable), Charset.BASE64 (for
        base64 encoding), or Charset.SHORTEST for the shortest of
        QP or BASE64 encoding. Otherwise, it will be None.

    body_encoding: Same as header_encoding, but describes the encoding for the
        mail message's body, which indeed may be different than the
        header encoding. Charset.SHORTEST is not allowed for
        body_encoding.

    output_charset: Some character sets must be converted before they can be
        used in email headers or bodies. If the input_charset is
        one of them, this attribute will contain the name of the
        charset output will be converted to. Otherwise, it will
        be None.

    input_codec: The name of the Python codec used to convert the
        input_charset to Unicode. If no conversion codec is
        necessary, this attribute will be None.

    output_codec: The name of the Python codec used to convert Unicode
        to the output_charset. If no conversion codec is necessary,
        this attribute will have the same value as the input_codec.
    """
    def __init__(self, input_charset=DEFAULT_CHARSET):
        # RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
        # unicode because its .lower() is locale insensitive. If the argument
        # is already a unicode, we leave it at that, but ensure that the
        # charset is ASCII, as the standard (RFC XXX) requires.
        try:
            if isinstance(input_charset, str):
                input_charset.encode('ascii')
            else:
                input_charset = str(input_charset, 'ascii')
        except UnicodeError:
            raise errors.CharsetError(input_charset)
        input_charset = input_charset.lower()
        # Set the input charset after filtering through the aliases
        self.input_charset = ALIASES.get(input_charset, input_charset)
        # We can try to guess which encoding and conversion to use by the
        # charset_map dictionary. Try that first, but let the user override
        # it.
        henc, benc, conv = CHARSETS.get(self.input_charset,
                                        (SHORTEST, BASE64, None))
        if not conv:
            conv = self.input_charset
        # Set the attributes, allowing the arguments to override the default.
        self.header_encoding = henc
        self.body_encoding = benc
        self.output_charset = ALIASES.get(conv, conv)
        # Now set the codecs. If one isn't defined for input_charset,
        # guess and try a Unicode codec with the same name as input_codec.
        self.input_codec = CODEC_MAP.get(self.input_charset,
                                         self.input_charset)
        self.output_codec = CODEC_MAP.get(self.output_charset,
                                          self.output_charset)
    def __str__(self):
        return self.input_charset.lower()
    __repr__ = __str__
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable in Python 3 -- confirm no caller uses Charset objects
    # as dict/set keys.
    def __eq__(self, other):
        return str(self) == str(other).lower()
    def __ne__(self, other):
        return not self.__eq__(other)
    def get_body_encoding(self):
        """Return the content-transfer-encoding used for body encoding.

        This is either the string `quoted-printable' or `base64' depending on
        the encoding used, or it is a function in which case you should call
        the function with a single argument, the Message object being
        encoded. The function should then set the Content-Transfer-Encoding
        header itself to whatever is appropriate.

        Returns "quoted-printable" if self.body_encoding is QP.
        Returns "base64" if self.body_encoding is BASE64.
        Returns conversion function otherwise.
        """
        assert self.body_encoding != SHORTEST
        if self.body_encoding == QP:
            return 'quoted-printable'
        elif self.body_encoding == BASE64:
            return 'base64'
        else:
            return encode_7or8bit
    def get_output_charset(self):
        """Return the output character set.

        This is self.output_charset if that is not None, otherwise it is
        self.input_charset.
        """
        return self.output_charset or self.input_charset
    def header_encode(self, string):
        """Header-encode a string by converting it first to bytes.

        The type of encoding (base64 or quoted-printable) will be based on
        this charset's `header_encoding`.

        :param string: A unicode string for the header. It must be possible
            to encode this string to bytes using the character set's
            output codec.
        :return: The encoded string, with RFC 2047 chrome.
        """
        codec = self.output_codec or 'us-ascii'
        header_bytes = _encode(string, codec)
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
        encoder_module = self._get_encoder(header_bytes)
        if encoder_module is None:
            return string
        return encoder_module.header_encode(header_bytes, codec)
    def header_encode_lines(self, string, maxlengths):
        """Header-encode a string by converting it first to bytes.

        This is similar to `header_encode()` except that the string is fit
        into maximum line lengths as given by the argument.

        :param string: A unicode string for the header. It must be possible
            to encode this string to bytes using the character set's
            output codec.
        :param maxlengths: Maximum line length iterator. Each element
            returned from this iterator will provide the next maximum line
            length. This parameter is used as an argument to built-in next()
            and should never be exhausted. The maximum line lengths should
            not count the RFC 2047 chrome. These line lengths are only a
            hint; the splitter does the best it can.
        :return: Lines of encoded strings, each with RFC 2047 chrome.
        """
        # See which encoding we should use.
        codec = self.output_codec or 'us-ascii'
        header_bytes = _encode(string, codec)
        encoder_module = self._get_encoder(header_bytes)
        encoder = partial(encoder_module.header_encode, charset=codec)
        # Calculate the number of characters that the RFC 2047 chrome will
        # contribute to each line.
        charset = self.get_output_charset()
        extra = len(charset) + RFC2047_CHROME_LEN
        # Now comes the hard part. We must encode bytes but we can't split on
        # bytes because some character sets are variable length and each
        # encoded word must stand on its own. So the problem is you have to
        # encode to bytes to figure out this word's length, but you must split
        # on characters. This causes two problems: first, we don't know how
        # many octets a specific substring of unicode characters will get
        # encoded to, and second, we don't know how many ASCII characters
        # those octets will get encoded to. Unless we try it. Which seems
        # inefficient. In the interest of being correct rather than fast (and
        # in the hope that there will be few encoded headers in any such
        # message), brute force it. :(
        lines = []
        current_line = []
        maxlen = next(maxlengths) - extra
        for character in string:
            current_line.append(character)
            this_line = EMPTYSTRING.join(current_line)
            length = encoder_module.header_length(_encode(this_line, charset))
            if length > maxlen:
                # This last character doesn't fit so pop it off.
                current_line.pop()
                # Does nothing fit on the first line?
                if not lines and not current_line:
                    lines.append(None)
                else:
                    separator = (' ' if lines else '')
                    joined_line = EMPTYSTRING.join(current_line)
                    header_bytes = _encode(joined_line, codec)
                    lines.append(encoder(header_bytes))
                # Start a new line with the character that overflowed.
                current_line = [character]
                maxlen = next(maxlengths) - extra
        joined_line = EMPTYSTRING.join(current_line)
        header_bytes = _encode(joined_line, codec)
        lines.append(encoder(header_bytes))
        return lines
    def _get_encoder(self, header_bytes):
        # Pick the header encoder module implied by header_encoding; for
        # SHORTEST, compare the encoded lengths and take the smaller one.
        if self.header_encoding == BASE64:
            return email.base64mime
        elif self.header_encoding == QP:
            return email.quoprimime
        elif self.header_encoding == SHORTEST:
            len64 = email.base64mime.header_length(header_bytes)
            lenqp = email.quoprimime.header_length(header_bytes)
            if len64 < lenqp:
                return email.base64mime
            else:
                return email.quoprimime
        else:
            return None
    def body_encode(self, string):
        """Body-encode a string by converting it first to bytes.

        The type of encoding (base64 or quoted-printable) will be based on
        self.body_encoding. If body_encoding is None, we assume the
        output charset is a 7bit encoding, so re-encoding the decoded
        string using the ascii codec produces the correct string version
        of the content.
        """
        if not string:
            return string
        if self.body_encoding is BASE64:
            if isinstance(string, str):
                string = string.encode(self.output_charset)
            return email.base64mime.body_encode(string)
        elif self.body_encoding is QP:
            # quopromime.body_encode takes a string, but operates on it as if
            # it were a list of byte codes. For a (minimal) history on why
            # this is so, see changeset 0cf700464177. To correctly encode a
            # character set, then, we must turn it into pseudo bytes via the
            # latin1 charset, which will encode any byte as a single code point
            # between 0 and 255, which is what body_encode is expecting.
            if isinstance(string, str):
                string = string.encode(self.output_charset)
            string = string.decode('latin1')
            return email.quoprimime.body_encode(string)
        else:
            if isinstance(string, str):
                string = string.encode(self.output_charset).decode('ascii')
            return string
|
GraFiddle/grafiddle | refs/heads/master | server/lib/werkzeug/contrib/iterio.py | 318 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.iterio
~~~~~~~~~~~~~~~~~~~~~~~
This module implements a :class:`IterIO` that converts an iterator into
a stream object and the other way round. Converting streams into
iterators requires the `greenlet`_ module.
To convert an iterator into a stream all you have to do is to pass it
directly to the :class:`IterIO` constructor. In this example we pass it
a newly created generator::
def foo():
yield "something\n"
yield "otherthings"
stream = IterIO(foo())
print stream.read() # read the whole iterator
The other way round works a bit different because we have to ensure that
the code execution doesn't take place yet. An :class:`IterIO` call with a
callable as first argument does two things. The function itself is passed
an :class:`IterIO` stream it can feed. The object returned by the
:class:`IterIO` constructor on the other hand is not an stream object but
an iterator::
def foo(stream):
stream.write("some")
stream.write("thing")
stream.flush()
stream.write("otherthing")
iterator = IterIO(foo)
print iterator.next() # prints something
print iterator.next() # prints otherthing
iterator.next() # raises StopIteration
.. _greenlet: http://codespeak.net/py/dist/greenlet.html
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
import greenlet
except ImportError:
greenlet = None
from werkzeug._compat import implements_iterator
def _mixed_join(iterable, sentinel):
"""concatenate any string type in an intelligent way."""
iterator = iter(iterable)
first_item = next(iterator, sentinel)
if isinstance(first_item, bytes):
return first_item + b''.join(iterator)
return first_item + u''.join(iterator)
def _newline(reference_string):
if isinstance(reference_string, bytes):
return b'\n'
return u'\n'
@implements_iterator
class IterIO(object):
    """Instances of this object implement an interface compatible with the
    standard Python :class:`file` object. Streams are either read-only or
    write-only depending on how the object is created.

    If the first argument is an iterable a file like object is returned that
    returns the contents of the iterable. In case the iterable is empty
    read operations will return the sentinel value.

    If the first argument is a callable then the stream object will be
    created and passed to that function. The caller itself however will
    not receive a stream but an iterable. The function will be be executed
    step by step as something iterates over the returned iterable. Each
    call to :meth:`flush` will create an item for the iterable. If
    :meth:`flush` is called without any writes in-between the sentinel
    value will be yielded.

    Note for Python 3: due to the incompatible interface of bytes and
    streams you should set the sentinel value explicitly to an empty
    bytestring (``b''``) if you are expecting to deal with bytes as
    otherwise the end of the stream is marked with the wrong sentinel
    value.

    .. versionadded:: 0.9
       `sentinel` parameter was added.
    """
    def __new__(cls, obj, sentinel=''):
        # Factory dispatch: an iterable becomes a readable IterO stream;
        # a non-iterable (a callable) becomes a writable IterI, whose
        # consumer side is the iterator handed back to the caller.
        try:
            iterator = iter(obj)
        except TypeError:
            return IterI(obj, sentinel)
        return IterO(iterator, sentinel)
    def __iter__(self):
        return self
    def tell(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        return self.pos
    def isatty(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        return False
    # The remaining methods are direction-specific: the base class fails
    # with EBADF (errno 9) like a real file opened in the wrong mode, and
    # IterI/IterO override the operations they actually support.
    def seek(self, pos, mode=0):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def truncate(self, size=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def write(self, s):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def writelines(self, list):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def read(self, n=-1):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def readlines(self, sizehint=0):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def readline(self, length=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def flush(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')
    def __next__(self):
        # Iterating a stream yields lines; an empty line marks the end.
        if self.closed:
            raise StopIteration()
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
class IterI(IterIO):
    """Convert an stream into an iterator."""
    # NOTE: __new__ is a generator function, so "constructing" IterI(func)
    # actually returns a generator, not an IterI instance -- the instance
    # is only handed to *func*, which runs inside its own greenlet.
    def __new__(cls, func, sentinel=''):
        if greenlet is None:
            raise RuntimeError('IterI requires greenlet support')
        stream = object.__new__(cls)
        stream._parent = greenlet.getcurrent()
        stream._buffer = []
        stream.closed = False
        stream.sentinel = sentinel
        stream.pos = 0
        def run():
            func(stream)
            stream.close()
        g = greenlet.greenlet(run, stream._parent)
        # Each switch into the writer greenlet runs func() until its next
        # flush()/close(), which switches back here carrying the buffered
        # data; an empty switch value signals end of stream.
        while 1:
            rv = g.switch()
            if not rv:
                return
            yield rv[0]
    def close(self):
        if not self.closed:
            self.closed = True
            self._flush_impl()
    def write(self, s):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if s:
            self.pos += len(s)
            self._buffer.append(s)
    def writelines(self, list):
        for item in list:
            self.write(item)
    def flush(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        self._flush_impl()
    def _flush_impl(self):
        # Hand the accumulated writes to the consuming generator; a bare
        # switch (no payload) tells it the stream is finished.
        data = _mixed_join(self._buffer, self.sentinel)
        self._buffer = []
        if not data and self.closed:
            self._parent.switch()
        else:
            self._parent.switch((data,))
class IterO(IterIO):
    """Iter output. Wrap an iterator and give it a stream like interface.

    Items pulled from the wrapped iterator are accumulated in ``_buf`` so
    that :meth:`seek`, :meth:`read` and :meth:`readline` can also serve
    data that was already consumed.  ``_buf`` starts out as ``None`` and
    becomes ``bytes`` or text depending on what the iterator yields.
    """
    def __new__(cls, gen, sentinel=''):
        self = object.__new__(cls)
        self._gen = gen
        self._buf = None
        self.sentinel = sentinel
        self.closed = False
        self.pos = 0
        return self
    def __iter__(self):
        return self
    def _buf_append(self, string):
        """Replace string directly without appending to an empty string,
        avoiding type issues."""
        if not self._buf:
            self._buf = string
        else:
            self._buf += string
    def close(self):
        # Propagate close() to the underlying generator, if it has one.
        if not self.closed:
            self.closed = True
            if hasattr(self._gen, 'close'):
                self._gen.close()
    def seek(self, pos, mode=0):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if mode == 1:
            # relative to the current position
            pos += self.pos
        elif mode == 2:
            # relative to the end: exhaust the iterator first
            self.read()
            self.pos = min(self.pos, self.pos + pos)
            return
        elif mode != 0:
            raise IOError('Invalid argument')
        buf = []
        try:
            # Bug fixes: ``self._buf`` may still be None when nothing has
            # been read yet (len(None) raised TypeError), and the old
            # Python-2-only ``self._gen.next()`` call raised AttributeError
            # on Python 3 -- use the builtin next() as the other methods do.
            tmp_end_pos = 0 if self._buf is None else len(self._buf)
            while pos > tmp_end_pos:
                item = next(self._gen)
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))
        self.pos = max(0, pos)
    def read(self, n=-1):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if n < 0:
            # Read everything: exhaust the iterator into the buffer.
            self._buf_append(_mixed_join(self._gen, self.sentinel))
            result = self._buf[self.pos:]
            self.pos += len(result)
            return result
        new_pos = self.pos + n
        buf = []
        try:
            tmp_end_pos = 0 if self._buf is None else len(self._buf)
            while new_pos > tmp_end_pos or (self._buf is None and not buf):
                item = next(self._gen)
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))
        if self._buf is None:
            return self.sentinel
        new_pos = max(0, new_pos)
        try:
            return self._buf[self.pos:new_pos]
        finally:
            self.pos = min(new_pos, len(self._buf))
    def readline(self, length=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        # Look for a newline in the not-yet-consumed part of the buffer.
        nl_pos = -1
        if self._buf:
            nl_pos = self._buf.find(_newline(self._buf), self.pos)
        buf = []
        try:
            # Newly pulled items are appended at the *end* of the existing
            # buffer, so track their positions from there (bug fix: the old
            # code started from self.pos, which understated nl_pos whenever
            # unread, newline-free data was still buffered).
            pos = 0 if self._buf is None else len(self._buf)
            while nl_pos < 0:
                item = next(self._gen)
                local_pos = item.find(_newline(item))
                buf.append(item)
                if local_pos >= 0:
                    nl_pos = pos + local_pos
                    break
                pos += len(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))
        if self._buf is None:
            return self.sentinel
        if nl_pos < 0:
            new_pos = len(self._buf)
        else:
            new_pos = nl_pos + 1
        if length is not None and self.pos + length < new_pos:
            new_pos = self.pos + length
        try:
            return self._buf[self.pos:new_pos]
        finally:
            self.pos = min(new_pos, len(self._buf))
    def readlines(self, sizehint=0):
        """Read lines until EOF, or until *sizehint* bytes have been read."""
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines
|
schlueter/ansible | refs/heads/devel | lib/ansible/modules/notification/office_365_connector_card.py | 50 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Marc Sensenich <hello@marc-sensenich.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: office_365_connector_card
short_description: Use webhooks to create Connector Card messages within an Office 365 group
description:
- Creates Connector Card messages through
- Office 365 Connectors U(https://dev.outlook.com/Connectors)
version_added: "2.4"
author: "Marc Sensenich"
notes:
- This module is not idempotent, therefore if the same task is run twice
- there will be two Connector Cards created
options:
webhook:
description:
- The webhook URL is given to you when you create a new Connector.
required: true
summary:
description:
- A string used for summarizing card content.
- This will be shown as the message subject.
- This is required if the text parameter isn't populated.
color:
description:
- Accent color used for branding or indicating status in the card.
title:
description:
- A title for the Connector message. Shown at the top of the message.
text:
description:
- The main text of the card.
- This will be rendered below the sender information and optional title,
- and above any sections or actions present.
actions:
description:
- This array of objects will power the action links
- found at the bottom of the card.
sections:
description:
- Contains a list of sections to display in the card.
- For more information see https://dev.outlook.com/Connectors/reference.
"""
EXAMPLES = """
- name: Create a simple Connector Card
office_365_connector_card:
webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
text: 'Hello, World!'
- name: Create a Connector Card with the full format
office_365_connector_card:
webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
summary: This is the summary property
title: This is the **card's title** property
text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur
adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
color: E81123
sections:
- title: This is the **section's title** property
activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg
activity_title: This is the section's **activityTitle** property
activity_subtitle: This is the section's **activitySubtitle** property
activity_text: This is the section's **activityText** property.
hero_image:
image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
title: This is the image's alternate text
text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur
adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
facts:
- name: This is a fact name
value: This is a fact value
- name: This is a fact name
value: This is a fact value
- name: This is a fact name
value: This is a fact value
images:
- image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg
title: This is the image's alternate text
- image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
title: This is the image's alternate text
- image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg
title: This is the image's alternate text
actions:
- "@type": ActionCard
name: Comment
inputs:
- "@type": TextInput
id: comment
is_multiline: true
title: Input's title property
actions:
- "@type": HttpPOST
name: Save
target: http://...
- "@type": ActionCard
name: Due Date
inputs:
- "@type": DateInput
id: dueDate
title: Input's title property
actions:
- "@type": HttpPOST
name: Save
target: http://...
- "@type": HttpPOST
name: Action's name prop.
target: http://...
- "@type": OpenUri
name: Action's name prop
targets:
- os: default
uri: http://...
- start_group: true
title: This is the title of a **second section**
text: This second section is visually separated from the first one by setting its
**startGroup** property to true.
"""
RETURN = """
"""
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.ec2 import snake_dict_to_camel_dict
# Constants shared by the payload builder and the webhook notifier below.
OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions"
OFFICE_365_CARD_TYPE = "MessageCard"
# Exact body text the webhook service returns for an empty payload; used
# to tell a valid-but-empty check-mode probe apart from a bad webhook URL.
OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required."
OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable."
def build_actions(actions):
    """Translate snake_case action dicts into the camelCase form the API expects."""
    return [snake_dict_to_camel_dict(action) for action in actions]
def build_sections(sections):
    """Build the card payload for each section in *sections*, in order."""
    return [build_section(section) for section in sections]
def build_section(section):
    """Build the Connector Card payload for a single section.

    Only keys actually present in *section* are copied, keeping the
    payload minimal.  Keys are translated from the module's snake_case
    names to the camelCase names the Office 365 API expects; the
    section's actions, if any, are converted with build_actions().
    """
    # snake_case module key -> camelCase API key, in the order the
    # original implementation emitted them.
    _SECTION_KEY_MAP = (
        ('title', 'title'),
        ('start_group', 'startGroup'),
        ('activity_image', 'activityImage'),
        ('activity_title', 'activityTitle'),
        ('activity_subtitle', 'activitySubtitle'),
        ('activity_text', 'activityText'),
        ('hero_image', 'heroImage'),
        ('text', 'text'),
        ('facts', 'facts'),
        ('images', 'images'),
    )
    section_payload = dict()
    for src_key, dest_key in _SECTION_KEY_MAP:
        if src_key in section:
            section_payload[dest_key] = section[src_key]
    if 'actions' in section:
        # Actions need their own camelCase conversion before attachment.
        section_payload['potentialAction'] = build_actions(section['actions'])
    return section_payload
def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None):
    """Assemble the MessageCard dict and return it JSON-encoded.

    Optional fields are only emitted when provided, so a bare call
    produces the minimal (empty) card used for check-mode probing.
    """
    payload = {
        '@context': OFFICE_365_CARD_CONTEXT,
        '@type': OFFICE_365_CARD_TYPE,
    }
    optional_fields = (
        ('summary', summary),
        ('themeColor', color),
        ('title', title),
        ('text', text),
    )
    for key, value in optional_fields:
        if value is not None:
            payload[key] = value
    if actions:
        payload['potentialAction'] = build_actions(actions)
    if sections:
        payload['sections'] = build_sections(sections)
    return module.jsonify(payload)
def do_notify_connector_card_webhook(module, webhook, payload):
    # Deliver the JSON payload to the Incoming Webhook and report the
    # outcome via exit_json()/fail_json() (which normally terminate the
    # module run, so control does not return to the caller on success).
    headers = {
        'Content-Type': 'application/json'
    }
    response, info = fetch_url(
        module=module,
        url=webhook,
        headers=headers,
        method='POST',
        data=payload
    )
    if info['status'] == 200:
        module.exit_json(changed=True)
    elif info['status'] == 400 and module.check_mode:
        # Check mode sends a deliberately empty payload (see main()); the
        # service rejects it with a known message when the webhook URL
        # itself is valid, which is exactly what we are probing for.
        if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG:
            module.exit_json(changed=True)
        else:
            module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG)
    else:
        module.fail_json(
            msg="failed to send %s as a connector card to Incoming Webhook: %s"
            % (payload, info['msg'])
        )
def main():
    """Entry point: parse module arguments and deliver the connector card."""
    module = AnsibleModule(
        argument_spec=dict(
            webhook=dict(required=True, no_log=True),
            summary=dict(type='str'),
            color=dict(type='str'),
            title=dict(type='str'),
            text=dict(type='str'),
            actions=dict(type='list'),
            sections=dict(type='list')
        ),
        supports_check_mode=True
    )
    params = module.params
    payload = build_payload_for_connector_card(
        module,
        params['summary'],
        params['color'],
        params['title'],
        params['text'],
        params['actions'],
        params['sections'])
    if module.check_mode:
        # In check mode, send an empty payload to validate connection
        empty_payload = build_payload_for_connector_card(module)
        do_notify_connector_card_webhook(module, params['webhook'], empty_payload)
    do_notify_connector_card_webhook(module, params['webhook'], payload)
# Ansible executes modules as standalone scripts; run only when invoked directly.
if __name__ == '__main__':
    main()
|
minhphung171093/GreenERP | refs/heads/master | openerp/addons/payment/__init__.py | 1516 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import models
|
puzan/ansible | refs/heads/devel | lib/ansible/module_utils/azure_rm_common.py | 48 | #
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import json
import os
import re
import sys
import copy
import importlib
import inspect
from packaging.version import Version
from os.path import expanduser
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import configparser
# Argument spec shared by every Azure module: authentication parameters.
AZURE_COMMON_ARGS = dict(
    profile=dict(type='str'),
    subscription_id=dict(type='str', no_log=True),
    client_id=dict(type='str', no_log=True),
    secret=dict(type='str', no_log=True),
    tenant=dict(type='str', no_log=True),
    ad_user=dict(type='str', no_log=True),
    password=dict(type='str', no_log=True),
    # debug=dict(type='bool', default=False),
)
# Environment variables that may supply each credential parameter when it is
# not passed as a module parameter (see AzureRMModuleBase._get_env_credentials).
AZURE_CREDENTIAL_ENV_MAPPING = dict(
    profile='AZURE_PROFILE',
    subscription_id='AZURE_SUBSCRIPTION_ID',
    client_id='AZURE_CLIENT_ID',
    secret='AZURE_SECRET',
    tenant='AZURE_TENANT',
    ad_user='AZURE_AD_USER',
    password='AZURE_PASSWORD'
)
# Tag parameters, merged into the arg spec only when supports_tags is True.
AZURE_TAG_ARGS = dict(
    tags=dict(type='dict'),
    append_tags=dict(type='bool', default=True),
)
AZURE_COMMON_REQUIRED_IF = [
    ('log_mode', 'file', ['log_path'])
]
ANSIBLE_USER_AGENT = 'Ansible-Deploy'
# Matches an IPv4 address in CIDR notation, e.g. 10.0.0.0/24.
CIDR_PATTERN = re.compile("(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1"
                          "[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))")
# Azure provisioning state values compared in check_provisioning_state().
AZURE_SUCCESS_STATE = "Succeeded"
AZURE_FAILED_STATE = "Failed"
# Import-availability flags, checked in AzureRMModuleBase.__init__ so a
# missing SDK yields a clean module failure instead of an import traceback.
HAS_AZURE = True
HAS_AZURE_EXC = None
HAS_MSRESTAZURE = True
HAS_MSRESTAZURE_EXC = None
# NB: packaging issue sometimes cause msrestazure not to be installed, check it separately
try:
    from msrest.serialization import Serializer
except ImportError as exc:
    HAS_MSRESTAZURE_EXC = exc
    HAS_MSRESTAZURE = False
try:
    from enum import Enum
    from msrestazure.azure_exceptions import CloudError
    from azure.mgmt.network.models import PublicIPAddress, NetworkSecurityGroup, SecurityRule, NetworkInterface, \
        NetworkInterfaceIPConfiguration, Subnet
    from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
    from azure.mgmt.network.version import VERSION as network_client_version
    from azure.mgmt.storage.version import VERSION as storage_client_version
    from azure.mgmt.compute.version import VERSION as compute_client_version
    from azure.mgmt.resource.version import VERSION as resource_client_version
    from azure.mgmt.network.network_management_client import NetworkManagementClient
    from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
    from azure.mgmt.storage.storage_management_client import StorageManagementClient
    from azure.mgmt.compute.compute_management_client import ComputeManagementClient
    from azure.storage.cloudstorageaccount import CloudStorageAccount
except ImportError as exc:
    HAS_AZURE_EXC = exc
    HAS_AZURE = False
def azure_id_to_dict(id):
    """Map each path segment of an Azure resource id to the segment after it.

    For ``/subscriptions/xxx/resourceGroups/yyy`` this yields
    ``{'subscriptions': 'xxx', 'xxx': 'resourceGroups', 'resourceGroups': 'yyy'}``;
    callers look up the well-known segment names and ignore the rest.
    """
    segments = re.sub(r'^\/', '', id).split('/')
    # Pair every segment with its successor (overlapping pairs, last wins
    # on duplicate keys) - same behavior as walking the list index by index.
    return dict(zip(segments, segments[1:]))
# Minimum SDK client versions enforced by check_client_version().
AZURE_EXPECTED_VERSIONS = dict(
    storage_client_version="0.30.0rc5",
    compute_client_version="0.30.0rc5",
    network_client_version="0.30.0rc5",
    resource_client_version="0.30.0rc5"
)
# Release of the ``azure`` meta-package suggested in error messages.
AZURE_MIN_RELEASE = '2.0.0rc5'
class AzureRMModuleBase(object):
    """Base class for Azure Resource Manager Ansible modules.

    Merges the common Azure argument spec into the module's own, discovers
    credentials (module parameters, then environment variables, then the
    default profile in ~/.azure/credentials), authenticates, and runs the
    subclass's :meth:`exec_module`. Management clients are created lazily
    through the properties at the bottom of the class.
    """

    def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
                 check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
                 required_one_of=None, add_file_common_args=False, supports_check_mode=False,
                 required_if=None, supports_tags=True, facts_module=False):
        """Build the AnsibleModule, authenticate, and execute the module."""
        merged_arg_spec = dict()
        merged_arg_spec.update(AZURE_COMMON_ARGS)
        if supports_tags:
            merged_arg_spec.update(AZURE_TAG_ARGS)
        if derived_arg_spec:
            merged_arg_spec.update(derived_arg_spec)

        merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
        if required_if:
            merged_required_if += required_if

        self.module = AnsibleModule(argument_spec=merged_arg_spec,
                                    bypass_checks=bypass_checks,
                                    no_log=no_log,
                                    check_invalid_arguments=check_invalid_arguments,
                                    mutually_exclusive=mutually_exclusive,
                                    required_together=required_together,
                                    required_one_of=required_one_of,
                                    add_file_common_args=add_file_common_args,
                                    supports_check_mode=supports_check_mode,
                                    required_if=merged_required_if)

        if not HAS_MSRESTAZURE:
            self.fail("Do you have msrestazure installed? Try `pip install msrestazure`"
                      "- {0}".format(HAS_MSRESTAZURE_EXC))
        if not HAS_AZURE:
            self.fail("Do you have azure>={1} installed? Try `pip install 'azure>={1}' --upgrade`"
                      "- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))

        # Management clients are created on first access by the properties below.
        self._network_client = None
        self._storage_client = None
        self._resource_client = None
        self._compute_client = None
        self.check_mode = self.module.check_mode
        self.facts_module = facts_module
        # self.debug = self.module.params.get('debug')

        # authenticate
        self.credentials = self._get_credentials(self.module.params)
        if not self.credentials:
            self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                      "or define a profile in ~/.azure/credentials.")
        if self.credentials.get('subscription_id', None) is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        if self.credentials.get('client_id') is not None and \
           self.credentials.get('secret') is not None and \
           self.credentials.get('tenant') is not None:
            self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
                                                                 secret=self.credentials['secret'],
                                                                 tenant=self.credentials['tenant'])
        elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
            self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'])
        else:
            self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
                      "Credentials must include client_id, secret and tenant or ad_user and password.")

        # common parameter validation
        if self.module.params.get('tags'):
            self.validate_tags(self.module.params['tags'])

        res = self.exec_module(**self.module.params)
        self.module.exit_json(**res)

    def check_client_version(self, client_name, client_version, expected_version):
        """Fail if the installed *client_name* SDK is older than *expected_version*."""
        # Ensure Azure modules are at least 2.0.0rc5.
        if Version(client_version) < Version(expected_version):
            self.fail("Installed {0} client version is {1}. The supported version is {2}. Try "
                      "`pip install azure>={3} --upgrade`".format(client_name, client_version, expected_version,
                                                                  AZURE_MIN_RELEASE))

    def exec_module(self, **kwargs):
        """Module entry point; subclasses must override this."""
        self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))

    def fail(self, msg, **kwargs):
        '''
        Shortcut for calling module.fail()

        :param msg: Error message text.
        :param kwargs: Any key=value pairs
        :return: None
        '''
        self.module.fail_json(msg=msg, **kwargs)

    def log(self, msg, pretty_print=False):
        """No-op logger; uncomment the body only during module development."""
        pass
        # Use only during module development
        #if self.debug:
        #    log_file = open('azure_rm.log', 'a')
        #    if pretty_print:
        #        log_file.write(json.dumps(msg, indent=4, sort_keys=True))
        #    else:
        #        log_file.write(msg + u'\n')

    def validate_tags(self, tags):
        '''
        Check if tags dictionary contains string:string pairs.

        :param tags: dictionary of string:string pairs
        :return: None
        '''
        if not self.facts_module:
            if not isinstance(tags, dict):
                self.fail("Tags must be a dictionary of string:string values.")
            for key, value in tags.items():
                if not isinstance(value, str):
                    self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))

    def update_tags(self, tags):
        '''
        Call from the module to update metadata tags. Returns tuple
        with bool indicating if there was a change and dict of new
        tags to assign to the object.

        :param tags: metadata tags from the object
        :return: bool, dict
        '''
        new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
        changed = False
        if isinstance(self.module.params.get('tags'), dict):
            for key, value in self.module.params['tags'].items():
                if not new_tags.get(key) or new_tags[key] != value:
                    changed = True
                    new_tags[key] = value
        # Only prune existing tags when a tags parameter was actually given;
        # previously this dereferenced None when params['tags'] was unset.
        if isinstance(tags, dict) and isinstance(self.module.params.get('tags'), dict):
            for key, value in tags.items():
                if not self.module.params['tags'].get(key):
                    new_tags.pop(key)
                    changed = True
        return changed, new_tags

    def has_tags(self, obj_tags, tag_list):
        '''
        Used in fact modules to compare object tags to list of parameter tags. Return true if list of parameter tags
        exists in object tags.

        :param obj_tags: dictionary of tags from an Azure object.
        :param tag_list: list of tag keys or tag key:value pairs
        :return: bool
        '''
        if not obj_tags and tag_list:
            return False
        if not tag_list:
            return True
        matches = 0
        result = False
        for tag in tag_list:
            tag_key = tag
            tag_value = None
            if ':' in tag:
                # Split on the first ':' only so tag values may themselves
                # contain colons (e.g. URLs); splitting on all colons raised
                # ValueError for such values.
                tag_key, tag_value = tag.split(':', 1)
            if tag_value and obj_tags.get(tag_key) == tag_value:
                matches += 1
            elif not tag_value and obj_tags.get(tag_key):
                matches += 1
        if matches == len(tag_list):
            result = True
        return result

    def get_resource_group(self, resource_group):
        '''
        Fetch a resource group.

        :param resource_group: name of a resource group
        :return: resource group object
        '''
        try:
            return self.rm_client.resource_groups.get(resource_group)
        except CloudError:
            self.fail("Parameter error: resource group {0} not found".format(resource_group))
        except Exception as exc:
            self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))

    def _get_profile(self, profile="default"):
        """Read credentials for *profile* from ~/.azure/credentials, or return None."""
        path = expanduser("~/.azure/credentials")
        try:
            config = configparser.ConfigParser()
            config.read(path)
        except Exception as exc:
            self.fail("Failed to access {0}. Check that the file exists and you have read "
                      "access. {1}".format(path, str(exc)))
        credentials = dict()
        for key in AZURE_CREDENTIAL_ENV_MAPPING:
            try:
                credentials[key] = config.get(profile, key, raw=True)
            except (configparser.NoSectionError, configparser.NoOptionError):
                # Missing section/option simply means the value is not set
                # (was a bare ``except:`` which also swallowed KeyboardInterrupt).
                pass
        if credentials.get('subscription_id'):
            return credentials
        return None

    def _get_env_credentials(self):
        """Build credentials from AZURE_* environment variables, or return None."""
        env_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
            env_credentials[attribute] = os.environ.get(env_variable, None)
        if env_credentials['profile']:
            # An explicit profile in the environment takes precedence.
            credentials = self._get_profile(env_credentials['profile'])
            return credentials
        if env_credentials.get('subscription_id') is not None:
            return env_credentials
        return None

    def _get_credentials(self, params):
        # Get authentication credentials.
        # Precedence: module parameters-> environment variables-> default profile in ~/.azure/credentials.
        self.log('Getting credentials')
        arg_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
            arg_credentials[attribute] = params.get(attribute, None)
        # try module params
        if arg_credentials['profile'] is not None:
            self.log('Retrieving credentials with profile parameter.')
            credentials = self._get_profile(arg_credentials['profile'])
            return credentials
        if arg_credentials['subscription_id']:
            self.log('Received credentials from parameters.')
            return arg_credentials
        # try environment
        env_credentials = self._get_env_credentials()
        if env_credentials:
            self.log('Received credentials from env.')
            return env_credentials
        # try default profile from ~./azure/credentials
        default_credentials = self._get_profile()
        if default_credentials:
            self.log('Retrieved default profile credentials from ~/.azure/credentials.')
            return default_credentials
        return None

    def serialize_obj(self, obj, class_name, enum_modules=None):
        '''
        Return a JSON representation of an Azure object.

        :param obj: Azure object
        :param class_name: Name of the object's class
        :param enum_modules: List of module names to build enum dependencies from.
        :return: serialized result
        '''
        # Default was a mutable list literal; use None to avoid the shared
        # mutable-default pitfall (behavior is unchanged: both are falsy).
        dependencies = dict()
        if enum_modules:
            for module_name in enum_modules:
                mod = importlib.import_module(module_name)
                for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
                    dependencies[mod_class_name] = mod_class_obj
            self.log("dependencies: ")
            self.log(str(dependencies))
        serializer = Serializer(classes=dependencies)
        return serializer.body(obj, class_name)

    def get_poller_result(self, poller, wait=5):
        '''
        Consistent method of waiting on and retrieving results from Azure's long poller

        :param poller Azure poller object
        :return object resulting from the original request
        '''
        try:
            delay = wait
            while not poller.done():
                self.log("Waiting for {0} sec".format(delay))
                poller.wait(timeout=delay)
            return poller.result()
        except Exception as exc:
            self.log(str(exc))
            raise

    def check_provisioning_state(self, azure_object, requested_state='present'):
        '''
        Check an Azure object's provisioning state. If something did not complete the provisioning
        process, then we cannot operate on it.

        :param azure_object An object such as a subnet, storageaccount, etc. Must have provisioning_state
                            and name attributes.
        :return None
        '''
        if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \
           hasattr(azure_object, 'name'):
            # resource group object fits this model
            if isinstance(azure_object.properties.provisioning_state, Enum):
                if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \
                   requested_state != 'absent':
                    self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                        azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
                return
            if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \
               requested_state != 'absent':
                self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                    azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
            return
        if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):
            if isinstance(azure_object.provisioning_state, Enum):
                if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':
                    self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                        azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
                return
            if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':
                self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                    azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))

    def get_blob_client(self, resource_group_name, storage_account_name):
        """Return a block blob service client for the given storage account."""
        try:
            # Get keys from the storage account
            self.log('Getting keys')
            account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
        except Exception as exc:
            self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
        try:
            self.log('Create blob service')
            return CloudStorageAccount(storage_account_name, account_keys.keys[0].value).create_block_blob_service()
        except Exception as exc:
            self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
                                                                                                str(exc)))

    def create_default_pip(self, resource_group, location, name, allocation_method='Dynamic'):
        '''
        Create a default public IP address <name>01 to associate with a network interface.
        If a PIP address matching <vm name>01 exists, return it. Otherwise, create one.

        :param resource_group: name of an existing resource group
        :param location: a valid azure location
        :param name: base name to assign the public IP address
        :param allocation_method: one of 'Static' or 'Dynamic'
        :return: PIP object
        '''
        public_ip_name = name + '01'
        pip = None
        self.log("Starting create_default_pip {0}".format(public_ip_name))
        self.log("Check to see if public IP {0} exists".format(public_ip_name))
        try:
            pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)
        except CloudError:
            # Not found: fall through and create it.
            pass
        if pip:
            self.log("Public ip {0} found.".format(public_ip_name))
            self.check_provisioning_state(pip)
            return pip
        params = PublicIPAddress(
            location=location,
            public_ip_allocation_method=allocation_method,
        )
        self.log('Creating default public IP {0}'.format(public_ip_name))
        try:
            poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
        except Exception as exc:
            self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))
        return self.get_poller_result(poller)

    def create_default_securitygroup(self, resource_group, location, name, os_type, open_ports):
        '''
        Create a default security group <name>01 to associate with a network interface. If a security group matching
        <name>01 exists, return it. Otherwise, create one.

        :param resource_group: Resource group name
        :param location: azure location name
        :param name: base name to use for the security group
        :param os_type: one of 'Windows' or 'Linux'. Determines the default rules added to the security group.
        :param open_ports: optional list of ports to open instead of the OS-type defaults
                           (SSH 22 for Linux; RDP 3389 and WinRM 5986 for Windows).
        :return: security_group object
        '''
        security_group_name = name + '01'
        group = None
        self.log("Create security group {0}".format(security_group_name))
        self.log("Check to see if security group {0} exists".format(security_group_name))
        try:
            group = self.network_client.network_security_groups.get(resource_group, security_group_name)
        except CloudError:
            # Not found: fall through and create it.
            pass
        if group:
            self.log("Security group {0} found.".format(security_group_name))
            self.check_provisioning_state(group)
            return group
        parameters = NetworkSecurityGroup()
        parameters.location = location
        if not open_ports:
            # Open default ports based on OS type
            if os_type == 'Linux':
                # add an inbound SSH rule
                parameters.security_rules = [
                    SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow SSH Access',
                                 source_port_range='*', destination_port_range='22', priority=100, name='SSH')
                ]
                parameters.location = location
            else:
                # for windows add inbound RDP and WinRM rules
                parameters.security_rules = [
                    SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow RDP port 3389',
                                 source_port_range='*', destination_port_range='3389', priority=100, name='RDP01'),
                    SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow WinRM HTTPS port 5986',
                                 source_port_range='*', destination_port_range='5986', priority=101, name='WinRM01'),
                ]
        else:
            # Open custom ports
            parameters.security_rules = []
            priority = 100
            for port in open_ports:
                priority += 1
                rule_name = "Rule_{0}".format(priority)
                parameters.security_rules.append(
                    SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', source_port_range='*',
                                 destination_port_range=str(port), priority=priority, name=rule_name)
                )
        self.log('Creating default security group {0}'.format(security_group_name))
        try:
            poller = self.network_client.network_security_groups.create_or_update(resource_group,
                                                                                  security_group_name,
                                                                                  parameters)
        except Exception as exc:
            self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))
        return self.get_poller_result(poller)

    def _register(self, key):
        """Best-effort one-time registration of a resource provider namespace."""
        try:
            # We have to perform the one-time registration here. Otherwise, we receive an error the first
            # time we attempt to use the requested client.
            resource_client = self.rm_client
            resource_client.providers.register(key)
        except Exception as exc:
            self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
            self.log("You might need to register {0} using an admin account".format(key))
            self.log(("To register a provider using the Python CLI: "
                      "https://docs.microsoft.com/azure/azure-resource-manager/"
                      "resource-manager-common-deployment-errors#noregisteredproviderfound"))

    @property
    def storage_client(self):
        """Lazily constructed StorageManagementClient."""
        self.log('Getting storage client...')
        if not self._storage_client:
            self.check_client_version('storage', storage_client_version, AZURE_EXPECTED_VERSIONS['storage_client_version'])
            self._storage_client = StorageManagementClient(self.azure_credentials, self.subscription_id)
            self._register('Microsoft.Storage')
        return self._storage_client

    @property
    def network_client(self):
        """Lazily constructed NetworkManagementClient."""
        self.log('Getting network client')
        if not self._network_client:
            self.check_client_version('network', network_client_version, AZURE_EXPECTED_VERSIONS['network_client_version'])
            self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
            self._register('Microsoft.Network')
        return self._network_client

    @property
    def rm_client(self):
        """Lazily constructed ResourceManagementClient."""
        self.log('Getting resource manager client')
        if not self._resource_client:
            self.check_client_version('resource', resource_client_version, AZURE_EXPECTED_VERSIONS['resource_client_version'])
            self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
        return self._resource_client

    @property
    def compute_client(self):
        """Lazily constructed ComputeManagementClient."""
        self.log('Getting compute client')
        if not self._compute_client:
            self.check_client_version('compute', compute_client_version, AZURE_EXPECTED_VERSIONS['compute_client_version'])
            self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
            self._register('Microsoft.Compute')
        return self._compute_client
|
jrclaramunt/django-cms | refs/heads/develop | cms/test_utils/project/sampleapp/ns_urls.py | 12 | from django.conf.urls import patterns, url
from django.utils.translation import ugettext_lazy as _
"""
Also used in cms.tests.ApphooksTestCase
"""
urlpatterns = patterns('cms.test_utils.project.sampleapp.views',
url(r'^current-app/$', 'current_app', name='current-app'),
url(_('page'), 'current_app', name='translated-url'),
)
|
belmiromoreira/nova | refs/heads/master | nova/tests/unit/api/openstack/compute/contrib/test_aggregates.py | 25 | # Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the aggregates admin api."""
import mock
from webob import exc
from nova.api.openstack.compute.contrib import aggregates as aggregates_v2
from nova.api.openstack.compute.plugins.v3 import aggregates as aggregates_v21
from nova import context
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
# Canned aggregate fixtures shared by the test cases below.
AGGREGATE_LIST = [
    {"name": "aggregate1", "id": "1", "availability_zone": "nova1"},
    {"name": "aggregate2", "id": "2", "availability_zone": "nova1"},
    {"name": "aggregate3", "id": "3", "availability_zone": "nova2"},
    {"name": "aggregate1", "id": "4", "availability_zone": "nova1"}]
# Fully populated aggregate, including metadata and hosts.
AGGREGATE = {"name": "aggregate1",
             "id": "1",
             "availability_zone": "nova1",
             "metadata": {"foo": "bar"},
             "hosts": ["host1, host2"]}
# Same aggregate without metadata/hosts, as the controller formats it.
FORMATTED_AGGREGATE = {"name": "aggregate1",
                       "id": "1",
                       "availability_zone": "nova1"}
class FakeRequest(object):
    """Minimal request stand-in carrying only an admin context in environ."""
    environ = {"nova.context": context.get_admin_context()}
class AggregateTestCaseV21(test.NoDBTestCase):
"""Test Case for aggregates admin api."""
add_host = 'self.controller._add_host'
remove_host = 'self.controller._remove_host'
set_metadata = 'self.controller._set_metadata'
bad_request = exception.ValidationError
def _set_up(self):
self.controller = aggregates_v21.AggregateController()
self.req = fakes.HTTPRequest.blank('/v3/os-aggregates',
use_admin_context=True)
self.user_req = fakes.HTTPRequest.blank('/v3/os-aggregates')
self.context = self.req.environ['nova.context']
def setUp(self):
super(AggregateTestCaseV21, self).setUp()
self._set_up()
def test_index(self):
def stub_list_aggregates(context):
if context is None:
raise Exception()
return AGGREGATE_LIST
self.stubs.Set(self.controller.api, 'get_aggregate_list',
stub_list_aggregates)
result = self.controller.index(self.req)
self.assertEqual(AGGREGATE_LIST, result["aggregates"])
def test_index_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index,
self.user_req)
def test_create(self):
def stub_create_aggregate(context, name, availability_zone):
self.assertEqual(context, self.context, "context")
self.assertEqual("test", name, "name")
self.assertEqual("nova1", availability_zone, "availability_zone")
return AGGREGATE
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
result = self.controller.create(self.req, body={"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
self.assertEqual(FORMATTED_AGGREGATE, result["aggregate"])
def test_create_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create, self.user_req,
body={"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_duplicate_aggregate_name(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.AggregateNameExists(aggregate_name=name)
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller.create,
self.req, body={"aggregate":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_incorrect_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
raise exception.InvalidAggregateAction(action='create_aggregate',
aggregate_id="'N/A'",
reason='invalid zone')
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
self.assertRaises(exc.HTTPBadRequest,
self.controller.create,
self.req, body={"aggregate":
{"name": "test",
"availability_zone": "nova_bad"}})
def test_create_with_no_aggregate(self):
self.assertRaises(self.bad_request, self.controller.create,
self.req, body={"foo":
{"name": "test",
"availability_zone": "nova1"}})
def test_create_with_no_name(self):
self.assertRaises(self.bad_request, self.controller.create,
self.req, body={"aggregate":
{"foo": "test",
"availability_zone": "nova1"}})
def test_create_with_no_availability_zone(self):
def stub_create_aggregate(context, name, availability_zone):
self.assertEqual(context, self.context, "context")
self.assertEqual("test", name, "name")
self.assertIsNone(availability_zone, "availability_zone")
return AGGREGATE
self.stubs.Set(self.controller.api, "create_aggregate",
stub_create_aggregate)
result = self.controller.create(self.req,
body={"aggregate": {"name": "test"}})
self.assertEqual(FORMATTED_AGGREGATE, result["aggregate"])
def test_create_with_null_name(self):
self.assertRaises(self.bad_request, self.controller.create,
self.req, body={"aggregate":
{"name": "",
"availability_zone": "nova1"}})
def test_create_with_name_too_long(self):
self.assertRaises(self.bad_request, self.controller.create,
self.req, body={"aggregate":
{"name": "x" * 256,
"availability_zone": "nova1"}})
def test_create_with_availability_zone_too_long(self):
self.assertRaises(self.bad_request, self.controller.create,
self.req, body={"aggregate":
{"name": "test",
"availability_zone": "x" * 256}})
def test_create_with_null_availability_zone(self):
aggregate = {"name": "aggregate1",
"id": "1",
"availability_zone": None,
"metadata": {},
"hosts": []}
formatted_aggregate = {"name": "aggregate1",
"id": "1",
"availability_zone": None}
def stub_create_aggregate(context, name, az_name):
self.assertEqual(context, self.context, "context")
self.assertEqual("aggregate1", name, "name")
self.assertIsNone(az_name, "availability_zone")
return aggregate
self.stubs.Set(self.controller.api, 'create_aggregate',
stub_create_aggregate)
result = self.controller.create(self.req,
body={"aggregate":
{"name": "aggregate1",
"availability_zone": None}})
self.assertEqual(formatted_aggregate, result["aggregate"])
def test_create_with_empty_availability_zone(self):
self.assertRaises(self.bad_request, self.controller.create,
self.req, body={"aggregate":
{"name": "test",
"availability_zone": ""}})
def test_create_with_extra_invalid_arg(self):
self.assertRaises(self.bad_request, self.controller.create,
self.req, body={"name": "test",
"availability_zone": "nova1",
"foo": 'bar'})
def test_show(self):
def stub_get_aggregate(context, id):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", id, "id")
return AGGREGATE
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
aggregate = self.controller.show(self.req, "1")
self.assertEqual(AGGREGATE, aggregate["aggregate"])
def test_show_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show,
self.user_req, "1")
def test_show_with_invalid_id(self):
def stub_get_aggregate(context, id):
raise exception.AggregateNotFound(aggregate_id=2)
self.stubs.Set(self.controller.api, 'get_aggregate',
stub_get_aggregate)
self.assertRaises(exc.HTTPNotFound,
self.controller.show, self.req, "2")
def test_update(self):
body = {"aggregate": {"name": "new_name",
"availability_zone": "nova1"}}
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual(body["aggregate"], values, "values")
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_no_admin(self):
body = {"aggregate": {"availability_zone": "nova"}}
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.update,
self.user_req, "1", body=body)
def test_update_with_only_name(self):
body = {"aggregate": {"name": "new_name"}}
def stub_update_aggregate(context, aggregate, values):
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_only_availability_zone(self):
body = {"aggregate": {"availability_zone": "nova1"}}
def stub_update_aggregate(context, aggregate, values):
return AGGREGATE
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_update_with_no_updates(self):
test_metadata = {"aggregate": {}}
self.assertRaises(self.bad_request, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_no_update_key(self):
test_metadata = {"asdf": {}}
self.assertRaises(self.bad_request, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_wrong_updates(self):
test_metadata = {"aggregate": {"status": "disable",
"foo": "bar"}}
self.assertRaises(self.bad_request, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_null_name(self):
test_metadata = {"aggregate": {"name": ""}}
self.assertRaises(self.bad_request, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_name_too_long(self):
test_metadata = {"aggregate": {"name": "x" * 256}}
self.assertRaises(self.bad_request, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_availability_zone_too_long(self):
test_metadata = {"aggregate": {"availability_zone": "x" * 256}}
self.assertRaises(self.bad_request, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_empty_availability_zone(self):
test_metadata = {"aggregate": {"availability_zone": ""}}
self.assertRaises(self.bad_request, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_null_availability_zone(self):
body = {"aggregate": {"availability_zone": None}}
aggre = {"name": "aggregate1",
"id": "1",
"availability_zone": None}
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertIsNone(values["availability_zone"], "availability_zone")
return aggre
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
result = self.controller.update(self.req, "1", body=body)
self.assertEqual(aggre, result["aggregate"])
def test_update_with_bad_aggregate(self):
test_metadata = {"aggregate": {"name": "test_name"}}
def stub_update_aggregate(context, aggregate, metadata):
raise exception.AggregateNotFound(aggregate_id=2)
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.update,
self.req, "2", body=test_metadata)
def test_update_with_duplicated_name(self):
test_metadata = {"aggregate": {"name": "test_name"}}
def stub_update_aggregate(context, aggregate, metadata):
raise exception.AggregateNameExists(aggregate_name="test_name")
self.stubs.Set(self.controller.api, "update_aggregate",
stub_update_aggregate)
self.assertRaises(exc.HTTPConflict, self.controller.update,
self.req, "2", body=test_metadata)
def test_invalid_action(self):
body = {"append_host": {"host": "host1"}}
self.assertRaises(self.bad_request,
eval(self.add_host), self.req, "1", body=body)
def test_update_with_invalid_action(self):
with mock.patch.object(self.controller.api, "update_aggregate",
side_effect=exception.InvalidAggregateAction(
action='invalid', aggregate_id='agg1', reason= "not empty")):
body = {"aggregate": {"availability_zone": "nova"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
self.req, "1", body=body)
    def test_add_host(self):
        # Happy path: the handler unpacks {"add_host": {"host": ...}} and
        # delegates to the compute API's add_host_to_aggregate().
        def stub_add_host_to_aggregate(context, aggregate, host):
            self.assertEqual(context, self.context, "context")
            self.assertEqual("1", aggregate, "aggregate")
            self.assertEqual("host1", host, "host")
            return AGGREGATE
        self.stubs.Set(self.controller.api, "add_host_to_aggregate",
                       stub_add_host_to_aggregate)
        # self.add_host names the handler as a string; eval() lets the v2
        # and v2.1 test subclasses target different controller methods.
        aggregate = eval(self.add_host)(self.req, "1",
                                        body={"add_host": {"host":
                                                           "host1"}})
        self.assertEqual(aggregate["aggregate"], AGGREGATE)
def test_add_host_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
eval(self.add_host),
self.user_req, "1",
body={"add_host": {"host": "host1"}})
def test_add_host_with_already_added_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.AggregateHostExists(aggregate_id=aggregate,
host=host)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPConflict, eval(self.add_host),
self.req, "1",
body={"add_host": {"host": "host1"}})
def test_add_host_with_bad_aggregate(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPNotFound, eval(self.add_host),
self.req, "bogus_aggregate",
body={"add_host": {"host": "host1"}})
def test_add_host_with_bad_host(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise exception.ComputeHostNotFound(host=host)
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPNotFound, eval(self.add_host),
self.req, "1",
body={"add_host": {"host": "bogus_host"}})
def test_add_host_with_missing_host(self):
self.assertRaises(self.bad_request, eval(self.add_host),
self.req, "1", body={"add_host": {"asdf": "asdf"}})
def test_add_host_with_invalid_format_host(self):
self.assertRaises(self.bad_request, eval(self.add_host),
self.req, "1", body={"add_host": {"host": "a" * 300}})
def test_add_host_with_multiple_hosts(self):
self.assertRaises(self.bad_request, eval(self.add_host),
self.req, "1", body={"add_host": {"host": ["host1", "host2"]}})
def test_add_host_raises_key_error(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise KeyError
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
self.assertRaises(exc.HTTPInternalServerError,
eval(self.add_host), self.req, "1",
body={"add_host": {"host": "host1"}})
def test_add_host_with_invalid_request(self):
self.assertRaises(self.bad_request, eval(self.add_host),
self.req, "1", body={"add_host": "1"})
def test_add_host_with_non_string(self):
self.assertRaises(self.bad_request, eval(self.add_host),
self.req, "1", body={"add_host": {"host": 1}})
def test_remove_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual("host1", host, "host")
stub_remove_host_from_aggregate.called = True
return {}
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
eval(self.remove_host)(self.req, "1",
body={"remove_host": {"host": "host1"}})
self.assertTrue(stub_remove_host_from_aggregate.called)
def test_remove_host_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
eval(self.remove_host),
self.user_req, "1",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_bad_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
self.req, "bogus_aggregate",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_host_not_in_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.AggregateHostNotFound(aggregate_id=aggregate,
host=host)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
self.req, "1",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_bad_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.ComputeHostNotFound(host=host)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
self.req, "1", body={"remove_host": {"host": "bogushost"}})
def test_remove_host_with_missing_host(self):
self.assertRaises(self.bad_request, eval(self.remove_host),
self.req, "1", body={"asdf": "asdf"})
def test_remove_host_with_multiple_hosts(self):
self.assertRaises(self.bad_request, eval(self.remove_host),
self.req, "1", body={"remove_host": {"host":
["host1", "host2"]}})
def test_remove_host_with_extra_param(self):
self.assertRaises(self.bad_request, eval(self.remove_host),
self.req, "1", body={"remove_host": {"asdf": "asdf",
"host": "asdf"}})
def test_remove_host_with_invalid_request(self):
self.assertRaises(self.bad_request,
eval(self.remove_host),
self.req, "1", body={"remove_host": "1"})
def test_remove_host_with_missing_host_empty(self):
self.assertRaises(self.bad_request,
eval(self.remove_host),
self.req, "1", body={"remove_host": {}})
    def test_set_metadata(self):
        # Happy path: the metadata dict is forwarded verbatim to the
        # compute API's update_aggregate_metadata().
        body = {"set_metadata": {"metadata": {"foo": "bar"}}}
        def stub_update_aggregate(context, aggregate, values):
            self.assertEqual(context, self.context, "context")
            self.assertEqual("1", aggregate, "aggregate")
            # DictMatches compares dict contents irrespective of ordering.
            self.assertThat(body["set_metadata"]['metadata'],
                            matchers.DictMatches(values))
            return AGGREGATE
        self.stubs.Set(self.controller.api,
                       "update_aggregate_metadata",
                       stub_update_aggregate)
        # self.set_metadata names the handler as a string, eval()'d so the
        # v2 / v2.1 subclasses can point at different controller methods.
        result = eval(self.set_metadata)(self.req, "1", body=body)
        self.assertEqual(AGGREGATE, result["aggregate"])
def test_set_metadata_delete(self):
body = {"set_metadata": {"metadata": {"foo": None}}}
with mock.patch.object(self.controller.api,
'update_aggregate_metadata') as mocked:
mocked.return_value = AGGREGATE
result = eval(self.set_metadata)(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
mocked.assert_called_once_with(self.context, "1",
body["set_metadata"]["metadata"])
def test_set_metadata_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
eval(self.set_metadata),
self.user_req, "1",
body={"set_metadata": {"metadata":
{"foo": "bar"}}})
def test_set_metadata_with_bad_aggregate(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
def stub_update_aggregate(context, aggregate, metadata):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
stub_update_aggregate)
self.assertRaises(exc.HTTPNotFound, eval(self.set_metadata),
self.req, "bad_aggregate", body=body)
def test_set_metadata_with_missing_metadata(self):
body = {"asdf": {"foo": "bar"}}
self.assertRaises(self.bad_request, eval(self.set_metadata),
self.req, "1", body=body)
def test_set_metadata_with_extra_params(self):
body = {"metadata": {"foo": "bar"}, "asdf": {"foo": "bar"}}
self.assertRaises(self.bad_request, eval(self.set_metadata),
self.req, "1", body=body)
def test_set_metadata_without_dict(self):
body = {"set_metadata": {'metadata': 1}}
self.assertRaises(self.bad_request, eval(self.set_metadata),
self.req, "1", body=body)
def test_set_metadata_with_empty_key(self):
body = {"set_metadata": {"metadata": {"": "value"}}}
self.assertRaises(self.bad_request, eval(self.set_metadata),
self.req, "1", body=body)
def test_set_metadata_with_key_too_long(self):
body = {"set_metadata": {"metadata": {"x" * 256: "value"}}}
self.assertRaises(self.bad_request, eval(self.set_metadata),
self.req, "1", body=body)
def test_set_metadata_with_value_too_long(self):
body = {"set_metadata": {"metadata": {"key": "x" * 256}}}
self.assertRaises(self.bad_request, eval(self.set_metadata),
self.req, "1", body=body)
def test_set_metadata_with_string(self):
body = {"set_metadata": {"metadata": "test"}}
self.assertRaises(self.bad_request, eval(self.set_metadata),
self.req, "1", body=body)
def test_delete_aggregate(self):
def stub_delete_aggregate(context, aggregate):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
stub_delete_aggregate.called = True
self.stubs.Set(self.controller.api, "delete_aggregate",
stub_delete_aggregate)
self.controller.delete(self.req, "1")
self.assertTrue(stub_delete_aggregate.called)
def test_delete_aggregate_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.delete,
self.user_req, "1")
def test_delete_aggregate_with_bad_aggregate(self):
def stub_delete_aggregate(context, aggregate):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api, "delete_aggregate",
stub_delete_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.delete,
self.req, "bogus_aggregate")
def test_delete_aggregate_with_host(self):
with mock.patch.object(self.controller.api, "delete_aggregate",
side_effect=exception.InvalidAggregateAction(
action="delete", aggregate_id="agg1",
reason="not empty")):
self.assertRaises(exc.HTTPBadRequest,
self.controller.delete,
self.req, "agg1")
class AggregateTestCaseV2(AggregateTestCaseV21):
    # Legacy v2 API variant of the inherited test case.  v2 routes add/remove
    # host and set_metadata through a single generic "action" handler, so the
    # inherited tests eval() these strings to reach it.
    add_host = 'self.controller.action'
    remove_host = 'self.controller.action'
    set_metadata = 'self.controller.action'
    bad_request = exc.HTTPBadRequest
    def _set_up(self):
        # v2 builds its admin request via the local FakeRequest helper rather
        # than the v2.1 request factory used by the parent class.
        self.controller = aggregates_v2.AggregateController()
        self.req = FakeRequest()
        self.user_req = fakes.HTTPRequest.blank('/v2/os-aggregates')
        self.context = self.req.environ['nova.context']
    def test_add_host_raises_key_error(self):
        def stub_add_host_to_aggregate(context, aggregate, host):
            raise KeyError
        self.stubs.Set(self.controller.api, "add_host_to_aggregate",
                       stub_add_host_to_aggregate)
        # NOTE(mtreinish) The check for a KeyError here is to ensure that
        # if add_host_to_aggregate() raises a KeyError it propagates. At
        # one point the api code would mask the error as a HTTPBadRequest.
        # This test is to ensure that this doesn't occur again.
        self.assertRaises(KeyError, eval(self.add_host), self.req, "1",
                          body={"add_host": {"host": "host1"}})
    def test_add_host_to_aggregate_with_non_admin(self):
        # Relaxing the policy rule lets authorization pass; the controller
        # itself must still reject the non-admin context.
        rule_name = "compute_extension:aggregates"
        self.policy.set_rules({rule_name: ""})
        self.assertRaises(exception.AdminRequired, self.controller._add_host,
                          self.user_req, '1', {'host': 'fake_host'})
    def test_remove_host_from_aggregate_with_non_admin(self):
        # Same admin-enforcement check as above, for the remove-host path.
        rule_name = "compute_extension:aggregates"
        self.policy.set_rules({rule_name: ""})
        self.assertRaises(exception.AdminRequired,
                          self.controller._remove_host, self.user_req,
                          '1', {'host': 'fake_host'})
|
hmen89/odoo | refs/heads/master | addons/document/__init__.py | 434 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import content_index
import std_index
import document
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
bejmy/backend | refs/heads/master | bejmy/accounts/migrations/0004_account_order.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-13 09:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds Account.order, a non-negative
    # small integer defaulting to 0, presumably for manual ordering of
    # accounts in listings -- confirm intended use against the model/UI.
    dependencies = [
        ('accounts', '0003_account_balance_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='account',
            name='order',
            field=models.PositiveSmallIntegerField(default=0, verbose_name='order'),
        ),
    ]
|
thenakliman/nirikshak | refs/heads/basic_framework | nirikshak/tests/unit/post_task/test_dummy.py | 1 | # Copyright 2017 <thenakliman@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import unittest
from nirikshak.post_task import dummy
from nirikshak.tests.unit import base
class PostTaskTest(unittest.TestCase):
    """The dummy console formatter must echo its input unchanged."""

    def _assert_identity(self, payload):
        # Snapshot the payload first so the comparison is unaffected even
        # if the formatter were to mutate its keyword arguments.
        snapshot = copy.deepcopy(payload)
        self.assertEqual(snapshot,
                         dummy.FormatOutputConsole().format_output(**payload))

    def test_format_main_dict(self):
        self._assert_identity(base.get_main_yaml())

    def test_format_soochi(self):
        self._assert_identity(base.get_test_keystone_soochi()[0])
|
zeroonegit/python | refs/heads/master | crossincode.com/Python_Getting_Started/isEqual2.py | 2 | def isEqual(num1, num2):
if num1 < num2:
print('too small')
return False
if num1 > num2:
print('too big')
return False
if num1 == num2:
print('bingo')
return True
from random import randint


def main():
    """Interactive loop: read guesses until the player hits the target."""
    num = randint(1, 100)
    print('Guess what I think? ')
    bingo = False
    while not bingo:
        # input() returns a string in Python 3; the original compared it
        # directly against the int target, which raises TypeError there.
        # Convert explicitly and re-prompt on non-numeric input.
        try:
            answer = int(input())
        except ValueError:
            print('Please enter a whole number.')
            continue
        bingo = isEqual(answer, num)


# Guarding the entry point keeps the game from running on import.
if __name__ == '__main__':
    main()
|
JorisE/xhtml2pdf | refs/heads/master | test/witherror.py | 154 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "$Revision: 194 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2008-04-18 18:59:53 +0200 (Fr, 18 Apr 2008) $"
import ho.pisa as pisa
def helloWorld():
    # Render a tiny HTML snippet to "<this file>.pdf".  The data: URI holds
    # invalid base64 -- apparently deliberate (the file is named witherror)
    # so that pisa's error handling is exercised; show_error_as_pdf=True
    # makes the error itself be rendered into the PDF.
    filename = __file__ + ".pdf"
    pdf = pisa.CreatePDF(
        u"Hello <strong>World</strong> <img src='data:image/jpg;base64,?´*'>",
        file(filename, "wb"),  # Python 2 built-in file(); open() on Python 3
        show_error_as_pdf=True,
    )
    if not pdf.err:
        # Only launch a viewer when rendering reported no hard error.
        pisa.startViewer(filename)
if __name__=="__main__":
    # Enable pisa's console logging before exercising the error path.
    pisa.showLogging()
    helloWorld()
|
michaelgallacher/intellij-community | refs/heads/master | python/testData/formatter/newLineAfterColonMultiClause.py | 83 | try: pass
finally: pass
|
wwj718/ANALYSE | refs/heads/master | cms/djangoapps/contentstore/management/commands/tests/test_export_all_courses.py | 37 | """
Test for export all courses.
"""
import shutil
from tempfile import mkdtemp
from contentstore.management.commands.export_all_courses import export_courses_to_output_path
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class ExportAllCourses(ModuleStoreTestCase):
    """
    Tests exporting all courses.
    """
    def setUp(self):
        """ Common setup. """
        # NOTE(review): does not call super().setUp(); confirm whether
        # ModuleStoreTestCase relies on it for fixture setup.
        # Grab the underlying mongo modulestore directly so the test can
        # corrupt a course document at the collection level below.
        self.store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo)
        self.temp_dir = mkdtemp()
        self.first_course = CourseFactory.create(org="test", course="course1", display_name="run1")
        self.second_course = CourseFactory.create(org="test", course="course2", display_name="run2")
    def test_export_all_courses(self):
        """
        Test exporting good and faulty courses
        """
        # check that both courses exported successfully
        courses, failed_export_courses = export_courses_to_output_path(self.temp_dir)
        self.assertEqual(len(courses), 2)
        self.assertEqual(len(failed_export_courses), 0)
        # manually make second course faulty and check that it fails on export
        # NOTE(review): setting 'metadata.tags' to 'crash' presumably trips a
        # failure in the export pipeline -- confirm against the exporter code.
        second_course_id = self.second_course.id
        self.store.collection.update(
            {'_id.org': second_course_id.org, '_id.course': second_course_id.course, '_id.name': second_course_id.run},
            {'$set': {'metadata.tags': 'crash'}}
        )
        # Both courses are still enumerated, but exactly one fails to export.
        courses, failed_export_courses = export_courses_to_output_path(self.temp_dir)
        self.assertEqual(len(courses), 2)
        self.assertEqual(len(failed_export_courses), 1)
        self.assertEqual(failed_export_courses[0], unicode(second_course_id))
    def tearDown(self):
        """ Common cleanup. """
        # Remove the export scratch directory created in setUp().
        shutil.rmtree(self.temp_dir)
|
sungkim11/mhargadh | refs/heads/master | django/utils/simplejson/__init__.py | 324 | r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError("%r is not JSON serializable" % (o,))
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
# Django modification: try to use the system version first, providing it's
# either of a later version of has the C speedups in place. Otherwise, fall
# back to our local copy.
__version__ = '2.0.7'
use_system_version = False
try:
    # The system-installed version has priority providing it is either not an
    # earlier version or it contains the C speedups.
    import simplejson
    # NOTE(review): split('.') yields *strings*, so this comparison is
    # lexicographic per component -- e.g. '2.0.10' would sort below '2.0.7'
    # because '10' < '7'.  Harmless while versions stay single-digit, but a
    # numeric (int) comparison would be the robust fix.
    if (simplejson.__version__.split('.') >= __version__.split('.') or
        hasattr(simplejson, '_speedups')):
        from simplejson import *
        use_system_version = True
except ImportError:
    pass
if not use_system_version:
    try:
        from json import * # Python 2.6 preferred over local copy.
        # There is a "json" package around that is not Python's "json", so we
        # check for something that is only in the namespace of the version we
        # want.
        JSONDecoder
        use_system_version = True
    except (ImportError, NameError):
        pass
# If all else fails, we have a bundled version that can be used.
if not use_system_version:
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from django.utils.simplejson.decoder import JSONDecoder
from django.utils.simplejson.encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
|
zedshaw/librelist | refs/heads/master | lib/metaphone.py | 1 | #!python
#coding= latin-1
# This script implements the Double Metaphone algorythm (c) 1998, 1999 by Lawrence Philips
# it was translated to Python from the C source written by Kevin Atkinson (http://aspell.net/metaphone/)
# By Andrew Collins - January 12, 2007 who claims no rights to this work
# http://atomboy.isa-geek.com:8080/plone/Members/acoil/programing/double-metaphone
# Tested with Pyhon 2.4.3
# Updated Feb 14, 2007 - Found a typo in the 'gh' section
# Updated Dec 17, 2007 - Bugs fixed in 'S', 'Z', and 'J' sections. Thanks Chris Leong!
def dm(st) :
	"""dm(string) -> (string, string or None)
	returns the double metaphone codes for given string - always a tuple
	there are no checks done on the input string, but it should be a single word or name."""
	vowels = ['A', 'E', 'I', 'O', 'U', 'Y']
	# NOTE(review): non-ASCII bytes are dropped here, so the u'Ç'/u'Ñ' branches
	# below are unreachable for plain str input -- confirm this is intended.
	st = st.decode('ascii', 'ignore')
	st = st.upper() # st is short for string. I usually prefer descriptive over short, but this var is used a lot!
	is_slavo_germanic = (st.find('W') > -1 or st.find('K') > -1 or st.find('CZ') > -1 or st.find('WITZ') > -1)
	length = len(st)
	first = 2
	st = '-' * first + st + '------' # so we can index beyond the begining and end of the input string
	last = first + length -1
	pos = first # pos is short for position
	pri = sec = '' # primary and secondary metaphone codes
	#skip these silent letters when at start of word
	if st[first:first+2] in ["GN", "KN", "PN", "WR", "PS"] :
		pos += 1
	# Initial 'X' is pronounced 'Z' e.g. 'Xavier'
	if st[first] == 'X' :
		pri = sec = 'S' #'Z' maps to 'S'
		pos += 1
	# main loop through chars in st
	while pos <= last :
		#print str(pos) + '\t' + st[pos]
		ch = st[pos] # ch is short for character
		# nxt (short for next characters in metaphone code) is set to  a tuple of the next characters in
		# the primary and secondary codes and how many characters to move forward in the string.
		# the secondary code letter is given only when it is different than the primary.
		# This is just a trick to make the code easier to write and read.
		nxt = (None, 1) # default action is to add nothing and move to next char
		if ch in vowels :
			nxt = (None, 1)
			if pos == first : # all init vowels now map to 'A'
				nxt = ('A', 1)
		elif ch == 'B' :
			#"-mb", e.g", "dumb", already skipped over... see 'M' below
			if st[pos+1] == 'B' :
				nxt = ('P', 2)
			else :
				nxt = ('P', 1)
		elif ch == 'C' :
			# various germanic
			if (pos > first and st[pos-2] in vowels and st[pos-1:pos+1] == 'ACH' and \
			   (st[pos+2] not in ['I', 'E'] or st[pos-2:pos+4] in ['BACHER', 'MACHER'])) :
				nxt = ('K', 2)
			# special case 'CAESAR'
			elif pos == first and st[first:first+6] == 'CAESAR' :
				nxt = ('S', 2)
			elif st[pos:pos+4] == 'CHIA' : #italian 'chianti'
				nxt = ('K', 2)
			elif st[pos:pos+2] == 'CH' :
				# find 'michael'
				if pos > first and st[pos:pos+4] == 'CHAE' :
					nxt = ('K', 'X', 2)
				elif pos == first and (st[pos+1:pos+6] in ['HARAC', 'HARIS'] or \
				   st[pos+1:pos+4] in ["HOR", "HYM", "HIA", "HEM"]) and st[first:first+5] != 'CHORE' :
					nxt = ('K', 2)
				#germanic, greek, or otherwise 'ch' for 'kh' sound
				elif st[first:first+4] in ['VAN ', 'VON '] or st[first:first+3] == 'SCH' \
				   or st[pos-2:pos+4] in ["ORCHES", "ARCHIT", "ORCHID"] \
				   or st[pos+2] in ['T', 'S'] \
				   or ((st[pos-1] in ["A", "O", "U", "E"] or pos == first) \
				   and st[pos+2] in ["L", "R", "N", "M", "B", "H", "F", "V", "W"]) :
					nxt = ('K', 1)
				else :
					if pos == first :
						if st[first:first+2] == 'MC' :
							nxt = ('K', 2)
						else :
							nxt = ('X', 'K', 2)
					else :
						nxt = ('X', 2)
			#e.g, 'czerny'
			elif st[pos:pos+2] == 'CZ' and st[pos-2:pos+2] != 'WICZ' :
				nxt = ('S', 'X', 2)
			#e.g., 'focaccia'
			elif st[pos+1:pos+4] == 'CIA' :
				nxt = ('X', 3)
			#double 'C', but not if e.g. 'McClellan'
			elif st[pos:pos+2] == 'CC' and not (pos == (first +1) and st[first] == 'M') :
				#'bellocchio' but not 'bacchus'
				if st[pos+2] in ["I", "E", "H"] and st[pos+2:pos+4] != 'HU' :
					#'accident', 'accede' 'succeed'
					if (pos == (first +1) and st[first] == 'A') or \
					   st[pos-1:pos+4] in ['UCCEE', 'UCCES'] :
						nxt = ('KS', 3)
					#'bacci', 'bertucci', other italian
					else:
						nxt = ('X', 3)
				else :
					nxt = ('K', 2)
			elif st[pos:pos+2] in ["CK", "CG", "CQ"] :
				nxt = ('K', 'K', 2)
			elif st[pos:pos+2] in ["CI", "CE", "CY"] :
				#italian vs. english
				if st[pos:pos+3] in ["CIO", "CIE", "CIA"] :
					nxt = ('S', 'X', 2)
				else :
					nxt = ('S', 2)
			else :
				#name sent in 'mac caffrey', 'mac gregor
				if st[pos+1:pos+3] in [" C", " Q", " G"] :
					nxt = ('K', 3)
				else :
					if st[pos+1] in ["C", "K", "Q"] and st[pos+1:pos+3] not in ["CE", "CI"] :
						nxt = ('K', 2)
					else : # default for 'C'
						nxt = ('K', 1)
		elif ch == u'Ç' : # will never get here with st.encode('ascii', 'replace') above
			nxt = ('S', 1)
		elif ch == 'D' :
			if st[pos:pos+2] == 'DG' :
				if st[pos+2] in ['I', 'E', 'Y'] : #e.g. 'edge'
					nxt = ('J', 3)
				else :
					nxt = ('TK', 2)
			elif st[pos:pos+2] in ['DT', 'DD'] :
				nxt = ('T', 2)
			else :
				nxt = ('T', 1)
		elif ch == 'F' :
			if st[pos+1] == 'F' :
				nxt = ('F', 2)
			else :
				nxt = ('F', 1)
		elif ch == 'G' :
			if st[pos+1] == 'H' :
				if pos > first and st[pos-1] not in vowels :
					nxt = ('K', 2)
				elif pos < (first + 3) :
					if pos == first : #'ghislane', ghiradelli
						if st[pos+2] == 'I' :
							nxt = ('J', 2)
						else :
							nxt = ('K', 2)
				#Parker's rule (with some further refinements) - e.g., 'hugh'
				elif (pos > (first + 1) and st[pos-2] in ['B', 'H', 'D'] ) \
				   or (pos > (first + 2) and st[pos-3] in ['B', 'H', 'D'] ) \
				   or (pos > (first + 3) and st[pos-3] in ['B', 'H'] ) :
					nxt = (None, 2)
				else :
					# e.g., 'laugh', 'McLaughlin', 'cough', 'gough', 'rough', 'tough'
					if pos > (first + 2) and st[pos-1] == 'U' \
					   and st[pos-3] in ["C", "G", "L", "R", "T"] :
						nxt = ('F', 2)
					else :
						if pos > first and st[pos-1] != 'I' :
							nxt = ('K', 2)
			elif st[pos+1] == 'N' :
				if pos == (first +1) and st[first] in vowels and not is_slavo_germanic :
					nxt = ('KN', 'N', 2)
				else :
					# not e.g. 'cagney'
					if st[pos+2:pos+4] != 'EY' and st[pos+1] != 'Y' and not is_slavo_germanic :
						nxt = ('N', 'KN', 2)
					else :
						nxt = ('KN', 2)
			# 'tagliaro'
			elif st[pos+1:pos+3] == 'LI' and not is_slavo_germanic :
				nxt = ('KL', 'L', 2)
			# -ges-,-gep-,-gel-, -gie- at beginning
			elif pos == first and (st[pos+1] == 'Y' \
			   or st[pos+1:pos+3] in ["ES", "EP", "EB", "EL", "EY", "IB", "IL", "IN", "IE", "EI", "ER"]) :
				nxt = ('K', 'J', 2)
			# -ger-,  -gy-
			elif (st[pos+1:pos+2] == 'ER' or st[pos+1] == 'Y') \
			   and st[first:first+6] not in ["DANGER", "RANGER", "MANGER"] \
			   and st[pos-1] not in ['E', 'I'] and st[pos-1:pos+2] not in ['RGY', 'OGY'] :
				nxt = ('K', 'J', 2)
			# italian e.g, 'biaggi'
			elif st[pos+1] in ['E', 'I', 'Y'] or st[pos-1:pos+3] in ["AGGI", "OGGI"] :
				# obvious germanic
				if st[first:first+4] in ['VON ', 'VAN '] or st[first:first+3] == 'SCH' \
				   or st[pos+1:pos+3] == 'ET' :
					nxt = ('K', 2)
				else :
					# always soft if french ending
					if st[pos+1:pos+5] == 'IER ' :
						nxt = ('J', 2)
					else :
						nxt = ('J', 'K', 2)
			elif st[pos+1] == 'G' :
				nxt = ('K', 2)
			else :
				nxt = ('K', 1)
		elif ch == 'H' :
			# only keep if first & before vowel or btw. 2 vowels
			if (pos == first or st[pos-1] in vowels) and st[pos+1] in vowels :
				nxt = ('H', 2)
			else : # (also takes care of 'HH')
				nxt = (None, 1)
		elif ch == 'J' :
			# obvious spanish, 'jose', 'san jacinto'
			if st[pos:pos+4] == 'JOSE' or st[first:first+4] == 'SAN ' :
				if (pos == first and st[pos+4] == ' ') or st[first:first+4] == 'SAN ' :
					nxt = ('H',)
				else :
					nxt = ('J', 'H')
			elif pos == first and st[pos:pos+4] != 'JOSE' :
				nxt = ('J', 'A') # Yankelovich/Jankelowicz
			else :
				# spanish pron. of e.g. 'bajador'
				if st[pos-1] in vowels and not is_slavo_germanic \
				   and st[pos+1] in ['A', 'O'] :
					nxt = ('J', 'H')
				else :
					if pos == last :
						nxt = ('J', ' ')
					else :
						if st[pos+1] not in ["L", "T", "K", "S", "N", "M", "B", "Z"] \
						   and st[pos-1] not in ["S", "K", "L"] :
							nxt = ('J',)
						else :
							nxt = (None, )
			if st[pos+1] == 'J' :
				nxt = nxt + (2,)
			else :
				nxt = nxt + (1,)
		elif ch == 'K' :
			if st[pos+1] == 'K' :
				nxt = ('K', 2)
			else :
				nxt = ('K', 1)
		elif ch == 'L' :
			if st[pos+1] == 'L' :
				# spanish e.g. 'cabrillo', 'gallegos'
				if (pos == (last - 2) and st[pos-1:pos+3] in ["ILLO", "ILLA", "ALLE"]) \
				   or (st[last-1:last+1] in ["AS", "OS"] or st[last] in ["A", "O"] \
				   and st[pos-1:pos+3] == 'ALLE') :
					nxt = ('L', ' ', 2)
				else :
					nxt = ('L', 2)
			else :
				nxt = ('L', 1)
		elif ch == 'M' :
			if st[pos+1:pos+4] == 'UMB' \
			   and (pos + 1 == last or st[pos+2:pos+4] == 'ER') \
			   or st[pos+1] == 'M' :
				nxt = ('M', 2)
			else :
				nxt = ('M', 1)
		elif ch == 'N' :
			if st[pos+1] == 'N' :
				nxt = ('N', 2)
			else :
				nxt = ('N', 1)
		elif ch == u'Ñ' :
			nxt = ('N', 1)
		elif ch == 'P' :
			if st[pos+1] == 'H' :
				nxt = ('F', 2)
			elif st[pos+1] in ['P', 'B'] : # also account for "campbell", "raspberry"
				nxt = ('P', 2)
			else :
				nxt = ('P', 1)
		elif ch == 'Q' :
			if st[pos+1] == 'Q' :
				nxt = ('K', 2)
			else :
				nxt = ('K', 1)
		elif ch == 'R' :
			# french e.g. 'rogier', but exclude 'hochmeier'
			if pos == last and not is_slavo_germanic \
			   and st[pos-2:pos] == 'IE' and st[pos-4:pos-2] not in ['ME', 'MA'] :
				nxt = ('', 'R')
			else :
				nxt = ('R',)
			if st[pos+1] == 'R' :
				nxt = nxt + (2,)
			else :
				nxt = nxt + (1,)
		elif ch == 'S' :
			# special cases 'island', 'isle', 'carlisle', 'carlysle'
			if st[pos-1:pos+2] in ['ISL', 'YSL'] :
				nxt = (None, 1)
			# special case 'sugar-'
			elif pos == first and st[first:first+5] == 'SUGAR' :
				nxt =('X', 'S', 1)
			elif st[pos:pos+2] == 'SH' :
				# germanic
				if st[pos+1:pos+5] in ["HEIM", "HOEK", "HOLM", "HOLZ"] :
					nxt = ('S', 2)
				else :
					nxt = ('X', 2)
			# italian & armenian
			elif st[pos:pos+3] in ["SIO", "SIA"] or st[pos:pos+4] == 'SIAN' :
				if not is_slavo_germanic :
					nxt = ('S', 'X', 3)
				else :
					nxt = ('S', 3)
			# german & anglicisations, e.g. 'smith' match 'schmidt', 'snider' match 'schneider'
			# also, -sz- in slavic language altho in hungarian it is pronounced 's'
			elif (pos == first and st[pos+1] in ["M", "N", "L", "W"]) or st[pos+1] == 'Z' :
				nxt = ('S', 'X')
				if st[pos+1] == 'Z' :
					nxt = nxt + (2,)
				else :
					nxt = nxt + (1,)
			elif st[pos+2:pos+4] == 'SC' :
				# Schlesinger's rule
				if st[pos+2] == 'H' :
					# dutch origin, e.g. 'school', 'schooner'
					if st[pos+3:pos+5] in ["OO", "ER", "EN", "UY", "ED", "EM"] :
						# 'schermerhorn', 'schenker'
						if st[pos+3:pos+5] in ['ER', 'EN'] :
							nxt = ('X', 'SK', 3)
						else :
							nxt = ('SK', 3)
					else :
						if pos == first and st[first+3] not in vowels and st[first+3] != 'W' :
							nxt = ('X', 'S', 3)
						else :
							nxt = ('X', 3)
				elif st[pos+2] in ['I', 'E', 'Y'] :
					nxt = ('S', 3)
				else :
					nxt = ('SK', 3)
			# french e.g. 'resnais', 'artois'
			elif pos == last and st[pos-2:pos] in ['AI', 'OI'] :
				nxt = ('', 'S', 1)
			else :
				nxt = ('S',)
				if st[pos+1] in ['S', 'Z'] :
					nxt = nxt + (2,)
				else :
					nxt = nxt + (1,)
		elif ch == 'T' :
			if st[pos:pos+4] == 'TION' :
				nxt = ('X', 3)
			elif st[pos:pos+3] in ['TIA', 'TCH'] :
				nxt = ('X', 3)
			elif st[pos:pos+2] == 'TH' or st[pos:pos+3] == 'TTH' :
				# special case 'thomas', 'thames' or germanic
				if st[pos+2:pos+4] in ['OM', 'AM'] or st[first:first+4] in ['VON ', 'VAN '] \
				   or st[first:first+3] == 'SCH' :
					nxt = ('T', 2)
				else :
					nxt = ('0', 'T', 2)
			elif st[pos+1] in ['T', 'D'] :
				nxt = ('T', 2)
			else :
				nxt = ('T', 1)
		elif ch == 'V' :
			if st[pos+1] == 'V' :
				nxt = ('F', 2)
			else :
				nxt = ('F', 1)
		elif ch == 'W' :
			# can also be in middle of word
			if st[pos:pos+2] == 'WR' :
				nxt = ('R', 2)
			elif pos == first and st[pos+1] in vowels or st[pos:pos+2] == 'WH' :
				# Wasserman should match Vasserman
				if st[pos+1] in vowels :
					nxt = ('A', 'F', 1)
				else :
					nxt = ('A', 1)
			# Arnow should match Arnoff
			elif (pos == last and st[pos-1] in vowels) \
			   or st[pos-1:pos+5] in ["EWSKI", "EWSKY", "OWSKI", "OWSKY"] \
			   or st[first:first+3] == 'SCH' :
				nxt = ('', 'F', 1)
			# polish e.g. 'filipowicz'
			elif st[pos:pos+4] in ["WICZ", "WITZ"] :
				nxt = ('TS', 'FX', 4)
			else : # default is to skip it
				nxt = (None, 1)
		elif ch == 'X' :
			# french e.g. breaux
			nxt = (None,)
			if not(pos == last and (st[pos-3:pos] in ["IAU", "EAU"] \
			   or st[pos-2:pos] in ['AU', 'OU'])):
				nxt = ('KS',)
			if st[pos+1] in ['C', 'X'] :
				nxt = nxt + (2,)
			else :
				nxt = nxt + (1,)
		elif ch == 'Z' :
			# chinese pinyin e.g. 'zhao'
			if st[pos+1] == 'H' :
				nxt = ('J',)
			elif st[pos+1:pos+3] in ["ZO", "ZI", "ZA"] \
			   or (is_slavo_germanic and pos > first and st[pos-1] != 'T') :
				nxt = ('S', 'TS')
			else :
				nxt = ('S',)
			if st[pos+1] == 'Z' :
				nxt = nxt + (2,)
			else :
				nxt = nxt + (1,)
		# ----------------------------------
		# --- end checking letters------
		# ----------------------------------
		#print str(nxt)
		# Apply the accumulated action: a 2-tuple appends the same letter to
		# both codes; a 3-tuple appends different letters to primary/secondary.
		# The final element is always how many input chars to consume.
		if len(nxt) == 2 :
			if nxt[0] :
				pri += nxt[0]
				sec += nxt[0]
			pos += nxt[1]
		elif len(nxt) == 3 :
			if nxt[0] :
				pri += nxt[0]
			if nxt[1] :
				sec += nxt[1]
			pos += nxt[2]
	# When the two codes agree, the secondary is reported as None.
	if pri == sec :
		return (pri, None)
	else :
		return (pri, sec)
if __name__ == '__main__' :
	# Smoke test: each name maps to its expected code(s), printed (not asserted)
	# alongside the actual dm() output for manual comparison.
	names = {'maurice':'MRS','aubrey':'APR','cambrillo':'KMPR','heidi':'HT','katherine':'K0RN,KTRN',\
		'catherine':'K0RN,KTRN','richard':'RXRT,RKRT','bob':'PP','eric':'ARK','geoff':'JF,KF',\
		'dave':'TF','ray':'R','steven':'STFN','bryce':'PRS','randy':'RNT','bryan':'PRN',\
		'brian':'PRN','otto':'AT','auto':'AT', 'maisey':'MS, None', 'zhang':'JNK, None', 'solilijs':'SLLS, None'}
	for name in names.keys() :
		print name + '\t-->\t' + str(dm(name)) + '\t(' +names[name] + ')'
|
fpy171/django | refs/heads/master | django/contrib/gis/db/backends/spatialite/adapter.py | 586 | from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.db.backends.sqlite3.base import Database
class SpatiaLiteAdapter(WKTAdapter):
    "SQLite adaptor for geometry objects."
    def __conform__(self, protocol):
        # sqlite3 calls __conform__ when adapting unknown parameter types;
        # return the WKT string form so the geometry can be bound in SQL.
        if protocol is Database.PrepareProtocol:
            return str(self)
|
ErikAndren/linux | refs/heads/master | scripts/gdb/vmlinux-gdb.py | 367 | #
# gdb helper commands and functions for Linux kernel debugging
#
# loader module
#
# Copyright (c) Siemens AG, 2012, 2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import os
import sys

# Make the kernel's gdb helper package importable.  Fixed: ``sys`` was used
# below without being imported; ``import sys`` makes the script correct even
# when the hosting environment has not pre-populated the namespace.
sys.path.insert(0, os.path.dirname(__file__) + "/scripts/gdb")

try:
    # Probe that this gdb's Python API is new enough (parse_and_eval and
    # execute(to_string=...) appeared in gdb 7.2); ``gdb`` is provided by the
    # gdb process that sources this script.
    gdb.parse_and_eval("0")
    gdb.execute("", to_string=True)
except Exception:
    gdb.write("NOTE: gdb 7.2 or later required for Linux helper scripts to "
              "work.\n")
else:
    # API looks usable: load the helper command/function modules.
    import linux.utils
    import linux.symbols
    import linux.modules
    import linux.dmesg
    import linux.tasks
    import linux.cpus
|
naglis/mopidy-leftasrain | refs/heads/master | mopidy_leftasrain/backend.py | 2 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import urlparse
from mopidy import backend
from mopidy.exceptions import BackendError
from mopidy.models import Album, Artist, SearchResult, Track
import pykka
from . import logger
from .remote import COVER_URL, LeftAsRain, SONG_URL
def track_from_song_data(data, remote_url=False):
    """Build a mopidy ``Track`` from a leftasrain song-data dict.

    With ``remote_url`` the track URI points at the remote mp3; otherwise a
    ``leftasrain:track:...`` lookup URI is built from the song metadata.
    """
    if remote_url:
        track_uri = urlparse.urljoin(SONG_URL, '%s.mp3' % data['url'])
    else:
        track_uri = 'leftasrain:track:{artist:s} - {track_name:s}.{id:s}'.format(
            **data)
    album = Album(name='leftasrain.com',
                  images=[COVER_URL.format(**data)])
    return Track(
        name=data['track_name'],
        artists=[Artist(name=data['artist'])],
        album=album,
        comment=data['comment'].replace('\n', ''),
        date=data['date'],
        track_no=int(data['id']),
        last_modified=data['last_modified'],
        uri=track_uri,
    )
class LeftAsRainBackend(pykka.ThreadingActor, backend.Backend):
    # Mopidy backend actor for leftasrain.com; registers the URI scheme and
    # wires up the playback and library providers.
    uri_schemes = ['leftasrain']
    def __init__(self, config, audio):
        super(LeftAsRainBackend, self).__init__()
        self.audio = audio
        self.config = config
        # Remote client / local song-db wrapper, configured from the
        # extension's config section.
        self.leftasrain = LeftAsRain(config['leftasrain']['timeout'],
                                     config['leftasrain']['db_filename'])
        try:
            self.leftasrain.create_cache_dir()
        except OSError as e:
            # Surface cache-dir creation failures as a backend startup error.
            raise BackendError(e)
        self.playback = LeftAsRainPlaybackProvider(audio=audio, backend=self)
        self.library = LeftAsRainLibraryProvider(backend=self)
class LeftAsRainPlaybackProvider(backend.PlaybackProvider):
    """Resolves leftasrain track URIs to playable remote mp3 URLs."""

    def translate_uri(self, uri):
        # The numeric song id is the final dot-separated component of the URI.
        song_id = uri.rsplit('.', 1)[-1]
        song = self.backend.leftasrain.song_from_id(song_id)
        if not song:
            # Unknown id: return None so mopidy skips playback.
            return None
        return track_from_song_data(song, remote_url=True).uri
class LeftAsRainLibraryProvider(backend.LibraryProvider):
    # Serves track metadata for leftasrain URIs out of the locally cached
    # song database.
    def __init__(self, backend):
        super(LeftAsRainLibraryProvider, self).__init__(backend)
        self.backend.leftasrain.load_db()
    def _filter(self, types, queries, track):
        # True when any query substring case-insensitively matches any of the
        # given fields; ``track`` here is a raw song-data dict, not a Track.
        return any([
            q.lower() in track.get(t, '').lower()
            for q in queries for t in types
        ])
    def lookup(self, uri):
        # Resolve a leftasrain URI to a list of Tracks.  Supported forms:
        #   leftasrain:all          -> every known song
        #   leftasrain:last:<N>     -> the last N songs
        #   leftasrain:track:...<id> -> a single song by id
        if urlparse.urlsplit(uri).scheme not in self.backend.uri_schemes:
            return []
        result = []
        song_from_id = self.backend.leftasrain.song_from_id
        if uri == 'leftasrain:all':
            logger.info('Looking up all leftasrain tracks')
            result = [
                track_from_song_data(s, remote_url=True)
                for s in self.backend.leftasrain.songs]
        elif uri.startswith('leftasrain:last:'):
            try:
                total = self.backend.leftasrain.total
                # Start at least at id 1, N songs back from the end.
                n = max([1, total - int(uri.rpartition(':')[2])])
                # NOTE(review): xrange(n, total) excludes id == total, so the
                # newest song appears to be omitted -- confirm off-by-one.
                result = [
                    track_from_song_data(song_from_id(id_), remote_url=True)
                    for id_ in xrange(n, total)]
            except ValueError as e:
                logger.exception(e)
                result = []
        else:
            try:
                self.backend.leftasrain.validate_lookup_uri(uri)
                # Song id is the final dot-separated component of the URI.
                id_ = uri.split('.')[-1]
                logger.info('Looking up leftasrain track with ID: %s', id_)
                result = [
                    track_from_song_data(song_from_id(id_), remote_url=True)
                ]
            except ValueError as e:
                logger.exception(e)
                result = []
        # Persist any songs fetched during lookup to the local db cache.
        self.backend.leftasrain.maybe_save()
        return result
    def search(self, query=None, uris=None, exact=False):
        # TODO Support exact search
        filters = []
        def make_filter(types, queries):
            # Close over one (fields, queries) pair as a predicate on a song.
            def f(t):
                return self._filter(types, queries, t)
            return f
        def make_or_filter(filters):
            # Combine predicates: a song matches if any sub-filter matches.
            def f(t):
                return any([f_(t) for f_ in filters])
            return f
        if query:
            if 'any' in query:
                filters.append(make_filter(['artist', 'album', 'track_name'],
                                           query.get('any', [])))
            if 'artist' in query:
                filters.append(make_filter(['artist'],
                                           query.get('artist', [])))
            if 'album' in query:
                filters.append(make_filter(['album'], query.get('album', [])))
            if 'track_name' in query:
                filters.append(make_filter(['track_name'],
                                           query.get('track_name', [])))
        # build one filter from a list of filters
        f = make_or_filter(filters)
        return SearchResult(
            uri='leftasrain:search',
            tracks=map(track_from_song_data,
                       filter(f, self.backend.leftasrain.songs))
        )
|
jonathonwalz/ansible | refs/heads/devel | test/runner/lib/ansible_util.py | 70 | """Miscellaneous utility functions and classes specific to ansible cli tools."""
from __future__ import absolute_import, print_function
import os
from lib.util import common_environment
def ansible_environment(args, color=True):
    """Return environment variables for running ansible CLI tools.

    Starts from the common test environment, ensures the in-tree ``bin``
    directory is first on PATH, and layers on ansible-specific settings.

    :type args: CommonConfig
    :type color: bool
    :rtype: dict[str, str]
    """
    env = common_environment()
    path = env['PATH']

    # Prefer the checked-out ansible scripts over any system-wide install.
    ansible_path = os.path.join(os.getcwd(), 'bin')

    if not path.startswith(ansible_path + os.pathsep):
        path = ansible_path + os.pathsep + path

    ansible = dict(
        # Fixed: removed the redundant ``'%s' %`` wrapping; due to operator
        # precedence it only ever formatted the 'true' branch, producing the
        # same strings as this plain conditional but obscuring the intent.
        ANSIBLE_FORCE_COLOR='true' if args.color and color else 'false',
        ANSIBLE_DEPRECATION_WARNINGS='false',
        # Prevent user/system ansible.cfg files from leaking into tests.
        ANSIBLE_CONFIG='/dev/null',
        ANSIBLE_HOST_KEY_CHECKING='false',
        PYTHONPATH=os.path.abspath('lib'),
        PAGER='/bin/cat',
        PATH=path,
    )

    env.update(ansible)

    if args.debug:
        env.update(dict(ANSIBLE_DEBUG='true'))

    return env
|
tedsunnyday/Tweet2DowJones | refs/heads/master | examples/search_tweets.py | 1 | from TwitterAPI import TwitterAPI
# Term to search tweets for.
SEARCH_TERM = 'pizza'
# OAuth credentials -- fill these in with your app's keys before running.
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN_KEY = ''
ACCESS_TOKEN_SECRET = ''
api = TwitterAPI(
    CONSUMER_KEY,
    CONSUMER_SECRET,
    ACCESS_TOKEN_KEY,
    ACCESS_TOKEN_SECRET)
# Query the REST search endpoint and print each returned tweet's text
# (falling back to the raw item for non-tweet payloads, e.g. error messages).
r = api.request('search/tweets', {'q': SEARCH_TERM})
for item in r:
    print(item['text'] if 'text' in item else item)
# Show the remaining REST API rate-limit quota after the call.
print('\nQUOTA: %s' % r.get_rest_quota())
|
ejones/mythril | refs/heads/master | mythril/css.py | 1 | """
Python values representing CSS rules, allowing you to create (nestable)
CSS rules in Python. Here is an example::
from mythril.css import css
css( MyClass, border='1px solid #CCC', padding=5, size=( 200, 50 ) )[
css( ('some-class', SomeUtility), pos=( 1, 0, 'relative' ) ) ]
Corresponds to something equivalent to::
my-class { border: 1px solid #CCC; padding: 5px; width: 200px; height: 50px; }
my-class some-class, my-class some-utility {
position: relative; top: 1px; left: 0px; }
The functions `dumps` and `dump` in this module are responsible for
writing values to CSS. It is recommended that you separately write your CSS
into a static file (i.e, at app startup), rather than regenerating it on every
request. Nonetheless, anything is doable.
`special` is a dict that contains the definitions for, and allows you to
extend, special property names like "pos", "size". See `CssType` for
documentation.
"""
import sys
import Image
from cStringIO import StringIO
from functools import partial
from itertools import izip, imap
from numbers import Number
from uuid import uuid4
from operator import add
import os
from collections import Iterable
import re
import codecs
import mythril.html
from mythril import resources
from mythril.html import Element, Attribute as HtmlAttribute, cssid
from mythril.util import customtuple
class CssWriter( object ):
    """ Writes arbitrary Python values to CSS. For advanced use. See
    `dumps` or `dump` in this module for a simple way of converting to
    the CSS byte representation. Any arbitrary Python type can support/extend
    conversion to CSS by implementing a ``__css__`` method that takes the
    `CssWriter` instance as a sole argument.

    If ``encoding`` is provided, all ``str``s are decoded according to it;
    otherwise, the system default is used. ``output_encoding`` specifies the
    encoding of the output file. It defaults to "UTF-8" (if it is None, unicode
    is written to the file, so it must be something that accepts unicode)

    Note: at the moment, input strings are not "validated" because data-driven
    CSS is pretty rare. If you plan on using user-submitted css rules/property
    values, be sure to validate them ... somehow.
    """
    def __init__(self, file, encoding=None, output_encoding='UTF-8'):
        self.encoding = encoding or sys.getdefaultencoding()
        self.file = file if not output_encoding \
                    else codecs.getwriter(output_encoding)(file)
        # stack of values currently being written (for __css__ implementors)
        self.stack = []
        # current selector context as a tuple of selector strings
        self.selector = ('',)
    def write( self, value ):
        """ Translates the arbitrary Python ``value`` into CSS. """
        self.stack.append( value )
        oldselector = self.selector
        if hasattr( value, 'selector' ):
            # Nested rule: prefix each of its selectors with every ancestor
            # selector (descendant combinator), i.e. the cross product.
            childsel = value.selector if isinstance( value.selector, tuple ) \
                       else (value.selector,)
            self.selector = tuple( a + u' ' + b for a in self.selector
                                   for b in childsel)
        if hasattr( value, '__css__' ): value.__css__( self )
        elif isinstance( value, basestring ):
            self.file.write(value if isinstance(value, unicode) else
                            value.decode(self.encoding))
        elif isinstance( value, Number ):
            # Bare numbers are treated as pixel measurements.
            self.file.write(unicode(value) + u'px')
        elif isinstance( value, Iterable ):
            for item in value: self.write( item )
        # Fixed: this branch previously tested ``isinstance(value, Callable)``,
        # but ``Callable`` is never imported in this module, so reaching it
        # raised NameError.  The ``callable`` builtin is the equivalent check.
        elif callable( value ): self.write( value() )
        else: self.write(unicode(value))
        self.selector = oldselector
        self.stack.pop()
        return self
def dump(value, file, encoding=None, output_encoding='UTF-8'):
    """ Write the CSS byte representation of ``value`` to ``file``.

    ``str`` inputs are decoded with ``encoding`` (system default when omitted);
    output is encoded with ``output_encoding``, which defaults to UTF-8 (pass
    None to write unicode directly, in which case ``file`` must accept it).

    Note: input strings are not validated -- if you use user-submitted CSS
    rules/values, be sure to validate them yourself.
    """
    writer = CssWriter(file, encoding, output_encoding)
    writer.write(value)
class _DumpsFile:
def __init__(self): self.text = []
def write(self, text): self.text.append(text)
def flush(self): pass
def tell(self): return sum(imap(len, self.text))
def dumps(value, encoding=None):
    """ Return the CSS representation of ``value`` as a unicode string.
    See `dump` for details. """
    buf = _DumpsFile()
    dump(value, buf, encoding, None)
    return u''.join(buf.text)
class Attribute( HtmlAttribute ):
    """A CSS declaration (name/value pair); tuple values are written
    space-separated, e.g. ``border: 1px solid #CCC``."""
    __slots__ = ()
    def __css__( self, writer ):
        writer.write( (self.name, u':') )
        if not isinstance( self.value, tuple ):
            writer.write( self.value )
        else:
            for idx, part in enumerate( self.value ):
                if idx: writer.write( u' ' )
                writer.write( part )
class CssType( customtuple ):
    """ Represents the type of `css` and its derived (constructed) values.

    Use `special` in this module to add new special properties. By default, a
    number of such properties, like "size", "pos", and "background_gradient"
    exist which expand to multiple declarations. To add a new one, add a
    function which takes the "arguments" (the tuple assigned to the property
    in the css declaration) and returns a sequence of
    ``(property_name, property_value)`` pairs, to the ``special`` dict, keyed
    by CSS-name (i.e., "background-gradient", not "background_gradient").

    Some special properties statically generate images (gradients, corners)
    for speed and for downlevel browsers; generated files are hosted with
    `mythril.resources`.
    """
    def __new__( cls, selector, attrs, children ):
        return tuple.__new__( cls, (selector, attrs, children) )
    def __repr__( self ):
        return ('css(' + repr( self.selector ) +
                ', '.join( repr( tuple( a ) ) for a in self.attrs ) + ')' +
                (('[' + ', '.join( imap( repr, self.children ) ) + ']')
                  if self.children else ''))
    def __call__( self, selector, *attr_pairs, **attrs ):
        """
        css( selector, (attr_name, attr_value), ..., attr_name=attr_value, ... )
        The selector must be a string, a thing with a ``__name__`` (ie.,
        function or class) or a tuple, or a tuple of any of these (including
        tuples, recursively).
        """
        return type( self )(
            selector=cssid( selector ),
            # expand special properties (size, pos, ...) into real attrs
            attrs=list( special.run(
                        Attribute.from_args( attr_pairs, attrs ))),
            children=self.children )
    def __getitem__( self, arg ):
        """
        css[ child1, ... childN ]
        """
        if not type( arg ) == tuple: children = [arg]
        else: children = list(arg)
        # Fixed: previously ``arg`` was passed instead of the normalized
        # ``children`` list (which was computed but unused), so a single
        # non-tuple child was not wrapped and rendered as garbage.
        return type( self )( self.selector, self.attrs, children )
    def __css__( self, writer ):
        # Emit the (comma-separated) selectors, the declarations, then
        # recurse into nested rules.
        for i, sel in enumerate(writer.selector):
            if i > 0: writer.write( u',' )
            writer.write( sel )
        writer.write( u'{' )
        for attr in self.attrs:
            writer.write( attr ).write( u';' )
        writer.write( u'}' )
        for child in self.children:
            writer.write( child )
# The root, empty rule: build concrete rules via ``css(selector, ...)[children]``.
css = CssType( selector='', attrs=(), children=() )
class _CssSpecials( dict ):
    # Registry mapping a CSS property name to an expander function; see
    # `register` for how expanders are declared and `run` for how they apply.
    def register( self, name=None ):
        """ Decorator. Registers the function as a "special" css property by
        the name of the function or ``name`` if it is given. Whenever this
        property is used in ``css`` values, the tuple or single value it is
        given is passed to the function as argument(s), and the returned
        key/value pair(s) are used as the real (expanded) attributes.
        Note: remember that ``css`` attribute names undergo "cssification", so the
        name given here is 'background_gradient', it will still be activated
        if an attribute called "background-gradient" is given. """
        def fix_attr_name( name ):
            # python_name -> css-name, e.g. 'border_radius' -> 'border-radius'
            if name.endswith( '_' ): name = name[:-1]
            return name.replace( '_', '-' )
        def dec( f ): self[ name or fix_attr_name( f.__name__ ) ] = f; return f
        # in case this is called without a name and without parens
        if name and not isinstance( name, basestring ):
            f = name; name = None
            return dec( f )
        return dec
    def run( self, attrs ):
        """ Internal utility for expanding special attribute names """
        for a in attrs:
            if a.name in self:
                # The expander gets the value tuple as positional args and
                # yields (name, value) pairs replacing the original attr.
                args = a.value if isinstance( a.value, tuple ) else (a.value,)
                for name, value in self[ a.name ]( *args ):
                    yield Attribute( name, value )
            else: yield a
# Global registry of "special" CSS property expanders (see _CssSpecials).
special = _CssSpecials()
def color( r, g, b, a=None ):
    """ Mostly internal helper method for formatting color channel values
    into strings.  Returns ``#rrggbb`` when no alpha is given, otherwise an
    ``rgba(...)`` string; an integer alpha is scaled from 0-255 to 0.0-1.0. """
    # Fixed: the original tested ``if a:``, so an alpha of 0 (fully
    # transparent) fell through to the opaque hex form.
    if a is not None:
        if not isinstance(a, float): a = a / 255.0
        return u'rgba(%s,%s,%s,%.1f)' % (r, g, b, a)
    else: return u'#%.2x%.2x%.2x' % (r, g, b)
@special.register
def pos( x, y, rel='' ):
    """Expand to ``position`` plus horizontal/vertical offset declarations.

    ``rel`` may mention 'relative' or 'fixed' (default 'absolute') and
    'right'/'bottom' to anchor from those edges instead of left/top."""
    if 'relative' in rel:
        position = u'relative'
    elif 'fixed' in rel:
        position = u'fixed'
    else:
        position = u'absolute'
    horiz = u'right' if 'right' in rel else u'left'
    vert = u'bottom' if 'bottom' in rel else u'top'
    return ((u'position', position), (horiz, x), (vert, y))
@special.register
def size( w, h ):
    """Expand to ``width`` and ``height`` declarations."""
    return ((u'width', w), (u'height', h))
# REVIEW: technically, this would overwrite any existing IE filter property...
# this shouldn't be a problem since the rest are pretty useless anyway
@special.register
def opacity( val ):
    """Cross-browser opacity: the standard property plus the legacy IE alpha
    filter (``zoom: 1`` gives the element "hasLayout" so the filter applies)."""
    ie_filter = u'alpha(opacity=%d)' % (val * 100)
    return ((u'opacity', val), (u'filter', ie_filter), (u'zoom', u'1'))
@special.register
def border_radius( radius, desc=None ):
    """Yield vendor-prefixed border-radius declarations.

    With no ``desc``, all four corners are rounded; otherwise ``desc`` names a
    side ('top'/'bottom'), a column ('left'/'right'), or one corner
    ('top left', 'bottom right', ...) and only the matching corners are
    emitted."""
    if not desc:
        for prop in (u'border-radius', u'-webkit-border-radius',
                     u'-moz-border-radius'):
            yield (prop, radius)
        return
    for vert in (u'top', u'bottom'):
        for horiz in (u'left', u'right'):
            if desc in (horiz, vert, vert + u' ' + horiz):
                yield (u'border-' + vert + u'-' + horiz + u'-radius', radius)
                yield (u'-webkit-border-' + vert + u'-' + horiz + u'-radius', radius)
                yield (u'-moz-border-radius-' + vert + horiz, radius)
@special.register
def box_shadow( x, y, blur, spread, color, inset='' ):
    """Vendor-prefixed box-shadow declarations sharing one value tuple."""
    value = (x, y, blur, spread, color, inset)
    return tuple((prefix + u'box-shadow', value)
                 for prefix in (u'', u'-webkit-', u'-moz-'))
# NB: we don't do DXImageTransform effects like Shadow and DropShadow
# because they fuck with the rendering of the rest of the element
# TODO: support multiple color stops
@special.register
def background_gradient( frm, to, angle=None ):
    """Yield a vertical (or ``angle``-directed) CSS linear gradient from color
    tuple ``frm`` to ``to``, with a solid-color fallback (the midpoint color)
    and the legacy webkit-gradient syntax for old WebKit."""
    origin = angle if angle else u'top'
    # Fallback color: the per-channel midpoint of the two endpoints.
    bgcolor = tuple( (a + b) // 2 for a, b in izip( frm, to ) )
    cfrom = color( *frm )
    cto = color( *to )
    args = u'(' + origin + u',' + cfrom + u',' + cto + u')'
    yield (u'background-color', color( *bgcolor ))
    # Legacy WebKit syntax (no angle support here).
    yield (u'background-image',
           u'-webkit-gradient(linear, left top, left bottom, from(' +
           cfrom + u'), to(' + cto + u'))')
    for prefix in (u'-webkit-', u'-moz-', u'-o-', u'-ms-', u''):
        yield (u'background-image', prefix + u'linear-gradient' + args)
# TODO: horizontal?
# TODO: we will eventually need a general utility for specials that generate images
@special.register
def static_background_gradient( frm, to, height ):
    # Pre-renders a vertical linear gradient (RGB(A) tuples ``frm`` -> ``to``)
    # as a 1px-wide PNG of the given height and serves it via
    # `mythril.resources`, for browsers without CSS gradient support.
    cpairs = zip( frm, to )
    im = Image.new( 'RGBA' if len( frm ) > 3 else 'RGB', (1, height) )
    # Linear per-channel interpolation, one pixel per row.
    im.putdata( list( tuple( a + (b - a) * i // (height - 1) for a, b in cpairs )
                      for i in xrange( height ) ))
    sio = StringIO(); im.save( sio, 'PNG' )
    data = sio.getvalue()
    def fescape_tuple(t): return re.sub('[(, )]', '_', repr(t))
    # generate a filename that can be cached
    fname = u'static-background-gradient-%s-%s-%d.png' % (
        fescape_tuple(frm), fescape_tuple(to), height)
    resources.put_string(fname, data)
    # Inline the image as a base64 data: URI; the ``*background`` star-hack
    # property gives IE6/7 (which lack data-URI support) a plain URL instead.
    return ((u'background', u'url(data:image/png;base64,' +
             data.encode( 'base64' ) + u') 0 0 repeat-x'),
            (u'*background', u'url(' +
             resources.get_url(fname).decode('ascii') + u') 0 0 repeat-x'))
|
kotfic/girder | refs/heads/master | girder/cli/build.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import os
from pkg_resources import resource_filename
from subprocess import check_call
import shutil
import sys
import click
import six
from girder.constants import STATIC_ROOT_DIR
from girder.plugin import allPlugins, getPlugin
# monkey patch shutil for python < 3
# (the shutilwhich package backports shutil.which, used below in main())
if not six.PY3:
    import shutilwhich # noqa
# Location of the web client sources bundled inside the girder package.
_GIRDER_BUILD_ASSETS_PATH = resource_filename('girder', 'web_client')
@click.command(name='build', help='Build web client static assets.')
@click.option('--dev/--no-dev', default=False,
              help='Build girder client for development.')
@click.option('--watch', default=False, is_flag=True,
              help='Build girder library bundle in watch mode (implies --dev --no-reinstall).')
@click.option('--watch-plugin',
              help='Build a girder plugin bundle in watch mode (implies --dev --no-reinstall).')
@click.option('--npm', default=os.getenv('NPM_EXE', 'npm'),
              help='Full path to the npm executable to use.')
@click.option('--reinstall/--no-reinstall', default=True,
              help='Force regenerate node_modules.')
def main(dev, watch, watch_plugin, npm, reinstall):
    # Build the girder web client: generate package.json from the template
    # (including plugin npm dependencies), run `npm install` when needed, then
    # run the webpack build -- optionally in watch mode.
    if shutil.which(npm) is None:
        raise click.UsageError(
            'No npm executable was detected. Please ensure the npm executable is in your '
            'path, use the --npm flag, or set the "NPM_EXE" environment variable.'
        )
    if watch and watch_plugin:
        raise click.UsageError('--watch and --watch-plugins cannot be used together')
    # Watch modes imply a development build and skip reinstalling modules.
    if watch or watch_plugin:
        dev = True
        reinstall = False
    staging = _GIRDER_BUILD_ASSETS_PATH
    _generatePackageJSON(staging, os.path.join(_GIRDER_BUILD_ASSETS_PATH, 'package.json.template'))
    if not os.path.isdir(os.path.join(staging, 'node_modules')) or reinstall:
        # The autogeneration of package.json breaks how package-lock.json is
        # intended to work.  If we don't delete it first, you will frequently
        # get "file doesn't exist" errors.
        npmLockFile = os.path.join(staging, 'package-lock.json')
        if os.path.exists(npmLockFile):
            os.unlink(npmLockFile)
        installCommand = [npm, 'install']
        if not dev:
            installCommand.append('--production')
        check_call(installCommand, cwd=staging)
    # Only show npm progress output on interactive terminals.
    quiet = '--no-progress=false' if sys.stdout.isatty() else '--no-progress=true'
    buildCommand = [
        npm, 'run', 'build', '--', '--static-path=%s' % STATIC_ROOT_DIR, quiet]
    if watch:
        buildCommand.append('--watch')
    if watch_plugin:
        buildCommand.extend([
            '--watch',
            'webpack:plugin_%s' % watch_plugin
        ])
    if dev:
        buildCommand.append('--env=dev')
    else:
        buildCommand.append('--env=prod')
    check_call(buildCommand, cwd=staging)
def _collectPluginDependencies():
    """Return a dict of npm packages required by all installed plugins.

    Later plugins win when two plugins pin the same package.
    """
    packages = {}
    for name in allPlugins():
        packages.update(getPlugin(name).npmPackages())
    return packages
def _generatePackageJSON(staging, source):
    """Write ``staging/package.json`` generated from the template ``source``.

    The template's dependencies are merged with the npm dependencies of all
    installed plugins, and a ``girder.plugins`` key is added listing the
    plugin package names.
    """
    with open(source, 'r') as template:
        packageJSON = json.load(template)

    pluginDeps = _collectPluginDependencies()
    packageJSON['dependencies'].update(pluginDeps)
    packageJSON['girder'] = {'plugins': list(pluginDeps.keys())}

    with open(os.path.join(staging, 'package.json'), 'w') as out:
        json.dump(packageJSON, out)
|
vmanoria/bluemix-hue-filebrowser | refs/heads/master | hue-3.8.1-bluemix/desktop/core/ext-py/pysqlite/doc/includes/sqlite3/insert_more_people.py | 49 | from pysqlite2 import dbapi2 as sqlite3
# Open (or create) the example database used throughout the docs.
con = sqlite3.connect("mydb")

cur = con.cursor()

# (name_last, age) rows to append to the existing "people" table.
newPeople = (
    ('Lebed', 53),
    ('Zhirinovsky', 57),
)

# Parameterized insert: "?" placeholders are bound safely by the driver.
for person in newPeople:
    cur.execute("insert into people (name_last, age) values (?, ?)", person)

# The changes will not be saved unless the transaction is committed explicitly:
con.commit()
|
maurizi/otm-core | refs/heads/develop | opentreemap/treemap/tests/test_udfs.py | 3 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import json
from random import shuffle
from datetime import datetime
import psycopg2
from django.db import connection
from django.db.models import Q
from django.core.exceptions import ValidationError
from django.contrib.gis.geos import Point, Polygon
from treemap.tests import (make_instance, make_commander_user,
make_officer_user,
set_write_permissions)
from treemap.lib.object_caches import role_field_permissions
from treemap.lib.udf import udf_create
from treemap.instance import create_stewardship_udfs
from treemap.udf import UserDefinedFieldDefinition, UDFDictionary
from treemap.models import Instance, Plot, User
from treemap.audit import AuthorizeException, FieldPermission, Role
from treemap.tests.base import OTMTestCase
def make_collection_udf(instance, name='Stewardship', model='Plot',
                        datatype=None):
    """Create and return a collection UDF definition for tests.

    When ``datatype`` is omitted, a Stewardship-style schema is used: a
    'action' choice field (water/prune) plus an integer 'height' field.
    """
    # UDF values are stored in an hstore column; the adapter must be
    # registered before any UDF definitions can be created.
    psycopg2.extras.register_hstore(connection.cursor(), globally=True)

    if datatype is None:
        datatype = [
            {'name': 'action',
             'type': 'choice',
             'choices': ['water', 'prune']},
            {'name': 'height',
             'type': 'int'},
        ]

    return UserDefinedFieldDefinition.objects.create(
        instance=instance,
        model_type=model,
        name=name,
        iscollection=True,
        datatype=json.dumps(datatype))
class UDFDictionaryTestCase(OTMTestCase):
    """Tests for UDFDictionary's None-handling semantics."""

    def setUp(self):
        self.p = Point(0, 0)
        self.instance = make_instance(point=self.p)

        UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'choice',
                                 'choices': ['a', 'b', 'c']}),
            iscollection=False,
            name='Test choice')

        # BUGFIX: was a duplicated assignment ("self.plot = self.plot = ...").
        self.plot = Plot(geom=self.p, instance=self.instance)

        self.d = UDFDictionary()
        self.d.set_model_instance(self.plot)

    def test_set_item_to_none_removes_key(self):
        self.d['Test choice'] = 'a'
        self.assertEqual(1, len(self.d.keys()))
        self.d['Test choice'] = None
        self.assertEqual(0, len(self.d.keys()))

    def test_setting_nonexistant_key_to_none_is_a_noop(self):
        # Should not raise an error
        self.d['Test choice'] = None
class ScalarUDFTestCase(OTMTestCase):
    """Shared fixture: one scalar UDF of each datatype defined on Plot.

    Subclasses get an instance, a commander user with write permission on
    the scalar UDFs, and one saved plot to mutate.
    """

    def setUp(self):
        # UDF values are stored in an hstore column; register the adapter.
        psycopg2.extras.register_hstore(connection.cursor(), globally=True)

        self.p = Point(0, 0)
        self.instance = make_instance(point=self.p)

        self.commander_user = make_commander_user(self.instance)
        # NOTE: 'udf:Test multichoice' is intentionally absent here; the
        # multichoice permission is granted separately where needed.
        set_write_permissions(self.instance, self.commander_user,
                              'Plot',
                              ['udf:Test choice', 'udf:Test string',
                               'udf:Test int', 'udf:Test date',
                               'udf:Test float'])

        UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'choice',
                                 'choices': ['a', 'b', 'c']}),
            iscollection=False,
            name='Test choice')

        UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'string'}),
            iscollection=False,
            name='Test string')

        UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'date'}),
            iscollection=False,
            name='Test date')

        UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'int'}),
            iscollection=False,
            name='Test int')

        UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'float'}),
            iscollection=False,
            name='Test float')

        UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({
                'type': 'multichoice',
                'choices': [
                    'a',
                    'contains a',
                    'also does']}),
            iscollection=False,
            name='Test multichoice')

        self.plot = Plot(geom=self.p, instance=self.instance)
        self.plot.save_with_user(self.commander_user)
class ScalarUDFFilterTest(ScalarUDFTestCase):
    """Tests for filtering Plot querysets on scalar UDF values."""

    def setUp(self):
        super(ScalarUDFFilterTest, self).setUp()

        def create_and_save_with_choice(c, n=1):
            # Save n plots with 'Test choice' == c; return their pks as a set.
            plots = []
            for i in xrange(n):
                plot = Plot(geom=self.p, instance=self.instance)
                plot.udfs['Test choice'] = c
                plot.save_with_user(self.commander_user)
                plots.append(plot)

            return {plot.pk for plot in plots}

        self.choice_a = create_and_save_with_choice('a', n=2)
        self.choice_b = create_and_save_with_choice('b', n=3)
        self.choice_c = create_and_save_with_choice('c', n=7)

    def test_filtering_on_string_and_choice_using_count(self):
        plots = Plot.objects.filter(**{'udfs__Test choice': 'a'})

        self.assertEqual(
            len(self.choice_a),
            plots.count())

    def test_filtering_on_value_works(self):
        plots = Plot.objects.filter(**{'udfs__Test choice': 'b'})

        self.assertEqual(
            self.choice_b,
            {plot.pk for plot in plots})

    def test_filter_on_multichoice_value_works(self):
        plot = Plot(geom=self.p, instance=self.instance)
        plot.udfs['Test multichoice'] = ['a']
        plot.save_with_user(self.commander_user)

        plot = Plot(geom=self.p, instance=self.instance)
        plot.udfs['Test multichoice'] = ['contains a']
        plot.save_with_user(self.commander_user)

        plot = Plot(geom=self.p, instance=self.instance)
        plot.udfs['Test multichoice'] = ['also does']
        plot.save_with_user(self.commander_user)

        # Requires the double quotes in order to not find the other two.
        plots_with_a = Plot.objects.filter(
            **{'udfs__Test multichoice__contains': '"a"'})

        self.assertEqual(plots_with_a.count(), 1)

    def test_combine_with_geom(self):
        # UDF filters must compose with ordinary spatial filters.
        plot_a = Plot.objects.get(pk=self.choice_a.pop())
        plot_b = Plot.objects.get(pk=self.choice_b.pop())

        p = Point(10, 0)
        poly = Polygon(((5, -5), (15, -5), (15, 5), (5, 5), (5, -5)))

        plot_a.geom = p
        plot_a.save_with_user(self.commander_user)

        plot_b.geom = p
        plot_b.save_with_user(self.commander_user)

        a_in_poly = Plot.objects.filter(**{'udfs__Test choice': 'a'})\
            .filter(geom__contained=poly)

        self.assertEqual({plot.pk for plot in a_in_poly},
                         {plot_a.pk, })

        b_in_poly = Plot.objects.filter(**{'udfs__Test choice': 'b'})\
            .filter(geom__contained=poly)

        self.assertEqual({plot.pk for plot in b_in_poly},
                         {plot_b.pk, })

    def test_search_suffixes(self):
        plot1 = Plot(geom=self.p, instance=self.instance)
        plot1.udfs['Test string'] = 'this is a test'
        plot1.save_with_user(self.commander_user)

        plot2 = Plot(geom=self.p, instance=self.instance)
        plot2.udfs['Test string'] = 'this is aLsO'
        plot2.save_with_user(self.commander_user)

        def run(sfx, val):
            # Filter on 'Test string' with the given lookup suffix.
            return {plot.pk
                    for plot
                    in Plot.objects.filter(
                        **{'udfs__Test string' + sfx: val})}

        self.assertEqual(set(), run('', 'also'))
        self.assertEqual({plot1.pk, plot2.pk},
                         run('__contains', 'this is a'))
        self.assertEqual({plot2.pk}, run('__icontains', 'this is al'))

    def _setup_dates(self):
        # Save one plot per date, in a shuffled (but deterministic) order.
        def create_plot_with_date(adate):
            plot = Plot(geom=self.p, instance=self.instance)
            plot.udfs['Test date'] = adate
            plot.save_with_user(self.commander_user)
            return plot

        dates = [
            (2010, 3, 4),
            (2010, 3, 5),
            (2010, 4, 4),
            (2010, 5, 5),
            (2012, 3, 4),
            (2012, 3, 5),
            (2012, 4, 4),
            (2012, 5, 5),
            (2013, 3, 4)]

        dates = [datetime(*adate) for adate in dates]

        # Get dates out of standard order
        shuffle(dates, lambda: 0.5)

        for adate in dates:
            create_plot_with_date(adate)

        return dates

    def test_has_key(self):
        dates = self._setup_dates()
        plots = Plot.objects.filter(**{'udfs__has_key': 'Test date'})
        self.assertEqual(len(plots), len(dates))

    def test_integer_gt_and_lte_constraints(self):
        '''
        The straightforward test

        plots = Plot.objects.filter(**{'udfs__Test int__gt': 20,
                                       'udfs__Test int__lte': 50})

        fails because it does a lexical comparison, not numerical.
        In order to get it to do an integer comparison,
        it is necessary to add a Transform to cast both the
        LHS and RHS of the comparison to `int`.

        So...

        udfs__Test int__gt becomes
        udfs__Test int__int__gt, where
                       ^ this __int is the casting Transform.
        '''
        def create_plot_with_num(anint):
            plot = Plot(geom=self.p, instance=self.instance)
            plot.udfs['Test int'] = anint
            plot.save_with_user(self.commander_user)
            return plot

        # in range
        create_plot_with_num(21)
        create_plot_with_num(50)
        # out of range numerically, but in range lexically
        create_plot_with_num(3)
        create_plot_with_num(300)
        # out of range either way
        create_plot_with_num(2)
        create_plot_with_num(20)

        plots = Plot.objects.filter(**{'udfs__Test int__int__gt': 20,
                                       'udfs__Test int__int__lte': 50})

        self.assertEqual(len(plots), 2)

    def test_float_gt_and_lte_constraints(self):
        '''
        The straightforward test

        plots = Plot.objects.filter(**{'udfs__Test float__gt': 20.5,
                                       'udfs__Test float__lte': 50.0})

        fails because it does a lexical comparison, not numerical.
        In order to get it to do a float comparison,
        it is necessary to add a Transform to cast both the
        LHS and RHS of the comparison to `float`.

        So...

        udfs__Test float__gt becomes
        udfs__Test float__float__gt, where
                         ^ this __float is the casting Transform.
        '''
        def create_plot_with_num(afloat):
            plot = Plot(geom=self.p, instance=self.instance)
            plot.udfs['Test float'] = afloat
            plot.save_with_user(self.commander_user)
            return plot

        # in range
        create_plot_with_num(20.6)
        create_plot_with_num(50.0)
        # out of range numerically, but in range lexically
        create_plot_with_num(3.1)
        create_plot_with_num(300.1)
        # out of range either way
        create_plot_with_num(2.5)
        create_plot_with_num(20.5)

        plots = Plot.objects.filter(
            **{'udfs__Test float__float__gt': 20.5,
               'udfs__Test float__float__lte': 50.0})

        self.assertEqual(len(plots), 2)

    def test_using_q_objects(self):
        # UDF lookups must also compose via Q-object boolean algebra.
        qb = Q(**{'udfs__Test choice': 'b'})
        qc = Q(**{'udfs__Test choice': 'c'})

        q = qb | qc

        plots = Plot.objects.filter(q)

        self.assertEqual(
            self.choice_b | self.choice_c,
            {plot.pk for plot in plots})
class UDFAuditTest(OTMTestCase):
    """Tests for auditing and authorization of UDF writes."""

    def setUp(self):
        self.p = Point(-8515941.0, 4953519.0)
        self.instance = make_instance(point=self.p)
        self.commander_user = make_commander_user(self.instance)
        # Only 'udf:Test choice' is writable; 'Test unauth' and
        # 'Test collection' are deliberately left unauthorized.
        set_write_permissions(self.instance, self.commander_user,
                              'Plot', ['udf:Test choice'])

        UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'choice',
                                 'choices': ['a', 'b', 'c']}),
            iscollection=False,
            name='Test choice')

        UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'string'}),
            iscollection=False,
            name='Test unauth')

        UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps([{'type': 'choice',
                                  'name': 'a choice',
                                  'choices': ['a', 'b', 'c']},
                                 {'type': 'string',
                                  'name': 'a string'}]),
            iscollection=True,
            name='Test collection')

        self.plot = Plot(geom=self.p, instance=self.instance)
        self.plot.save_with_user(self.commander_user)

        psycopg2.extras.register_hstore(connection.cursor(), globally=True)

    def test_mask_unauthorized_with_udfs(self):
        officer_user = make_officer_user(self.instance)

        self.plot.udfs['Test choice'] = 'b'
        self.plot.save_with_user(self.commander_user)
        self.plot.udfs['Test unauth'] = 'foo'
        # save_base bypasses the authorization checks entirely.
        self.plot.save_base()

        newplot = Plot.objects.get(pk=self.plot.pk)
        self.assertEqual(newplot.udfs['Test choice'], 'b')
        self.assertEqual(newplot.udfs['Test unauth'], 'foo')

        # Commander can read 'Test choice' but not 'Test unauth'.
        newplot = Plot.objects.get(pk=self.plot.pk)
        newplot.mask_unauthorized_fields(self.commander_user)
        self.assertEqual(newplot.udfs['Test choice'], 'b')
        self.assertEqual(newplot.udfs['Test unauth'], None)

        # Officer can read neither.
        newplot = Plot.objects.get(pk=self.plot.pk)
        newplot.mask_unauthorized_fields(officer_user)
        self.assertEqual(newplot.udfs['Test choice'], None)
        self.assertEqual(newplot.udfs['Test unauth'], None)

    def test_update_field_creates_audit(self):
        self.plot.udfs['Test choice'] = 'b'
        self.plot.save_with_user(self.commander_user)

        last_audit = list(self.plot.audits())[-1]

        self.assertEqual(last_audit.model, 'Plot')
        self.assertEqual(last_audit.model_id, self.plot.pk)
        self.assertEqual(last_audit.field, 'udf:Test choice')
        self.assertEqual(last_audit.previous_value, None)
        self.assertEqual(last_audit.current_value, 'b')

        self.plot.udfs['Test choice'] = 'c'
        self.plot.save_with_user(self.commander_user)

        last_audit = list(self.plot.audits())[-1]

        self.assertEqual(last_audit.model, 'Plot')
        self.assertEqual(last_audit.model_id, self.plot.pk)
        self.assertEqual(last_audit.field, 'udf:Test choice')
        self.assertEqual(last_audit.previous_value, 'b')
        self.assertEqual(last_audit.current_value, 'c')

    def test_cant_edit_unauthorized_collection(self):
        self.plot.udfs['Test collection'] = [
            {'a choice': 'a', 'a string': 's'}]

        self.assertRaises(AuthorizeException,
                          self.plot.save_with_user, self.commander_user)

    def test_cant_edit_unauthorized_field(self):
        self.plot.udfs['Test unauth'] = 'c'
        self.assertRaises(AuthorizeException,
                          self.plot.save_with_user, self.commander_user)

    def test_create_invalid_pending_collection(self):
        pending = self.plot.audits().filter(requires_auth=True)
        self.assertEqual(len(pending), 0)

        # Grant pending-write access so validation (not authorization)
        # is what rejects the bad value.
        role = self.commander_user.get_role(self.instance)
        fp, __ = FieldPermission.objects.get_or_create(
            model_name='Plot', field_name='udf:Test collection',
            permission_level=FieldPermission.WRITE_WITH_AUDIT,
            role=role, instance=self.instance)

        self.plot.udfs['Test collection'] = [
            {'a choice': 'invalid choice', 'a string': 's'}]

        self.assertRaises(ValidationError,
                          self.plot.save_with_user, self.commander_user)
class UDFDefTest(OTMTestCase):
    """Validation tests for UserDefinedFieldDefinition creation."""

    def setUp(self):
        self.instance = make_instance()

    def _create_and_save_with_datatype(
            self, d, model_type='Plot', name='Blah', iscollection=False):
        # Helper: create a UDF definition with datatype `d` (JSON-encoded).
        return UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type=model_type,
            datatype=json.dumps(d),
            iscollection=iscollection,
            name=name)

    def test_cannot_create_datatype_with_invalid_model(self):
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            {'type': 'string'},
            model_type='InvalidModel')

    def test_cannot_create_datatype_with_nonudf(self):
        # InstanceUser exists, but does not support UDFs.
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            {'type': 'string'},
            model_type='InstanceUser')

    def test_cannot_create_duplicate_udfs(self):
        self._create_and_save_with_datatype(
            {'type': 'string'},
            name='random')

        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            {'type': 'string'},
            name='random')

        self._create_and_save_with_datatype(
            {'type': 'string'},
            name='random2')

    def test_cannot_create_datatype_with_existing_field(self):
        # UDF names may not shadow concrete model fields.
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            {'type': 'string'},
            name='width')

        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            {'type': 'string'},
            name='id')

        self._create_and_save_with_datatype(
            {'type': 'string'},
            name='random')

    def test_must_have_type_key(self):
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype, {})

    def test_invalid_type(self):
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype, {'type': 'woohoo'})

        self._create_and_save_with_datatype({'type': 'float'})

    def test_description_op(self):
        # 'description' is an optional datatype key.
        self._create_and_save_with_datatype(
            {'type': 'float',
             'description': 'this is a float field'})

    def test_choices_not_missing(self):
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            {'type': 'choice'})

        self._create_and_save_with_datatype(
            {'type': 'choice',
             'choices': ['a choice', 'another']})

    def test_choices_not_empty(self):
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            {'type': 'choice',
             'choices': []})

        self._create_and_save_with_datatype(
            {'type': 'choice',
             'choices': ['a choice', 'another']})

    def test_cannot_create_choices_with_numeric_values(self):
        with self.assertRaises(ValidationError):
            self._create_and_save_with_datatype(
                {'type': 'choice',
                 'choices': [0, 1, 3, 4, 5]})

    def test_can_create_subfields(self):
        self._create_and_save_with_datatype(
            [{'type': 'choice',
              'name': 'achoice',
              'choices': ['a', 'b']},
             {'type': 'string',
              'name': 'something'}], iscollection=True)

    def test_must_have_name_on_subfields(self):
        # Missing 'name' key:
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            [{'type': 'choice',
              'choices': ['a', 'b']},
             {'type': 'string',
              'name': 'something'}],
            iscollection=True)

        # Empty 'name' value:
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            [{'type': 'choice',
              'choices': ['a', 'b'],
              'name': ''},
             {'type': 'string',
              'name': 'something'}],
            iscollection=True)

        self._create_and_save_with_datatype(
            [{'type': 'choice',
              'name': 'valid name',
              'choices': ['a', 'b']},
             {'type': 'string',
              'name': 'something'}],
            iscollection=True)

    def test_subfields_may_not_have_duplicate_names(self):
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            [{'type': 'choice',
              'name': 'valid name',
              'choices': ['a', 'b']},
             {'type': 'string',
              'name': 'valid name'}],
            name='another',
            iscollection=True)

        self._create_and_save_with_datatype(
            [{'type': 'choice',
              'name': 'valid name',
              'choices': ['a', 'b']},
             {'type': 'string',
              'name': 'valid name2'}],
            iscollection=True)

    def test_iscollection_requires_json_array(self):
        # A list datatype is only valid when iscollection=True.
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            [{'type': 'choice',
              'name': 'a name',
              'choices': ['a', 'b']},
             {'type': 'string',
              'name': 'something'}],
            iscollection=False)

        self._create_and_save_with_datatype(
            [{'type': 'choice',
              'choices': ['a', 'b'],
              'name': 'a name'},
             {'type': 'string',
              'name': 'something'}],
            iscollection=True)

    def test_not_iscollection_requires_only_a_dict(self):
        # Conversely, a dict datatype is only valid when iscollection=False.
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            {'type': 'choice',
             'choices': ['a', 'b']},
            iscollection=True)

        self._create_and_save_with_datatype(
            {'type': 'choice',
             'choices': ['a', 'b']},
            iscollection=False)

    def test_subfield_cannot_be_called_id(self):
        # 'id' is reserved for the collection row identifier.
        self.assertRaises(
            ValidationError,
            self._create_and_save_with_datatype,
            [{'type': 'choice',
              'name': 'id',
              'choices': ['a', 'b']},
             {'type': 'string',
              'name': 'something'}],
            iscollection=True)

        self._create_and_save_with_datatype(
            [{'type': 'choice',
              'name': 'anything else',
              'choices': ['a', 'b']},
             {'type': 'string',
              'name': 'something'}],
            iscollection=True)

    def test_default_values(self):
        # Defaults must be valid values for their subfield's datatype.
        with self.assertRaises(ValidationError):
            self._create_and_save_with_datatype(
                [{'type': 'choice',
                  'name': 'a name',
                  'choices': ['a', 'b'],
                  'default': 'c'},
                 {'type': 'string',
                  'name': 'something'}],
                iscollection=True)

        self._create_and_save_with_datatype(
            [{'type': 'choice',
              'name': 'a name',
              'choices': ['a', 'b'],
              'default': 'a'},
             {'type': 'string',
              'name': 'something',
              'default': 'anything'}],
            iscollection=True)

    def test_create_multiple_choice_udf(self):
        UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({
                'type': 'multichoice',
                'choices': ['a', 'b']
            }),
            iscollection=False,
            name='a name')

    def test_cannot_create_multiple_choice_udf_with_double_quotes(self):
        # Double quotes would break the JSON-in-hstore encoding of values.
        with self.assertRaises(ValidationError):
            UserDefinedFieldDefinition.objects.create(
                instance=self.instance,
                model_type='Plot',
                datatype=json.dumps({
                    'type': 'multichoice',
                    'choices': ['a', 'b"']
                }),
                iscollection=False,
                name='a name')

    def test_invalid_names(self):
        # Characters with special meaning in ORM lookups are rejected.
        with self.assertRaises(ValidationError):
            UserDefinedFieldDefinition.objects.create(
                instance=self.instance,
                model_type='Plot',
                datatype=json.dumps({'type': 'string'}),
                iscollection=False,
                name='%')

        with self.assertRaises(ValidationError):
            UserDefinedFieldDefinition.objects.create(
                instance=self.instance,
                model_type='Tree',
                datatype=json.dumps({'type': 'string'}),
                iscollection=False,
                name='.')

        with self.assertRaises(ValidationError):
            UserDefinedFieldDefinition.objects.create(
                instance=self.instance,
                model_type='Plot',
                datatype=json.dumps({'type': 'string'}),
                iscollection=False,
                name='__contains')
class ScalarUDFInstanceIsolationTest(OTMTestCase):
    """Verify UDF choice edits in one instance do not leak into another."""

    def setUp(self):
        self.p = Point(-8515941.0, 4953519.0)
        # Two independent instances, each with its own commander, an
        # identically-named 'Test choice' UDF, and one plot.
        self.instances = [
            make_instance(point=self.p),
            make_instance(point=self.p)
        ]
        self.commander_users = [
            make_commander_user(i, username='commander%d' % i.pk)
            for i in self.instances]
        for i in range(len(self.instances)):
            set_write_permissions(self.instances[i], self.commander_users[i],
                                  'Plot', ['udf:Test choice'])

        self.choice_udfds = [
            UserDefinedFieldDefinition.objects.create(
                instance=i,
                model_type='Plot',
                datatype=json.dumps({'type': 'choice',
                                     'choices': ['a', 'b', 'c']}),
                iscollection=False,
                name='Test choice') for i in self.instances]

        self.plots = [
            Plot(geom=self.p, instance=i) for i in self.instances]
        for i in range(len(self.plots)):
            self.plots[i].save_with_user(self.commander_users[i])

        psycopg2.extras.register_hstore(connection.cursor(), globally=True)

    def test_update_choice_value_in_one_instance(self):
        # Add and assert a choice value in both instances
        for i in range(len(self.plots)):
            self.plots[i].udfs['Test choice'] = 'a'
            self.plots[i].save_with_user(self.commander_users[i])
            self.plots[i] = Plot.objects.get(pk=self.plots[i].pk)
            audit = self.plots[i].audits().get(field='udf:Test choice')
            self.assertEqual(
                self.plots[i].udfs['Test choice'], 'a')
            self.assertEqual(
                audit.current_value, 'a')

        # Update a choice name in the first instance only and assert the change
        self.choice_udfds[0].update_choice('a', 'm')

        self.plots[0] = Plot.objects.get(pk=self.plots[0].pk)
        audit0 = self.plots[0].audits().get(field='udf:Test choice')
        self.assertEqual(
            self.plots[0].udfs['Test choice'], 'm')
        self.assertEqual(
            audit0.current_value, 'm')
        choice0 = UserDefinedFieldDefinition.objects.get(
            pk=self.choice_udfds[0].pk)
        self.assertEqual(
            set(choice0.datatype_dict['choices']),
            {'m', 'b', 'c'})

        # Assert that the second instance is unchanged
        self.plots[1] = Plot.objects.get(pk=self.plots[1].pk)
        audit0 = self.plots[1].audits().get(field='udf:Test choice')
        self.assertEqual(
            self.plots[1].udfs['Test choice'], 'a')
        self.assertEqual(
            audit0.current_value, 'a')
        choice1 = UserDefinedFieldDefinition.objects.get(
            pk=self.choice_udfds[1].pk)
        self.assertEqual(
            set(choice1.datatype_dict['choices']),
            {'a', 'b', 'c'})
class ScalarUDFTest(OTMTestCase):
    """Datatype round-trip, validation, and choice-maintenance tests for
    scalar UDFs on Plot."""

    def setUp(self):
        self.p = Point(-8515941.0, 4953519.0)
        self.instance = make_instance(point=self.p)

        def make_and_save_type(dtype):
            # One simple UDF per primitive datatype, named 'Test <dtype>'.
            UserDefinedFieldDefinition.objects.create(
                instance=self.instance,
                model_type='Plot',
                datatype=json.dumps({'type': dtype}),
                iscollection=False,
                name='Test %s' % dtype)

        allowed_types = 'float', 'int', 'string', 'date'

        addl_fields = ['udf:Test %s' % ttype for ttype in allowed_types]
        addl_fields.append('udf:Test choice')
        addl_fields.append('udf:Test multichoice')

        self.commander_user = make_commander_user(self.instance)
        set_write_permissions(self.instance, self.commander_user,
                              'Plot', addl_fields)

        for dtype in allowed_types:
            make_and_save_type(dtype)

        self.choice_udfd = UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'choice',
                                 'choices': ['a', 'b', 'c']}),
            iscollection=False,
            name='Test choice')

        self.multichoice_udfd = UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'multichoice',
                                 'choices': ['a', 'b', 'c']}),
            iscollection=False,
            name='Test multichoice')

        self.plot = Plot(geom=self.p, instance=self.instance)
        self.plot.save_with_user(self.commander_user)

        psycopg2.extras.register_hstore(connection.cursor(), globally=True)

    def _test_datatype(self, field, value):
        # Round-trip helper: write `value`, save, reload, assert equality.
        self.plot.udfs[field] = value
        self.plot.save_with_user(self.commander_user)

        self.plot = Plot.objects.get(pk=self.plot.pk)

        self.assertEqual(
            self.plot.udfs[field], value)

    def test_int_datatype(self):
        self._test_datatype('Test int', 4)
        # Values are also exposed via 'udf:<name>' attribute access.
        self.assertEqual(getattr(self.plot, 'udf:Test int', None), 4)

    def test_int_validation_non_integer(self):
        self.assertRaises(ValidationError,
                          self._test_datatype, 'Test int', 42.3)
        self.assertRaises(ValidationError,
                          self._test_datatype, 'Test int', 'blah')

    def test_float_datatype(self):
        self._test_datatype('Test float', 4.4)
        self.assertEqual(getattr(self.plot, 'udf:Test float', None), 4.4)

    def test_float_validation(self):
        self.assertRaises(ValidationError,
                          self._test_datatype, 'Test float', 'blah')

    def test_cant_update_choices_on_non_choice_model(self):
        floatfield = UserDefinedFieldDefinition\
            .objects\
            .filter(name='Test float')

        self.assertRaises(ValidationError,
                          floatfield[0].update_choice,
                          'a', 'b')

    def test_update_invalid_choice(self):
        self.assertRaises(ValidationError,
                          self.choice_udfd.update_choice,
                          'WHAT?????', 'm')

    def test_multiple_invalid_choices(self):
        # A single save should report every invalid UDF, not just the first.
        self.plot.udfs['Test int'] = 'not an integer'
        self.plot.udfs['Test float'] = 'not a float'

        with self.assertRaises(ValidationError) as ve:
            self.plot.save_with_user(self.commander_user)

        self.assertValidationErrorDictContainsKey(
            ve.exception, 'udf:Test int')
        self.assertValidationErrorDictContainsKey(
            ve.exception, 'udf:Test float')

    def test_empty_choice_deletes_field(self):
        self._test_datatype('Test choice', 'a')
        self.assertEqual(getattr(self.plot, 'udf:Test choice', None), 'a')

        count = Plot.objects.filter(**{
            'udfs__Test choice': 'a'}).count()

        self.assertEqual(count, 1)

        # should remove the udf
        self.plot.udfs['Test choice'] = ''
        self.plot.save_with_user(self.commander_user)

        self.plot = Plot.objects.get(pk=self.plot.pk)

        self.assertIsNone(self.plot.udfs['Test choice'])
        self.assertEqual(getattr(self.plot, 'udf:Test choice', None), None)

        count = Plot.objects.filter(**{
            'udfs__has_key': 'Test choice'}).count()

        self.assertEqual(count, 0)

    def test_delete_choice_value(self):
        self.plot.udfs['Test choice'] = 'a'
        self.plot.save_with_user(self.commander_user)

        self.plot = Plot.objects.get(pk=self.plot.pk)
        audit = self.plot.audits().get(field='udf:Test choice')

        self.assertEqual(
            self.plot.udfs['Test choice'], 'a')
        self.assertEqual(
            audit.current_value, 'a')

        # Deleting a choice clears the value AND its audit trail.
        self.choice_udfd.delete_choice('a')

        self.plot = Plot.objects.get(pk=self.plot.pk)
        audit = self.plot.audits().filter(field='udf:Test choice')

        self.assertEqual(
            self.plot.udfs['Test choice'], None)
        self.assertEqual(
            audit.exists(), False)

        choice = UserDefinedFieldDefinition.objects.get(
            pk=self.choice_udfd.pk)

        self.assertEqual(
            set(choice.datatype_dict['choices']),
            {'b', 'c'})

    def test_delete_multichoice_value(self):
        self.plot.udfs['Test multichoice'] = ['a']
        self.plot.save_with_user(self.commander_user)

        self.plot = Plot.objects.get(pk=self.plot.pk)
        audit = self.plot.audits().get(field='udf:Test multichoice')

        self.assertEqual(
            self.plot.udfs['Test multichoice'], ['a'])
        self.assertEqual(
            json.loads(audit.current_value), ['a'])

        self.multichoice_udfd.delete_choice('a')

        self.plot = Plot.objects.get(pk=self.plot.pk)
        audit = self.plot.audits().filter(field='udf:Test multichoice')

        # Multichoice audits are kept, but rewritten to a null value.
        self.assertEqual(self.plot.udfs['Test multichoice'], None)
        self.assertEqual(json.loads(audit[0].current_value), None)

        choice = UserDefinedFieldDefinition.objects.get(
            pk=self.multichoice_udfd.pk)

        self.assertEqual(
            set(choice.datatype_dict['choices']),
            {'b', 'c'})

    def test_update_multichoice_value(self):
        # setup plot and requery
        self.plot.udfs['Test multichoice'] = ['a']
        self.plot.save_with_user(self.commander_user)
        self.plot = Plot.objects.get(pk=self.plot.pk)

        # Renames must survive values containing backslashes/digits.
        self.multichoice_udfd.update_choice('a', 'weird \\\\\\1a2chars')

        self.plot = Plot.objects.get(pk=self.plot.pk)
        audit = self.plot.audits().get(field='udf:Test multichoice')

        self.assertEqual(
            self.plot.udfs['Test multichoice'], ['weird \\\\\\1a2chars'])
        self.assertEqual(json.loads(audit.current_value),
                         ['weird \\\\\\1a2chars'])

        choice = UserDefinedFieldDefinition.objects.get(
            pk=self.multichoice_udfd.pk)

        self.assertEqual(
            set(choice.datatype_dict['choices']),
            {'weird \\\\\\1a2chars', 'b', 'c'})

        self.plot = Plot.objects.get(pk=self.plot.pk)

        # Renaming an unselected choice leaves stored values untouched.
        self.multichoice_udfd.update_choice('b', 'd')

        self.assertEqual(
            self.plot.udfs['Test multichoice'], ['weird \\\\\\1a2chars'])

        choice = UserDefinedFieldDefinition.objects.get(
            pk=self.multichoice_udfd.pk)

        self.assertEqual(
            set(choice.datatype_dict['choices']),
            {'weird \\\\\\1a2chars', 'd', 'c'})

    def test_update_choice_value(self):
        self.plot.udfs['Test choice'] = 'a'
        self.plot.save_with_user(self.commander_user)

        self.plot = Plot.objects.get(pk=self.plot.pk)
        audit = self.plot.audits().get(field='udf:Test choice')

        self.assertEqual(
            self.plot.udfs['Test choice'], 'a')
        self.assertEqual(
            audit.current_value, 'a')

        # Renaming a choice rewrites stored values and audits in place.
        self.choice_udfd.update_choice('a', 'm')

        self.plot = Plot.objects.get(pk=self.plot.pk)
        audit = self.plot.audits().get(field='udf:Test choice')

        self.assertEqual(
            self.plot.udfs['Test choice'], 'm')
        self.assertEqual(
            audit.current_value, 'm')

        choice = UserDefinedFieldDefinition.objects.get(
            pk=self.choice_udfd.pk)

        self.assertEqual(
            set(choice.datatype_dict['choices']),
            {'m', 'b', 'c'})

    def test_choice_datatype(self):
        self._test_datatype('Test choice', 'a')

    def test_choice_validation(self):
        self.assertRaises(ValidationError,
                          self._test_datatype, 'Test choice', 'bad choice')

    def test_date_datatype(self):
        # Microseconds are dropped on storage, so zero them before comparing.
        d = datetime.now().replace(microsecond=0)

        self._test_datatype('Test date', d)

    def test_string_datatype(self):
        self._test_datatype('Test string', 'Sweet Plot')

    def test_in_operator(self):
        self.assertNotIn('Test string', self.plot.udfs)
        self.assertNotIn('RanDoM NAme', self.plot.udfs)

    def test_returns_none_for_empty_but_valid_udfs(self):
        self.assertEqual(self.plot.udfs['Test string'],
                         None)

    def test_raises_keyerror_for_invalid_udf(self):
        self.assertRaises(KeyError,
                          lambda: self.plot.udfs['RaNdoName'])
class CollectionUDFTest(OTMTestCase):
    def setUp(self):
        # Instance with a 'Stewardship' collection UDF on Plot, a commander
        # with write access to it, and one saved plot.
        self.p = Point(-8515941.0, 4953519.0)
        self.instance = make_instance(point=self.p)
        self.udf = make_collection_udf(self.instance, 'Stewardship')

        self.commander_user = make_commander_user(self.instance)
        set_write_permissions(self.instance, self.commander_user,
                              'Plot', ['udf:Stewardship'])

        self.plot = Plot(geom=self.p, instance=self.instance)
        self.plot.save_with_user(self.commander_user)
def test_can_update_choice_option(self):
stews = [{'action': 'water',
'height': 42},
{'action': 'prune',
'height': 12}]
self.plot.udfs['Stewardship'] = stews
self.plot.save_with_user(self.commander_user)
plot = Plot.objects.get(pk=self.plot.pk)
audits = [a.current_value for a in
plot.audits().filter(field='udf:action')]
self.assertEqual(self._get_udf_actions(plot), {'water', 'prune'})
self.assertEqual(audits, ['water', 'prune'])
self.udf.update_choice('water', 'h2o', name='action')
plot = Plot.objects.get(pk=self.plot.pk)
audits = [a.current_value for a in
plot.audits().filter(field='udf:action')]
self.assertEqual(self._get_udf_actions(plot), {'h2o', 'prune'})
self.assertEqual(audits, ['h2o', 'prune'])
def _get_udf_actions(self, plot):
# UDF collection values are not ordered! So compare using sets.
return {value['action'] for value in plot.udfs['Stewardship']}
def test_can_delete_choice_option(self):
stews = [{'action': 'water',
'height': 42},
{'action': 'prune',
'height': 12}]
self.plot.udfs['Stewardship'] = stews
self.plot.save_with_user(self.commander_user)
plot = Plot.objects.get(pk=self.plot.pk)
audits = [a.current_value for a in
plot.audits().filter(field='udf:action')]
self.assertEqual(self._get_udf_actions(plot), {'water', 'prune'})
self.assertEqual(audits, ['water', 'prune'])
self.udf.delete_choice('water', name='action')
plot = Plot.objects.get(pk=self.plot.pk)
audits = [a.current_value for a in
plot.audits().filter(field='udf:action')]
self.assertEqual(self._get_udf_actions(plot), {'prune'})
self.assertEqual(audits, ['prune'])
def test_can_get_and_set(self):
stews = [{'action': 'water',
'height': 42},
{'action': 'prune',
'height': 12}]
self.plot.udfs['Stewardship'] = stews
self.plot.save_with_user(self.commander_user)
reloaded_plot = Plot.objects.get(pk=self.plot.pk)
new_stews = reloaded_plot.udfs['Stewardship']
for expected_stew, actual_stew in zip(stews, new_stews):
self.assertIn('id', actual_stew)
self.assertDictContainsSubset(expected_stew, actual_stew)
def test_can_delete(self):
stews = [{'action': 'water',
'height': 42},
{'action': 'prune',
'height': 12}]
self.plot.udfs['Stewardship'] = stews
self.plot.save_with_user(self.commander_user)
reloaded_plot = Plot.objects.get(pk=self.plot.pk)
all_new_stews = reloaded_plot.udfs['Stewardship']
# Keep only 'prune' (note that UDF collection values are unordered)
new_stews = filter(lambda v: v['action'] == 'prune', all_new_stews)
reloaded_plot.udfs['Stewardship'] = new_stews
reloaded_plot.save_with_user(self.commander_user)
reloaded_plot = Plot.objects.get(pk=self.plot.pk)
newest_stews = reloaded_plot.udfs['Stewardship']
self.assertEqual(len(newest_stews), 1)
self.assertEqual(newest_stews[0]['action'], 'prune')
self.assertEqual(newest_stews[0]['height'], 12)
# Collection fields used the same validation logic as scalar
# udfs the point of this section is prove that the code is hooked
# up, not to exhaustively test datatype validation
def test_cannot_save_with_invalid_field_name(self):
self.plot.udfs['Stewardship'] = [
{'action': 'water',
'height': 32,
'random': 'test'}]
self.assertRaises(
ValidationError,
self.plot.save_with_user,
self.commander_user)
def test_cannot_save_with_invalid_value(self):
self.plot.udfs['Stewardship'] = [
{'action': 'water',
'height': 'too high'}]
self.assertRaises(
ValidationError,
self.plot.save_with_user,
self.commander_user)
class UdfDeleteTest(OTMTestCase):
    """Deleting a UserDefinedFieldDefinition must also clean up dependent
    data: FieldPermission rows and the instance's mobile_api_fields."""

    def setUp(self):
        self.instance = make_instance()
        self.commander_user = make_commander_user(self.instance)

    def test_delete_udf_deletes_perms_collection(self):
        """Permissions for a collection UDF vanish with the definition."""
        set_write_permissions(self.instance, self.commander_user,
                              'Plot', ['udf:Test choice'])
        udf_def = UserDefinedFieldDefinition(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps([{'name': 'pick',
                                  'type': 'choice',
                                  'choices': ['a', 'b', 'c']},
                                 {'type': 'int',
                                  'name': 'height'}]),
            iscollection=True,
            name='Test choice')
        udf_def.save()
        qs = FieldPermission.objects.filter(
            field_name='udf:Test choice',
            model_name='Plot')
        self.assertTrue(qs.exists())
        udf_def.delete()
        self.assertFalse(qs.exists())

    def test_delete_udf_deletes_perms_value(self):
        """Permissions for a scalar UDF vanish with the definition."""
        set_write_permissions(self.instance, self.commander_user,
                              'Plot', ['udf:Test string'])
        udf_def = UserDefinedFieldDefinition(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'string'}),
            iscollection=False,
            name='Test string')
        udf_def.save()
        qs = FieldPermission.objects.filter(
            field_name='udf:Test string',
            model_name='Plot')
        self.assertTrue(qs.exists())
        udf_def.delete()
        self.assertFalse(qs.exists())

    def test_delete_udf_deletes_mobile_api_field(self):
        """Deleting a scalar UDF removes its key from mobile_api_fields."""
        udf_def = UserDefinedFieldDefinition(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'string'}),
            iscollection=False,
            name='Test string')
        udf_def.save()
        self.instance.mobile_api_fields = [
            {'header': 'fields', 'model': 'plot',
             'field_keys': ['plot.udf:Test string']}]
        self.instance.save()
        udf_def.delete()
        updated_instance = Instance.objects.get(pk=self.instance.pk)
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(
            0, len(updated_instance.mobile_api_fields[0]['field_keys']))

    def test_delete_cudf_deletes_mobile_api_field_group(self):
        """Deleting one collection UDF drops its key from the group;
        deleting the last one drops the whole group."""
        # NOTE: locals are named after their model_type ('Plot'/'Tree');
        # the original had the two names swapped.
        plot_udf_def = UserDefinedFieldDefinition(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps([{'name': 'pick',
                                  'type': 'choice',
                                  'choices': ['a', 'b', 'c']},
                                 {'type': 'int',
                                  'name': 'height'}]),
            iscollection=True,
            name='Choices')
        plot_udf_def.save()
        tree_udf_def = UserDefinedFieldDefinition(
            instance=self.instance,
            model_type='Tree',
            datatype=json.dumps([{'name': 'pick',
                                  'type': 'choice',
                                  'choices': ['1', '2', '3']},
                                 {'type': 'int',
                                  'name': 'times'}]),
            iscollection=True,
            name='Choices')
        tree_udf_def.save()
        self.instance.mobile_api_fields = [
            {'header': 'plot', 'model': 'plot', 'field_keys': ['plot.width']},
            {'header': 'Choices', 'sort_key': 'pick',
             'collection_udf_keys': ['plot.udf:Choices', 'tree.udf:Choices']}
        ]
        self.instance.save()
        plot_udf_def.delete()
        updated_instance = Instance.objects.get(pk=self.instance.pk)
        self.assertEqual(1, len(
            updated_instance.mobile_api_fields[1]['collection_udf_keys']))
        tree_udf_def.delete()
        updated_instance = Instance.objects.get(pk=self.instance.pk)
        self.assertEqual(1, len(updated_instance.mobile_api_fields))
class UdfCRUTestCase(OTMTestCase):
    """Shared fixture for UDF create/update tests: one instance, a
    commander user with write access to 'udf:Test choice', and a scalar
    choice UDF named 'Test choice' on Plot."""

    def setUp(self):
        # NOTE(review): saving the system user first appears to be
        # required by the audit machinery -- confirm.
        User._system_user.save_base()
        self.instance = make_instance()
        create_stewardship_udfs(self.instance)
        self.user = make_commander_user(self.instance)
        set_write_permissions(self.instance, self.user,
                              'Plot', ['udf:Test choice'])
        self.udf = UserDefinedFieldDefinition.objects.create(
            instance=self.instance,
            model_type='Plot',
            datatype=json.dumps({'type': 'choice',
                                 'choices': ['a', 'b', 'c']}),
            iscollection=False,
            name='Test choice')
class UdfCreateTest(UdfCRUTestCase):
    """Validation tests for the udf_create() helper."""

    def test_create_non_choice_udf(self):
        """Creates a string UDF; leading/trailing whitespace is stripped
        from the name."""
        body = {'udf.name': ' cool udf ',
                'udf.model': 'Plot',
                'udf.type': 'string'}
        udf = udf_create(body, self.instance)
        self.assertEqual(udf.instance_id, self.instance.pk)
        self.assertEqual(udf.model_type, 'Plot')
        self.assertEqual(udf.name, 'cool udf')
        self.assertEqual(udf.datatype_dict['type'], 'string')

    def test_adds_udf_to_role_when_created(self):
        """Every role in the instance gains a permission for the new UDF."""
        body = {'udf.name': 'cool udf',
                'udf.model': 'Plot',
                'udf.type': 'string'}
        udf_create(body, self.instance)
        roles_in_instance = Role.objects.filter(instance=self.instance)
        self.assertGreater(len(roles_in_instance), 0)
        for role in roles_in_instance:
            perms = [perm.field_name
                     for perm in role_field_permissions(role, self.instance)]
            self.assertIn('udf:cool udf', perms)

    def test_create_choice_udf(self):
        """Creates a choice UDF and preserves the choice list order."""
        body = {'udf.name': 'cool udf',
                'udf.model': 'Plot',
                'udf.type': 'choice',
                'udf.choices': ['a', 'b', 'c']}
        udf = udf_create(body, self.instance)
        self.assertEqual(udf.instance_id, self.instance.pk)
        self.assertEqual(udf.model_type, 'Plot')
        self.assertEqual(udf.name, 'cool udf')
        self.assertEqual(udf.datatype_dict['type'], 'choice')
        self.assertEqual(udf.datatype_dict['choices'], ['a', 'b', 'c'])

    def test_invalid_choice_list(self):
        """Rejects a missing choice list, an empty-string choice, and a
        duplicate choice."""
        body = {'udf.name': 'cool udf',
                'udf.model': 'Plot',
                'udf.type': 'choice'}
        self.assertRaises(ValidationError, udf_create, body, self.instance)
        body = {'udf.name': 'cool udf',
                'udf.model': 'Plot',
                'udf.type': 'choice',
                'udf.choices': ['', 'a']}
        self.assertRaises(ValidationError, udf_create, body, self.instance)
        body = {'udf.name': 'cool udf',
                'udf.model': 'Plot',
                'udf.type': 'choice',
                'udf.choices': ['a', 'a']}
        self.assertRaises(ValidationError, udf_create, body, self.instance)

    def test_missing_params(self):
        """Each of name, model, and type is required."""
        body = {'udf.model': 'Plot',
                'udf.type': 'string',
                'udf.choices': []}
        self.assertRaises(ValidationError, udf_create, body, self.instance)
        body = {'udf.name': 'cool udf',
                'udf.type': 'string',
                'udf.choices': []}
        self.assertRaises(ValidationError, udf_create, body, self.instance)
        body = {'udf.name': 'cool udf',
                'udf.model': 'Plot'}
        self.assertRaises(ValidationError, udf_create, body, self.instance)

    def test_empty_name(self):
        """An empty name is rejected."""
        body = {'udf.name': '',
                'udf.model': 'Plot',
                'udf.type': 'string'}
        self.assertRaises(ValidationError, udf_create, body, self.instance)

    def test_duplicate_name(self):
        """The name must not collide with an existing UDF on the model
        ('Test choice' is created by the UdfCRUTestCase fixture)."""
        body = {'udf.name': 'Test choice',
                'udf.model': 'Plot',
                'udf.type': 'string'}
        self.assertRaises(ValidationError, udf_create, body, self.instance)

    def test_invalid_model_name(self):
        """An unknown model type is rejected."""
        body = {'udf.name': 'Testing choice',
                'udf.model': 'Shoe',
                'udf.type': 'string'}
        self.assertRaises(ValidationError, udf_create, body, self.instance)
|
rivuletaudio/rivulet | refs/heads/master | server/search/torrent_search_provider.py | 1 | from tornado.web import gen
import re
# Regex for the English articles 'a' and 'the' with surrounding whitespace.
re_prepositions = r'((\ba\b|\bthe\b)\s*)|(\s*\ba\b|\bthe\b)'
# Splits a string into "words" on any run of non-alphanumerics/underscores.
re_splitter = re.compile(r'[\W_]+')


def remove_prepositions(text):
    """Strip the articles 'a' and 'the' (any case) from *text*.

    Bug fix: the flags were previously combined with ``&`` (bitwise AND),
    which evaluates to 0 and silently disabled both IGNORECASE and
    UNICODE.  Regex flags must be OR'ed together.
    """
    return re.sub(re_prepositions, '', text,
                  flags=(re.IGNORECASE | re.UNICODE))
def clean_path(path):
    """Reduce a file path to a lowercase base name suitable for matching:
    basename only, extension removed, leading track number stripped."""
    # Work on the lowercased basename only.
    name = path.split('/')[-1].lower()
    # Drop the extension (everything after the final dot), if present.
    if '.' in name:
        name = name.rsplit('.', 1)[0]
    # Strip a leading track number such as "01 - " or "12. ".
    return re.sub(r'^[0-9]+\W+', '', name)
# Word-overlap (Jaccard) score between a and b -- symmetric, despite the
# original comment's superset/subset framing of the two parameters.
def match_fraction(a, b):
    """Return the Jaccard similarity of the word sets of *a* and *b*
    (|intersection| / |union|, as a float in [0, 1])."""
    words_a = set(re_splitter.split(a))
    words_b = set(re_splitter.split(b))
    common = words_a & words_b
    return float(len(common)) / len(words_a | words_b)
def exact_match(file_list, title):
    """Return [f] for the first file whose cleaned name equals *title*,
    or [] when none matches."""
    hits = [f for f in file_list if clean_path(f) == title]
    return hits[:1]
def all_words_match(file_list, title):
    """Return [f] for the first file whose cleaned name contains exactly
    the same word set as *title* (Jaccard score of 1), else []."""
    hit = next((f for f in file_list
                if match_fraction(clean_path(f), title) == 1), None)
    return [] if hit is None else [hit]
def all_words_no_prep_match(file_list, title):
    """Like all_words_match, but with the articles 'a'/'the' removed from
    both the title and each candidate file name first."""
    stripped_title = remove_prepositions(title)
    for candidate in file_list:
        cleaned = remove_prepositions(clean_path(candidate))
        if match_fraction(cleaned, stripped_title) == 1:
            return [candidate]
    return []
def best_effort_match(file_list, title):
    """Last resort: return [f] for the file whose cleaned, article-free
    name has the highest word overlap with *title* (ties keep the first
    candidate), or [] when nothing overlaps at all."""
    best_score, best_file = 0, None
    for candidate in file_list:
        score = match_fraction(
            remove_prepositions(clean_path(candidate)), title)
        # Strictly greater, so the earliest best-scoring file wins ties.
        if score > best_score:
            best_score, best_file = score, candidate
    return [best_file] if best_score > 0 else []
def dedup(seq):
    """Return *seq* with duplicates removed, preserving first-seen order.

    Replaces the original O(n^2) implementation (a side-effecting list
    comprehension calling list.count per element) with a seen-set scan.
    """
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
#TODO: extension filtering
def file_list_search(file_list, title):
    """Run the matchers from strictest to loosest and return the deduped,
    preference-ordered list of candidate paths for *title*."""
    needle = title.lower()
    candidates = []
    for matcher in (exact_match, all_words_match,
                    all_words_no_prep_match, best_effort_match):
        candidates.extend(matcher(file_list, needle))
    return dedup(candidates)
def parse_file_list_and_find_paths(provider, response, title):
    """Parse *provider*'s file-list *response* and return the paths that
    match *title* (delegates ranking to file_list_search)."""
    listing = provider.parse_file_list(response)
    return file_list_search(listing, title.lower())
class TorrentSearchProvider:
    """Base class for torrent-backed search providers.

    Drives the search flow and caches both parsed search results and
    per-torrent file listings; subclasses supply the four abstract
    methods at the bottom.
    """
    # Magnet-link constants -- apparently for use by subclasses when
    # parsing info hashes (not referenced in this class).
    pfx_len = len('magnet:?xt=urn:btih:')
    hash_len = 40

    def __init__(self):
        self.search_cache = {}  # query -> parsed torrent list
        self.file_cache = {}    # info_link -> matching file paths

    @gen.coroutine
    def search(self, query, artist, title):
        """Yield a list of (torrent, paths) for the first torrent found
        to contain *title*; at most one entry is returned."""
        torrents_with_paths = []
        torrents = []
        # first check the cache for the torrent
        # ('in' replaces dict.has_key, which is deprecated/Python-2-only)
        if query in self.search_cache:
            torrents = self.search_cache[query]
        else:
            response = yield self.search_torrent(query)
            # parse initial search results
            torrents = self.parse_search(response, artist)
            self.search_cache[query] = torrents
        # NOTE(review): pop(0) mutates the cached list, so repeated
        # identical queries see a shrinking torrent list -- possibly
        # unintended; confirm before relying on the cache across calls.
        while len(torrents) > 0:
            torrent = torrents.pop(0)
            paths = []
            # first check the cache for the list of paths for this torrent
            info_link = torrent['info_link']
            if info_link in self.file_cache:
                paths = self.file_cache[info_link]
            else:
                # get the file listings for the torrent
                response = yield self.file_list(info_link)
                # find the song in the file list
                paths = parse_file_list_and_find_paths(self, response, title)
                self.file_cache[info_link] = paths
            if len(paths) > 0:
                torrents_with_paths.append((torrent, paths))
                # we don't need more than one source torrent
                break
        raise gen.Return(torrents_with_paths)

    # Abstract hooks.  These now raise NotImplementedError: the original
    # `raise NotImplemented()` called the NotImplemented sentinel (which
    # is not an exception type), producing a confusing TypeError instead.
    def search_torrent(self, query):
        raise NotImplementedError()

    def parse_search(self, response, artist):
        raise NotImplementedError()

    def file_list(self, torrent):
        raise NotImplementedError()

    def parse_file_list(self, response):
        raise NotImplementedError()
|
lovelysystems/pyjamas | refs/heads/ls-production | pyjd/pywebkitgtk.py | 1 | #!/usr/bin/env python
# Copyright (C) 2006, Red Hat, Inc.
# Copyright (C) 2007, One Laptop Per Child
# Copyright (C) 2007 Jan Alonzo <jmalonzo@unpluggable.com>
# Copyright (C) 2008, 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
pyjd.py is the loader for Pyjamas-Desktop applications.
It takes as the first argument either the python module containing a class
named after the module, with an onModuleLoad() function, or an HTML page
containing one or more <meta name="pygwt:module" content="modulename" />
tags.
This is an example Hello.py module (which you would load with
pyjd.py Hello.py):
from pyjamas.ui import RootPanel, Button
class Hello:
def onModuleLoad(self):
RootPanel().add(Button("Hello world"))
This is an example HTML file which will load the above example
(which you would load with pyjd.py Hello.html, and the application
Hello.py will be automatically located, through the <meta /> tag):
<html>
<head> <meta name="pygwt:module" content="Hello" /> </head>
<body />
</html>
pyjd.py will create a basic template HTML, based on the name of your
application if you do not provide one, in order to load the application.
The basic template does not contain any HTML, or any links to CSS
stylesheets, and so your application will have to add everything,
manually, by manipulating the DOM model. The basic template does,
however, include a "History" frame, which is essential for the Pyjamas
History module to function correctly.
You may find using an HTML page, even to just add a CSS stylesheet
(in the usual way - <link rel='stylesheet' href='./Hello.css' /> or
other location, even href="http://foo.org/style.css") to be more
convenient.
pyjd.py also takes a second argument (which the author has found
to be convenient) which can be used to specify an alternative
"root" location for loading of content from any "relative" URLs
in your DOM document. for example, equivalent to images with
<img src="./images/test.png" />. the author has found this to
be convenient when running pyjamas applications
http://code.google.com/p/pyjamas), which store the static content
in a directory called "public". Specifying this directory as the
second argument to pyjd.py allows the same application being
developed with Pyjamas to also be tested under Pyjamas-Desktop.
However, you may find that you need to write a separate short
http page for your Pyjamas-Desktop app, which is an identical
copy of your Pyjamas HTML page in every respect but making
absolutely sure that you remove the javascript "pygwt.js" script.
You will still need to place the page on your Web Server,
and then load it with pyjs.py as follows:
pyjs.py http://127.0.0.1/jsonrpc/output/test.html
This will ensure that pyjs.py - more specifically Webkit - knows
the correct location for all relative URLS (of the form
href="./images", stylesheet links, img src= references etc.)
If you do not remove the "pygwt.js" script from the copy of
the http loader page, pyjs.py, being effectively a web browser
in its own right thanks to Webkit, will successfully run your
Pyjamas-compiled application! Unfortunately, however, the
loader will also be activated, and you will end up running
two conflicting versions of your application - one javascript
based and one python based - simultaneously. It's probably
best to avoid this scenario.
pyjd.py is based on the PyWebkitGTK "demobrowser.py".
"""
import os
import new
import sys
import logging
import time
from gettext import gettext as _
from traceback import print_stack
import gtk
import gobject
import webkit
def module_load(m):
    """Import module *m* and return an instance of its same-named class.

    Equivalent to ``from m import m; return m()`` but implemented with
    importlib instead of a string-built ``exec`` (safer, and valid on
    both Python 2.7 and 3).  Raises ImportError if the module is missing.
    """
    import importlib
    module = importlib.import_module(m)
    return getattr(module, m)()
class WebToolbar(gtk.Toolbar):
    """Browser toolbar: back/forward, stop/reload, zoom controls and a
    URL entry, all operating on the given webkit.WebView (*browser*)."""

    def __init__(self, browser):
        gtk.Toolbar.__init__(self)
        self._browser = browser
        # navigational buttons
        self._back = gtk.ToolButton(gtk.STOCK_GO_BACK)
        self._back.set_tooltip(gtk.Tooltips(),_('Back'))
        self._back.props.sensitive = False
        self._back.connect('clicked', self._go_back_cb)
        self.insert(self._back, -1)
        self._forward = gtk.ToolButton(gtk.STOCK_GO_FORWARD)
        self._forward.set_tooltip(gtk.Tooltips(),_('Forward'))
        self._forward.props.sensitive = False
        self._forward.connect('clicked', self._go_forward_cb)
        self.insert(self._forward, -1)
        self._forward.show()
        # One button doubles as both stop (while loading) and reload.
        self._stop_and_reload = gtk.ToolButton(gtk.STOCK_REFRESH)
        self._stop_and_reload.set_tooltip(gtk.Tooltips(),_('Stop and reload current page'))
        self._stop_and_reload.connect('clicked', self._stop_and_reload_cb)
        self.insert(self._stop_and_reload, -1)
        self._stop_and_reload.show()
        self._loading = False
        self.insert(gtk.SeparatorToolItem(), -1)
        # zoom buttons
        self._zoom_in = gtk.ToolButton(gtk.STOCK_ZOOM_IN)
        self._zoom_in.set_tooltip(gtk.Tooltips(), _('Zoom in'))
        self._zoom_in.connect('clicked', self._zoom_in_cb)
        self.insert(self._zoom_in, -1)
        self._zoom_in.show()
        self._zoom_out = gtk.ToolButton(gtk.STOCK_ZOOM_OUT)
        self._zoom_out.set_tooltip(gtk.Tooltips(), _('Zoom out'))
        self._zoom_out.connect('clicked', self._zoom_out_cb)
        self.insert(self._zoom_out, -1)
        self._zoom_out.show()
        self._zoom_hundred = gtk.ToolButton(gtk.STOCK_ZOOM_100)
        self._zoom_hundred.set_tooltip(gtk.Tooltips(), _('100% zoom'))
        self._zoom_hundred.connect('clicked', self._zoom_hundred_cb)
        self.insert(self._zoom_hundred, -1)
        self._zoom_hundred.show()
        self.insert(gtk.SeparatorToolItem(), -1)
        # location entry
        self._entry = gtk.Entry()
        self._entry.connect('activate', self._entry_activate_cb)
        self._current_uri = None
        entry_item = gtk.ToolItem()
        entry_item.set_expand(True)
        entry_item.add(self._entry)
        self._entry.show()
        self.insert(entry_item, -1)
        entry_item.show()
        # scale other content besides from text as well
        self._browser.set_full_content_zoom(True)
        # Keep the address entry in sync when the page title changes.
        self._browser.connect("title-changed", self._title_changed_cb)

    def set_loading(self, loading):
        """Flip the stop/reload button between its two roles and refresh
        back/forward sensitivity."""
        self._loading = loading
        if self._loading:
            self._show_stop_icon()
            self._stop_and_reload.set_tooltip(gtk.Tooltips(),_('Stop'))
        else:
            self._show_reload_icon()
            self._stop_and_reload.set_tooltip(gtk.Tooltips(),_('Reload'))
        self._update_navigation_buttons()

    def _set_address(self, address):
        # Mirror the current URI into the location entry.
        self._entry.props.text = address
        self._current_uri = address

    def _update_navigation_buttons(self):
        can_go_back = self._browser.can_go_back()
        self._back.props.sensitive = can_go_back
        can_go_forward = self._browser.can_go_forward()
        self._forward.props.sensitive = can_go_forward

    def _entry_activate_cb(self, entry):
        # Enter in the location entry navigates to the typed URI.
        self._browser.open(entry.props.text)

    def _go_back_cb(self, button):
        self._browser.go_back()

    def _go_forward_cb(self, button):
        self._browser.go_forward()

    def _title_changed_cb(self, widget, frame, title):
        self._set_address(frame.get_uri())

    def _stop_and_reload_cb(self, button):
        # Same button: stop while a load is in flight, otherwise reload.
        if self._loading:
            self._browser.stop_loading()
        else:
            self._browser.reload()

    def _show_stop_icon(self):
        self._stop_and_reload.set_stock_id(gtk.STOCK_CANCEL)

    def _show_reload_icon(self):
        self._stop_and_reload.set_stock_id(gtk.STOCK_REFRESH)

    def _zoom_in_cb (self, widget):
        """Zoom into the page"""
        self._browser.zoom_in()

    def _zoom_out_cb (self, widget):
        """Zoom out of the page"""
        self._browser.zoom_out()

    def _zoom_hundred_cb (self, widget):
        """Zoom 100%"""
        if not (self._browser.get_zoom_level() == 1.0):
            self._browser.set_zoom_level(1.0);
class WebStatusBar(gtk.Statusbar):
    """Status bar with an optional info icon that is shown when the page
    emits JavaScript console messages."""

    def __init__(self):
        gtk.Statusbar.__init__(self)
        self.iconbox = gtk.EventBox()
        self.iconbox.add(gtk.image_new_from_stock(gtk.STOCK_INFO, gtk.ICON_SIZE_BUTTON))
        self.pack_start(self.iconbox, False, False, 6)
        self.iconbox.hide_all()

    def display(self, text, context=None):
        """Push *text* onto the status bar (context arg is ignored)."""
        cid = self.get_context_id("pywebkitgtk")
        self.push(cid, str(text))

    def show_javascript_info(self):
        self.iconbox.show()

    def hide_javascript_info(self):
        self.iconbox.hide()
def mash_attrib(name, joiner='-'):
    """Convert a camelCase name to its joiner-separated lowercase form,
    e.g. 'fontSize' -> 'font-size' (DOM attribute <-> CSS style names)."""
    return ''.join(joiner + ch.lower() if ch.isupper() else ch
                   for ch in name)
def _alert(self, msg):
    """Alert hook bound onto the main frame in WebBrowser.init_app;
    forwards to the single global WebBrowser window's alert dialog."""
    global wv
    wv._alert(msg)
def getDomDocument(self):
    """Return the frame's DOM document (bound onto the main frame as an
    instance method in WebBrowser.init_app)."""
    return self.getWebkitDocument()
def getDomWindow(self):
    """Return the DOM window object of the frame's document."""
    return self.getWebkitDocument().window
def addWindowEventListener(self, event_name, cb):
    """Register *cb* for a window-level DOM event on the main frame.

    Bound onto the frame under the name ``_addWindowEventListener`` in
    WebBrowser.init_app, so the trailing call to
    ``self.addWindowEventListener`` (no underscore) resolves to the
    frame's native webkit method -- it is NOT self-recursion.
    """
    #print self, event_name, cb
    if cb not in self._callbacks:
        # All events arrive via a single "browser-event" GObject signal;
        # only connect each callback once.
        self.connect("browser-event", cb)
        self._callbacks.append(cb)
    return self.addWindowEventListener(event_name, True)
def addXMLHttpRequestEventListener(element, event_name, cb):
    """Register *cb* for *event_name* on an XMLHttpRequest-like DOM
    *element*; the final call dispatches to the element's native
    addEventListener (note: no capture flag, unlike addEventListener
    below)."""
    if not hasattr(element, "_callbacks"):
        element._callbacks = []
    if cb not in element._callbacks:
        element.connect("browser-event", cb)
        element._callbacks.append(cb)
    return element.addEventListener(event_name)
def addEventListener(element, event_name, cb):
    """Register *cb* for *event_name* on a DOM *element*, connecting the
    GObject "browser-event" signal once per callback and then calling the
    element's native addEventListener with the second argument True."""
    if not hasattr(element, "_callbacks"):
        element._callbacks = []
    if cb not in element._callbacks:
        element.connect("browser-event", cb)
        element._callbacks.append(cb)
    return element.addEventListener(event_name, True)
class WebBrowser(gtk.Window):
    """Top-level Pyjamas-Desktop host window: a webkit.WebView plus
    toolbar and status bar.

    *application* is a URL or path to the app's .py/.html entry point;
    *appdir* optionally overrides the directory added to sys.path.
    After the first 'load-finished' signal, init_app() monkey-patches
    the main webkit frame with the pyjamas glue functions defined above.
    """

    def __init__(self, application, appdir=None, width=800, height=600):
        gtk.Window.__init__(self)
        self.set_size_request(width, height)
        self.already_initialised = False
        logging.debug("initializing web browser window")
        self._loading = False
        self._browser= webkit.WebView()
        #self._browser.connect('load-started', self._loading_start_cb)
        #self._browser.connect('load-progress-changed', self._loading_progress_cb)
        self._browser.connect('load-finished', self._loading_stop_cb)
        self._browser.connect("title-changed", self._title_changed_cb)
        self._browser.connect("hovering-over-link", self._hover_link_cb)
        self._browser.connect("status-bar-text-changed", self._statusbar_text_changed_cb)
        self._browser.connect("icon-loaded", self._icon_loaded_cb)
        self._browser.connect("selection-changed", self._selection_changed_cb)
        self._browser.connect("set-scroll-adjustments", self._set_scroll_adjustments_cb)
        self._browser.connect("populate-popup", self._populate_popup)
        # self._browser.connect("navigation-requested", self._navigation_requested_cb)
        self._browser.connect("console-message",
                              self._javascript_console_message_cb)
        self._browser.connect("script-alert",
                              self._javascript_script_alert_cb)
        self._browser.connect("script-confirm",
                              self._javascript_script_confirm_cb)
        self._browser.connect("script-prompt",
                              self._javascript_script_prompt_cb)
        self._scrolled_window = gtk.ScrolledWindow()
        self._scrolled_window.props.hscrollbar_policy = gtk.POLICY_AUTOMATIC
        self._scrolled_window.props.vscrollbar_policy = gtk.POLICY_AUTOMATIC
        self._scrolled_window.add(self._browser)
        self._scrolled_window.show_all()
        self._toolbar = WebToolbar(self._browser)
        self._statusbar = WebStatusBar()
        vbox = gtk.VBox(spacing=4)
        vbox.pack_start(self._toolbar, expand=False, fill=False)
        vbox.pack_start(self._scrolled_window)
        vbox.pack_end(self._statusbar, expand=False, fill=False)
        self.add(vbox)
        self.set_default_size(600, 480)
        self.connect('destroy', gtk.main_quit)
        self.application = application
        self.appdir = appdir
        # NOTE(review): everything below this return is unreachable dead
        # code from an earlier loading strategy (it also contains a print
        # at "Application %s must be..." with a missing % argument).
        # Actual loading happens in load_app().  Consider deleting.
        return
        if os.path.isfile(application):
            (pth, app) = os.path.split(application)
            if appdir:
                pth = os.path.abspath(appdir)
            sys.path.append(pth)
            m = None
            # first, pretend it's a module. if success, create fake template
            # otherwise, treat it as a URL
            if application[-3:] == ".py":
                try:
                    m = module_load(app[:-3])
                except ImportError, e:
                    print_stack()
                    print e
                    m = None
            if m is None:
                application = os.path.abspath(application)
                print application
                self._browser.open(application)
            else:
                # it's a python app.
                if application[-3:] != ".py":
                    print "Application %s must be a python file (.py)"
                    sys.exit(-1)
                # ok, we create a template with the app name in it:
                # pygwt_processMetas will pick up the app name
                # and do the load, there.  at least this way we
                # have a basic HTML page to start off with,
                # including a possible stylesheet.
                fqp = os.path.abspath(application[:-3])
                template = """
<html>
<head>
<meta name="pygwt:module" content="%(app)s" />
<link rel="stylesheet" href="%(app)s.css" />
<title>%(app)s</title>
</head>
<body bgcolor="white" color="white">
<iframe id='__pygwt_historyFrame' style='width:0px;height:0px;border:0px;margin:0px;padding:0px;display:none;'></iframe>
</body>
</html>
""" % {'app': app[:-3]}
                print template
                self._browser.load_string(template, "text/html", "iso-8859-15", fqp)
        else:
            # URL.
            sys.path.append(os.path.abspath(os.getcwd()))
            self._browser.open(application)

    def load_app(self):
        """Open self.application, treating a bare path as a file:// URI."""
        uri = self.application
        if uri.find("://") == -1:
            # assume file
            uri = 'file://'+os.path.abspath(uri)
        self._browser.open(uri)

    def getUri(self):
        return self.application

    def init_app(self):
        """Monkey-patch the main webkit frame with the pyjamas glue
        (addEventListener wrappers, DOM accessors, alert hook) so that
        __pyjamas__ can drive the page."""
        # TODO: ideally, this should be done by hooking body with an "onLoad".
        from __pyjamas__ import pygwt_processMetas, set_main_frame
        from __pyjamas__ import set_gtk_module
        set_gtk_module(gtk)
        main_frame = self._browser.getMainFrame()
        main_frame._callbacks = []
        main_frame.gobject_wrap = webkit.gobject_wrap
        main_frame.platform = 'webkit'
        main_frame.addEventListener = addEventListener
        main_frame.getUri = self.getUri
        main_frame.getDomWindow = new.instancemethod(getDomWindow, main_frame)
        main_frame.getDomDocument = new.instancemethod(getDomDocument, main_frame)
        main_frame._addXMLHttpRequestEventListener = addXMLHttpRequestEventListener
        main_frame._addWindowEventListener = new.instancemethod(addWindowEventListener, main_frame)
        main_frame._alert = new.instancemethod(_alert, main_frame)
        main_frame.mash_attrib = mash_attrib
        set_main_frame(main_frame)
        #for m in pygwt_processMetas():
        #    minst = module_load(m)
        #    minst.onModuleLoad()

    def _set_title(self, title):
        self.props.title = title

    def _loading_start_cb(self, view, frame):
        # (Currently disconnected -- see the commented connect above.)
        main_frame = self._browser.get_main_frame()
        if frame is main_frame:
            self._set_title(_("Loading %s - %s") % (frame.get_title(), frame.get_uri()))
        self._toolbar.set_loading(True)

    def _loading_stop_cb(self, view, frame):
        # FIXME: another frame may still be loading?
        self._toolbar.set_loading(False)
        # Run the pyjamas bootstrap exactly once, on the first finished load.
        if self.already_initialised:
            return
        self.already_initialised = True
        self.init_app()

    def _loading_progress_cb(self, view, progress):
        self._set_progress(_("%s%s loaded") % (progress, '%'))

    def _set_progress(self, progress):
        self._statusbar.display(progress)

    def _title_changed_cb(self, widget, frame, title):
        self._set_title(_("%s") % title)

    def _hover_link_cb(self, view, title, url):
        # Show the hovered link's URL in the status bar; clear on un-hover.
        if view and url:
            self._statusbar.display(url)
        else:
            self._statusbar.display('')

    def _statusbar_text_changed_cb(self, view, text):
        #if text:
        self._statusbar.display(text)

    def _icon_loaded_cb(self):
        print "icon loaded"

    def _selection_changed_cb(self):
        print "selection changed"

    def _set_scroll_adjustments_cb(self, view, hadjustment, vadjustment):
        self._scrolled_window.props.hadjustment = hadjustment
        self._scrolled_window.props.vadjustment = vadjustment

    def _navigation_requested_cb(self, view, frame, networkRequest):
        return 1

    def _javascript_console_message_cb(self, view, message, line, sourceid):
        self._statusbar.show_javascript_info()

    def _javascript_script_alert_cb(self, view, frame, message):
        """Show a modal-ish GTK dialog in place of the JS alert()."""
        print "alert", message
        def close(w):
            dialog.destroy()
        dialog = gtk.Dialog("Alert", None, gtk.DIALOG_DESTROY_WITH_PARENT)
        #dialog.Modal = True;
        label = gtk.Label(message)
        dialog.vbox.add(label)
        label.show()
        button = gtk.Button("OK")
        dialog.action_area.pack_start (button, True, True, 0)
        button.connect("clicked", close)
        button.show()
        #dialog.Response += new ResponseHandler (on_dialog_response)
        dialog.run ()

    def _alert(self, msg):
        self._javascript_script_alert_cb(None, None, msg)

    def _javascript_script_confirm_cb(self, view, frame, message, isConfirmed):
        pass

    def _browser_event_cb(self, view, event, message, fromwindow):
        #print "event! wha-hey!", event, view, message
        #print event.get_event_type()
        #event.stop_propagation()
        return True

    def _javascript_script_prompt_cb(self, view, frame, message, default, text):
        pass

    def _populate_popup(self, view, menu):
        # Append an "About" entry to the context menu.
        aboutitem = gtk.MenuItem(label="About PyWebKit")
        menu.append(aboutitem)
        aboutitem.connect('activate', self._about_pywebkitgtk_cb)
        menu.show_all()

    def _about_pywebkitgtk_cb(self, widget):
        self._browser.open("http://live.gnome.org/PyWebKitGtk")
def setup(application, appdir=None, width=800, height=600):
    """Create the single global WebBrowser window for *application* and
    pump the GTK main loop one event at a time until the app frame has
    been initialised (see is_loaded)."""
    gobject.threads_init()
    global wv
    wv = WebBrowser(application, appdir, width, height)
    wv.load_app()
    wv.show_all()
    while 1:
        if is_loaded():
            return
        run(one_event=True)
def is_loaded():
    """True once the global browser window has run its one-time init_app."""
    return wv.already_initialised
def run(one_event=False):
    """Pump the GTK main loop: one iteration when one_event is True,
    otherwise block in gtk.main() until the application quits."""
    if one_event:
        gtk.main_iteration()
    else:
        gtk.main()
|
BaskerShu/typeidea | refs/heads/master | typeidea/blog/adminforms.py | 1 | # -*- coding: utf-8 -*-
from dal import autocomplete
from django import forms
from ckeditor_uploader.widgets import CKEditorUploadingWidget
from .models import Category, Tag
from typeidea.widget import CustomAdminMarkdownxWidget
class PostAdminForm(forms.ModelForm):
    """Admin form for blog posts: a markdown editor for the body plus
    AJAX autocomplete widgets (django-autocomplete-light) for category
    and tags."""
    # Optional plain-text excerpt, edited in a simple textarea.
    desc = forms.CharField(widget=forms.Textarea, label='摘要', required=False)
    # Post body, edited with the project's markdownx-based widget.
    content = forms.CharField(widget=CustomAdminMarkdownxWidget())
    # Single category, chosen via the 'category-autocomplete' endpoint.
    category = forms.ModelChoiceField(
        queryset=Category.objects.all(),
        widget=autocomplete.ModelSelect2(url='category-autocomplete'),
        label='分类',
    )
    # Multiple tags, chosen via the 'tag-autocomplete' endpoint.
    tag = forms.ModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        widget=autocomplete.ModelSelect2Multiple(url='tag-autocomplete'),
        label='标签',
    )
|
mhbu50/frappe | refs/heads/develop | frappe/email/doctype/email_queue_recipient/email_queue_recipient.py | 22 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class EmailQueueRecipient(Document):
    """Child DocType of Email Queue: one row per recipient address.
    All behaviour is inherited from frappe's Document base class."""
    pass
|
kingvuplus/ts-gui-3 | refs/heads/master | lib/python/Plugins/Extensions/DVDBurn/Process.py | 64 | from Components.Task import Task, Job, DiskspacePrecondition, Condition, ToolExistsPrecondition
from Components.Harddisk import harddiskmanager
from Screens.MessageBox import MessageBox
import os
class png2yuvTask(Task):
    """Convert the menu background PNG into a YUV4MPEG stream via png2yuv,
    dumping the tool's stdout to *outputfile*."""
    def __init__(self, job, inputfile, outputfile):
        Task.__init__(self, job, "Creating menu video")
        self.setTool("png2yuv")
        # png2yuv options; -j names the input file (see png2yuv(1) for
        # the meaning of -n1/-Ip/-f25).
        self.args += ["-n1", "-Ip", "-f25", "-j", inputfile]
        self.dumpFile = outputfile
        self.weighting = 15
    def run(self, callback):
        Task.run(self, callback)
        # The YUV stream arrives on stdout: stop line-parsing it and
        # dump it straight to the output file instead.
        self.container.stdoutAvail.remove(self.processStdout)
        self.container.dumpToFile(self.dumpFile)
    def processStderr(self, data):
        # Diagnostics arrive on stderr; just log them.
        print "[png2yuvTask]", data[:-1]
class mpeg2encTask(Task):
    """Encode the YUV menu stream (*inputfile*) to MPEG-2 with mpeg2enc."""
    def __init__(self, job, inputfile, outputfile):
        Task.__init__(self, job, "Encoding menu video")
        self.setTool("mpeg2enc")
        # mpeg2enc options; -o names the output file (see mpeg2enc(1)
        # for -f8/-np/-a2).
        self.args += ["-f8", "-np", "-a2", "-o", outputfile]
        self.inputFile = inputfile
        self.weighting = 25
    def run(self, callback):
        Task.run(self, callback)
        # Feed the YUV data to the encoder's stdin.
        self.container.readFromFile(self.inputFile)
    def processOutputLine(self, line):
        print "[mpeg2encTask]", line[:-1]
class spumuxTask(Task):
	"""Mux the subpicture (button highlight) overlay into the menu MPEG.

	spumux reads the menu video on stdin and writes the result to stdout,
	so both ends are wired to files here.
	"""
	def __init__(self, job, xmlfile, inputfile, outputfile):
		Task.__init__(self, job, "Muxing buttons into menu")
		self.setTool("spumux")
		self.args += [xmlfile]
		self.inputFile = inputfile
		self.dumpFile = outputfile
		self.weighting = 15
	def run(self, callback):
		Task.run(self, callback)
		# stdout is the muxed MPEG stream - dump raw, don't line-parse.
		self.container.stdoutAvail.remove(self.processStdout)
		self.container.dumpToFile(self.dumpFile)
		self.container.readFromFile(self.inputFile)
	def processStderr(self, data):
		print "[spumuxTask]", data[:-1]
class MakeFifoNode(Task):
	"""Create the named pipe dvd_title_<n>.mpg through which mplex streams
	title video into the resident dvdauthor process."""
	def __init__(self, job, number):
		Task.__init__(self, job, "Make FIFO nodes")
		self.setTool("mknod")
		nodename = self.job.workspace + "/dvd_title_%d" % number + ".mpg"
		# "p" creates a FIFO special file.
		self.args += [nodename, "p"]
		self.weighting = 10
class LinkTS(Task):
	"""Symlink a source recording into the workspace under *link_name*."""
	def __init__(self, job, sourcefile, link_name):
		Task.__init__(self, job, "Creating symlink for source titles")
		self.setTool("ln")
		self.args += ["-s", sourcefile, link_name]
		self.weighting = 10
class CopyMeta(Task):
	"""Copy a recording's sidecar files (everything named
	"<recording>.<ext>" next to the source, e.g. .ts.meta) into the
	job workspace with a single cp invocation."""
	def __init__(self, job, sourcefile):
		Task.__init__(self, job, "Copy title meta files")
		self.setTool("cp")
		from os import listdir
		path, filename = sourcefile.rstrip("/").rsplit("/",1)
		tsfiles = listdir(path)
		# Renamed loop variable: the original used "file", shadowing the
		# Python 2 builtin of the same name.
		for sidecar in tsfiles:
			if sidecar.startswith(filename+"."):
				self.args += [path+'/'+sidecar]
		self.args += [self.job.workspace]
		self.weighting = 15
class DemuxTask(Task):
	"""Demultiplex one recorded title into elementary streams via projectx.

	Output file names are discovered by parsing projectx' stdout; each
	announced audio file is matched back to the PID most recently seen, so
	that cleanup() can assemble the mplex input list in the exact order of
	the user-activated audio tracks.
	"""
	def __init__(self, job, inputfile):
		Task.__init__(self, job, "Demux video into ES")
		title = job.project.titles[job.i]
		self.global_preconditions.append(DiskspacePrecondition(title.estimatedDiskspace))
		self.setTool("projectx")
		self.args += [inputfile, "-demux", "-set", "ExportPanel.Streamtype.Subpicture=0", "-set", "ExportPanel.Streamtype.Teletext=0", "-out", self.job.workspace ]
		self.end = 300
		self.prog_state = 0
		self.weighting = 1000
		self.cutfile = self.job.workspace + "/cut_%d.Xcl" % (job.i+1)
		self.cutlist = title.cutlist
		self.currentPID = None
		self.relevantAudioPIDs = [ ]
		self.getRelevantAudioPIDs(title)
		self.generated_files = [ ]
		self.mplex_audiofiles = { }
		self.mplex_videofile = ""
		self.mplex_streamfiles = [ ]
		# A single cut point means "whole recording" - no cut file needed.
		if len(self.cutlist) > 1:
			self.args += [ "-cut", self.cutfile ]
	def prepare(self):
		self.writeCutfile()
	def getRelevantAudioPIDs(self, title):
		# Collect the PIDs of every audio track the user left enabled.
		for audiotrack in title.properties.audiotracks:
			if audiotrack.active.getValue():
				self.relevantAudioPIDs.append(audiotrack.pid.getValue())
	def processOutputLine(self, line):
		# Parse projectx status output for new files, progress and PIDs.
		line = line[:-1]
		#print "[DemuxTask]", line
		MSG_NEW_FILE = "---> new File: "
		MSG_PROGRESS = "[PROGRESS] "
		MSG_NEW_MP2 = "++> Mpg Audio: PID 0x"
		MSG_NEW_AC3 = "++> AC3/DTS Audio: PID 0x"
		if line.startswith(MSG_NEW_FILE):
			file = line[len(MSG_NEW_FILE):]
			if file[0] == "'":
				file = file[1:-1]
			self.haveNewFile(file)
		elif line.startswith(MSG_PROGRESS):
			progress = line[len(MSG_PROGRESS):]
			self.haveProgress(progress)
		elif line.startswith(MSG_NEW_MP2) or line.startswith(MSG_NEW_AC3):
			try:
				# PID is printed in hex; store it as a decimal string so it
				# compares equal to the values from getRelevantAudioPIDs.
				self.currentPID = str(int(line.split(': PID 0x',1)[1].split(' ',1)[0],16))
			except ValueError:
				print "[DemuxTask] ERROR: couldn't detect Audio PID (projectx too old?)"
	def haveNewFile(self, file):
		# Newly announced output file belongs to the last-seen PID.
		print "[DemuxTask] produced file:", file, self.currentPID
		self.generated_files.append(file)
		if self.currentPID in self.relevantAudioPIDs:
			self.mplex_audiofiles[self.currentPID] = file
		elif file.endswith("m2v"):
			self.mplex_videofile = file
	def haveProgress(self, progress):
		#print "PROGRESS [%s]" % progress
		MSG_CHECK = "check & synchronize audio file"
		MSG_DONE = "done..."
		if progress == "preparing collection(s)...":
			self.prog_state = 0
		elif progress[:len(MSG_CHECK)] == MSG_CHECK:
			# Every synchronize pass adds another 100-unit progress slice.
			self.prog_state += 1
		else:
			try:
				p = int(progress)
				p = p - 1 + self.prog_state * 100
				if p > self.progress:
					self.progress = p
			except ValueError:
				pass
	def writeCutfile(self):
		# projectx expects cut points as HH:MM:SS lines (input is PTS
		# ticks at 90 kHz).
		f = open(self.cutfile, "w")
		f.write("CollectionPanel.CutMode=4\n")
		for p in self.cutlist:
			s = p / 90000
			m = s / 60
			h = m / 60
			m %= 60
			s %= 60
			f.write("%02d:%02d:%02d\n" % (h, m, s))
		f.close()
	def cleanup(self, failed):
		# Build the ordered mplex input list (video first, then the
		# relevant audio tracks); on failure remove everything we produced.
		print "[DemuxTask::cleanup]"
		self.mplex_streamfiles = [ self.mplex_videofile ]
		for pid in self.relevantAudioPIDs:
			if pid in self.mplex_audiofiles:
				self.mplex_streamfiles.append(self.mplex_audiofiles[pid])
		print self.mplex_streamfiles
		if failed:
			import os
			for file in self.generated_files:
				try:
					os.remove(file)
				except OSError:
					pass
class MplexTaskPostcondition(Condition):
	"""Succeeds when mplex finished cleanly or only hit a buffer under-run
	(which MplexTask deliberately tolerates)."""
	def check(self, task):
		# No error at all, or the tolerated under-run case.
		return task.error is None or task.error == task.ERROR_UNDERRUN
	def getErrorMessage(self, task):
		messages = {
			task.ERROR_UNDERRUN: ("Can't multiplex source video!"),
			task.ERROR_UNKNOWN: ("An unknown error occurred!")
		}
		return messages[task.error]
class MplexTask(Task):
	"""Multiplex elementary streams into a DVD program stream with mplex.

	Inputs come either from the explicit *inputfiles* list or, lazily at
	prepare() time, from a preceding DemuxTask's stream list.
	"""
	ERROR_UNDERRUN, ERROR_UNKNOWN = range(2)
	def __init__(self, job, outputfile, inputfiles=None, demux_task=None, weighting = 500):
		Task.__init__(self, job, "Mux ES into PS")
		self.weighting = weighting
		self.demux_task = demux_task
		self.postconditions.append(MplexTaskPostcondition())
		self.setTool("mplex")
		# -f8: DVD format, -v1: verbosity for progress parsing.
		self.args += ["-f8", "-o", outputfile, "-v1"]
		if inputfiles:
			self.args += inputfiles
	def setTool(self, tool):
		# Overridden copy of Task.setTool - identical except that it does
		# NOT add the ReturncodePostcondition (see comment below).
		self.cmd = tool
		self.args = [tool]
		self.global_preconditions.append(ToolExistsPrecondition())
		# we don't want the ReturncodePostcondition in this case because for right now we're just gonna ignore the fact that mplex fails with a buffer underrun error on some streams (this always at the very end)
	def prepare(self):
		self.error = None
		if self.demux_task:
			# Stream list is only known after the demux task has finished.
			self.args += self.demux_task.mplex_streamfiles
	def processOutputLine(self, line):
		print "[MplexTask] ", line[:-1]
		if line.startswith("**ERROR:"):
			if line.find("Frame data under-runs detected") != -1:
				self.error = self.ERROR_UNDERRUN
			else:
				self.error = self.ERROR_UNKNOWN
class RemoveESFiles(Task):
	"""Delete the elementary-stream files and cut file left behind by a
	DemuxTask once they have been muxed."""
	def __init__(self, job, demux_task):
		Task.__init__(self, job, "Remove temp. files")
		self.demux_task = demux_task
		self.setTool("rm")
		self.weighting = 10
	def prepare(self):
		# File list is only known after the demux task has run.
		self.args += ["-f"]
		self.args += self.demux_task.generated_files
		self.args += [self.demux_task.cutfile]
class ReplexTask(Task):
	"""Fallback remux path (used when projectx is not installed): convert
	a TS recording directly into a DVD program stream with replex."""
	def __init__(self, job, outputfile, inputfile):
		Task.__init__(self, job, "ReMux TS into PS")
		self.weighting = 1000
		self.setTool("replex")
		self.args += ["-t", "DVD", "-j", "-o", outputfile, inputfile]
	def processOutputLine(self, line):
		print "[ReplexTask] ", line[:-1]
class DVDAuthorTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Authoring DVD")
self.weighting = 20
self.setTool("dvdauthor")
self.CWD = self.job.workspace
self.args += ["-x", self.job.workspace+"/dvdauthor.xml"]
self.menupreview = job.menupreview
def processOutputLine(self, line):
print "[DVDAuthorTask] ", line[:-1]
if not self.menupreview and line.startswith("STAT: Processing"):
self.callback(self, [], stay_resident=True)
elif line.startswith("STAT: VOBU"):
try:
progress = int(line.split("MB")[0].split(" ")[-1])
if progress:
self.job.mplextask.progress = progress
print "[DVDAuthorTask] update mplextask progress:", self.job.mplextask.progress, "of", self.job.mplextask.end
except:
print "couldn't set mux progress"
class DVDAuthorFinalTask(Task):
	"""Finalize the authored DVD structure (dvdauthor -T writes the VMG
	table of contents into the workspace's dvd directory)."""
	def __init__(self, job):
		Task.__init__(self, job, "dvdauthor finalize")
		self.setTool("dvdauthor")
		self.args += ["-T", "-o", self.job.workspace + "/dvd"]
class WaitForResidentTasks(Task):
def __init__(self, job):
Task.__init__(self, job, "waiting for dvdauthor to finalize")
def run(self, callback):
print "waiting for %d resident task(s) %s to finish..." % (len(self.job.resident_tasks),str(self.job.resident_tasks))
self.callback = callback
if self.job.resident_tasks == 0:
callback(self, [])
class BurnTaskPostcondition(Condition):
	"""Judge a BurnTask by its parsed error code and map failures to
	user-visible messages."""
	RECOVERABLE = True
	def check(self, task):
		if task.returncode == 0:
			return True
		# The -RW bug (ERROR_MINUSRWBUG) is a known growisofs quirk and is
		# treated as success. Compare with == rather than "is": the error
		# constants are plain ints, and identity comparison only worked by
		# accident of CPython's small-int caching.
		elif task.error is None or task.error == task.ERROR_MINUSRWBUG:
			return True
		return False
	def getErrorMessage(self, task):
		return {
			task.ERROR_NOTWRITEABLE: _("Medium is not a writeable DVD!"),
			task.ERROR_LOAD: _("Could not load medium! No disc inserted?"),
			task.ERROR_SIZE: _("Content does not fit on DVD!"),
			task.ERROR_WRITE_FAILED: _("Write failed!"),
			task.ERROR_DVDROM: _("No (supported) DVDROM found!"),
			task.ERROR_ISOFS: _("Medium is not empty!"),
			task.ERROR_FILETOOLARGE: _("TS file is too large for ISO9660 level 1!"),
			task.ERROR_ISOTOOLARGE: _("ISO file is too large for this filesystem!"),
			task.ERROR_UNKNOWN: _("An unknown error occurred!")
		}[task.error]
class BurnTask(Task):
ERROR_NOTWRITEABLE, ERROR_LOAD, ERROR_SIZE, ERROR_WRITE_FAILED, ERROR_DVDROM, ERROR_ISOFS, ERROR_FILETOOLARGE, ERROR_ISOTOOLARGE, ERROR_MINUSRWBUG, ERROR_UNKNOWN = range(10)
def __init__(self, job, extra_args=[], tool="growisofs"):
Task.__init__(self, job, job.name)
self.weighting = 500
self.end = 120 # 100 for writing, 10 for buffer flush, 10 for closing disc
self.postconditions.append(BurnTaskPostcondition())
self.setTool(tool)
self.args += extra_args
def prepare(self):
self.error = None
def processOutputLine(self, line):
line = line[:-1]
print "[GROWISOFS] %s" % line
if line[8:14] == "done, ":
self.progress = float(line[:6])
print "progress:", self.progress
elif line.find("flushing cache") != -1:
self.progress = 100
elif line.find("closing disc") != -1:
self.progress = 110
elif line.startswith(":-["):
if line.find("ASC=30h") != -1:
self.error = self.ERROR_NOTWRITEABLE
elif line.find("ASC=24h") != -1:
self.error = self.ERROR_LOAD
elif line.find("SK=5h/ASC=A8h/ACQ=04h") != -1:
self.error = self.ERROR_MINUSRWBUG
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.startswith(":-("):
if line.find("No space left on device") != -1:
self.error = self.ERROR_SIZE
elif self.error == self.ERROR_MINUSRWBUG:
print "*sigh* this is a known bug. we're simply gonna assume everything is fine."
self.postconditions = []
elif line.find("write failed") != -1:
self.error = self.ERROR_WRITE_FAILED
elif line.find("unable to open64(") != -1 and line.find(",O_RDONLY): No such file or directory") != -1:
self.error = self.ERROR_DVDROM
elif line.find("media is not recognized as recordable DVD") != -1:
self.error = self.ERROR_NOTWRITEABLE
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.startswith("FATAL:"):
if line.find("already carries isofs!"):
self.error = self.ERROR_ISOFS
else:
self.error = self.ERROR_UNKNOWN
print "BurnTask: unknown error %s" % line
elif line.find("-allow-limited-size was not specified. There is no way do represent this file size. Aborting.") != -1:
self.error = self.ERROR_FILETOOLARGE
elif line.startswith("genisoimage: File too large."):
self.error = self.ERROR_ISOTOOLARGE
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
class RemoveDVDFolder(Task):
	"""Recursively delete the whole job workspace after the burn."""
	def __init__(self, job):
		Task.__init__(self, job, "Remove temp. files")
		self.setTool("rm")
		self.args += ["-rf", self.job.workspace]
		self.weighting = 10
class CheckDiskspaceTask(Task):
	"""Verify up front that the workspace volume can hold the demuxed ES
	files plus the muxed output of the largest single title."""
	def __init__(self, job):
		Task.__init__(self, job, "Checking free space")
		totalsize = 0
		maxsize = 0
		for title in job.project.titles:
			titlesize = title.estimatedDiskspace
			if titlesize > maxsize: maxsize = titlesize
			totalsize += titlesize
		# While one title is muxed, its ES files and the resulting MPG
		# coexist - hence total + largest title.
		# BUGFIX: the original computed a 50 MB safety margin but added it
		# to totalsize only *after* diskSpaceNeeded had been derived, so
		# the margin was never enforced; include it here as intended.
		diskSpaceNeeded = totalsize + maxsize + 50*1024*1024
		# Estimated final DVD size in MB (without the safety margin).
		job.estimateddvdsize = totalsize / 1024 / 1024
		self.global_preconditions.append(DiskspacePrecondition(diskSpaceNeeded))
		self.weighting = 5
	def abort(self):
		self.finish(aborted = True)
	def run(self, callback):
		# Evaluate preconditions immediately; no external process involved.
		self.callback = callback
		failed_preconditions = self.checkPreconditions(True) + self.checkPreconditions(False)
		if len(failed_preconditions):
			callback(self, failed_preconditions)
			return
		Task.processFinished(self, 0)
class PreviewTask(Task):
	"""Optionally let the user preview the authored DVD (or a finished
	image) and confirm the burn; asks via notification unless the box is
	in standby, in which case the questions are skipped."""
	def __init__(self, job, path):
		Task.__init__(self, job, "Preview")
		self.postconditions.append(PreviewTaskPostcondition())
		self.job = job
		self.path = path
		self.weighting = 10
	def run(self, callback):
		self.callback = callback
		if self.job.menupreview:
			self.previewProject()
		else:
			import Screens.Standby
			if Screens.Standby.inStandby:
				# Nobody is watching - proceed without asking.
				self.previewCB(False)
			else:
				from Tools import Notifications
				Notifications.AddNotificationWithCallback(self.previewCB, MessageBox, _("Do you want to preview this DVD before burning?"), timeout = 60, default = False)
	def abort(self):
		self.finish(aborted = True)
	def previewCB(self, answer):
		if answer == True:
			self.previewProject()
		else:
			self.closedCB(True)
	def playerClosed(self):
		# Called when the preview player exits.
		if self.job.menupreview:
			self.closedCB(True)
		else:
			from Tools import Notifications
			Notifications.AddNotificationWithCallback(self.closedCB, MessageBox, _("Do you want to burn this collection to DVD medium?") )
	def closedCB(self, answer):
		# returncode 1 makes PreviewTaskPostcondition fail -> job cancelled.
		if answer == True:
			Task.processFinished(self, 0)
		else:
			Task.processFinished(self, 1)
	def previewProject(self):
		from Screens.DVD import DVDPlayer
		self.job.project.session.openWithCallback(self.playerClosed, DVDPlayer, dvd_filelist= [ self.path ])
class PreviewTaskPostcondition(Condition):
	"""Fails (message "Cancel") when the user declined in PreviewTask."""
	def check(self, task):
		# returncode 0 = user confirmed, anything else = cancelled.
		return not task.returncode
	def getErrorMessage(self, task):
		return "Cancel"
class ImagingPostcondition(Condition):
	"""Reports a PIL (python-imaging) failure from the image tasks."""
	def check(self, task):
		# returncode 0 = image work succeeded.
		return not task.returncode
	def getErrorMessage(self, task):
		return _("Failed") + ": python-imaging"
class ImagePrepareTask(Task):
	"""Load the menu background image and the three fonts once; the
	results are stored on the job's Menus object and shared by every
	MenuImageTask."""
	def __init__(self, job):
		Task.__init__(self, job, _("please wait, loading picture..."))
		self.postconditions.append(ImagingPostcondition())
		self.weighting = 20
		self.job = job
		self.Menus = job.Menus
	def run(self, callback):
		self.callback = callback
		# we are doing it this weird way so that the TaskView Screen actually pops up before the spinner comes
		from enigma import eTimer
		self.delayTimer = eTimer()
		self.delayTimer.callback.append(self.conduct)
		self.delayTimer.start(10,1)
	def conduct(self):
		try:
			from ImageFont import truetype
			from Image import open as Image_open
			s = self.job.project.menutemplate.settings
			(width, height) = s.dimensions.getValue()
			self.Menus.im_bg_orig = Image_open(s.menubg.getValue())
			if self.Menus.im_bg_orig.size != (width, height):
				self.Menus.im_bg_orig = self.Menus.im_bg_orig.resize((width, height))
			self.Menus.fontsizes = [s.fontsize_headline.getValue(), s.fontsize_title.getValue(), s.fontsize_subtitle.getValue()]
			self.Menus.fonts = [(truetype(s.fontface_headline.getValue(), self.Menus.fontsizes[0])), (truetype(s.fontface_title.getValue(), self.Menus.fontsizes[1])),(truetype(s.fontface_subtitle.getValue(), self.Menus.fontsizes[2]))]
			Task.processFinished(self, 0)
		except Exception:
			# Narrowed from a bare "except:" - any loading problem still
			# reports failure via ImagingPostcondition, but SystemExit and
			# KeyboardInterrupt are no longer swallowed.
			Task.processFinished(self, 1)
class MenuImageTask(Task):
	"""Render one menu page: the background PNG (with title texts and
	thumbnails), the highlight PNG (palette image used as subpicture) and
	the spumux XML describing the button rectangles."""
	def __init__(self, job, menu_count, spuxmlfilename, menubgpngfilename, highlightpngfilename):
		Task.__init__(self, job, "Create Menu %d Image" % menu_count)
		self.postconditions.append(ImagingPostcondition())
		self.weighting = 10
		self.job = job
		self.Menus = job.Menus
		self.menu_count = menu_count
		self.spuxmlfilename = spuxmlfilename
		self.menubgpngfilename = menubgpngfilename
		self.highlightpngfilename = highlightpngfilename
	def run(self, callback):
		self.callback = callback
		#try:
		import ImageDraw, Image, os
		s = self.job.project.menutemplate.settings
		s_top = s.margin_top.getValue()
		s_bottom = s.margin_bottom.getValue()
		s_left = s.margin_left.getValue()
		s_right = s.margin_right.getValue()
		s_rows = s.space_rows.getValue()
		s_cols = s.space_cols.getValue()
		nr_cols = s.cols.getValue()
		nr_rows = s.rows.getValue()
		thumb_size = s.thumb_size.getValue()
		if thumb_size[0]:
			from Image import open as Image_open
		(s_width, s_height) = s.dimensions.getValue()
		fonts = self.Menus.fonts
		im_bg = self.Menus.im_bg_orig.copy()
		# "P" mode + spu_palette: index 0 is transparent, 1 the highlight.
		im_high = Image.new("P", (s_width, s_height), 0)
		im_high.putpalette(self.Menus.spu_palette)
		draw_bg = ImageDraw.Draw(im_bg)
		draw_high = ImageDraw.Draw(im_high)
		if self.menu_count == 1:
			headlineText = self.job.project.settings.name.getValue().decode("utf-8")
			headlinePos = self.getPosition(s.offset_headline.getValue(), 0, 0, s_width, s_top, draw_bg.textsize(headlineText, font=fonts[0]))
			draw_bg.text(headlinePos, headlineText, fill=self.Menus.color_headline, font=fonts[0])
		spuxml = """<?xml version="1.0" encoding="utf-8"?>
<subpictures>
<stream>
<spu
highlight="%s"
transparent="%02x%02x%02x"
start="00:00:00.00"
force="yes" >""" % (self.highlightpngfilename, self.Menus.spu_palette[0], self.Menus.spu_palette[1], self.Menus.spu_palette[2])
		#rowheight = (self.Menus.fontsizes[1]+self.Menus.fontsizes[2]+thumb_size[1]+s_rows)
		menu_start_title = (self.menu_count-1)*self.job.titles_per_menu + 1
		menu_end_title = (self.menu_count)*self.job.titles_per_menu + 1
		nr_titles = len(self.job.project.titles)
		if menu_end_title > nr_titles:
			menu_end_title = nr_titles+1
		col = 1
		row = 1
		for title_no in range( menu_start_title , menu_end_title ):
			title = self.job.project.titles[title_no-1]
			col_width = ( s_width - s_left - s_right ) / nr_cols
			row_height = ( s_height - s_top - s_bottom ) / nr_rows
			left = s_left + ( (col-1) * col_width ) + s_cols/2
			right = left + col_width - s_cols
			top = s_top + ( (row-1) * row_height) + s_rows/2
			bottom = top + row_height - s_rows
			width = right - left
			height = bottom - top
			if bottom > s_height:
				bottom = s_height
			#draw_bg.rectangle((left, top, right, bottom), outline=(255,0,0))
			# Per-cell canvases; pasted into the page images at the end.
			im_cell_bg = Image.new("RGBA", (width, height),(0,0,0,0))
			draw_cell_bg = ImageDraw.Draw(im_cell_bg)
			im_cell_high = Image.new("P", (width, height), 0)
			im_cell_high.putpalette(self.Menus.spu_palette)
			draw_cell_high = ImageDraw.Draw(im_cell_high)
			if thumb_size[0]:
				thumbPos = self.getPosition(s.offset_thumb.getValue(), 0, 0, width, height, thumb_size)
				box = (thumbPos[0], thumbPos[1], thumbPos[0]+thumb_size[0], thumbPos[1]+thumb_size[1])
				try:
					thumbIm = Image_open(title.inputfile.rsplit('.',1)[0] + ".png")
					im_cell_bg.paste(thumbIm,thumbPos)
				except Exception:
					# Narrowed from a bare "except:": no thumbnail for this
					# recording - draw a placeholder rectangle instead.
					draw_cell_bg.rectangle(box, fill=(64,127,127,127))
				border = s.thumb_border.getValue()
				if border:
					draw_cell_high.rectangle(box, fill=1)
					draw_cell_high.rectangle((box[0]+border, box[1]+border, box[2]-border, box[3]-border), fill=0)
			titleText = title.formatDVDmenuText(s.titleformat.getValue(), title_no).decode("utf-8")
			titlePos = self.getPosition(s.offset_title.getValue(), 0, 0, width, height, draw_bg.textsize(titleText, font=fonts[1]))
			draw_cell_bg.text(titlePos, titleText, fill=self.Menus.color_button, font=fonts[1])
			draw_cell_high.text(titlePos, titleText, fill=1, font=self.Menus.fonts[1])
			subtitleText = title.formatDVDmenuText(s.subtitleformat.getValue(), title_no).decode("utf-8")
			subtitlePos = self.getPosition(s.offset_subtitle.getValue(), 0, 0, width, height, draw_cell_bg.textsize(subtitleText, font=fonts[2]))
			draw_cell_bg.text(subtitlePos, subtitleText, fill=self.Menus.color_button, font=fonts[2])
			del draw_cell_bg
			del draw_cell_high
			im_bg.paste(im_cell_bg,(left, top, right, bottom), mask=im_cell_bg)
			im_high.paste(im_cell_high,(left, top, right, bottom))
			spuxml += """
<button name="button%s" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (str(title_no).zfill(2),left,right,top,bottom )
			if col < nr_cols:
				col += 1
			else:
				col = 1
				row += 1
		top = s_height - s_bottom - s_rows/2
		if self.menu_count < self.job.nr_menus:
			next_page_text = s.next_page_text.getValue().decode("utf-8")
			textsize = draw_bg.textsize(next_page_text, font=fonts[1])
			pos = ( s_width-textsize[0]-s_right, top )
			draw_bg.text(pos, next_page_text, fill=self.Menus.color_button, font=fonts[1])
			draw_high.text(pos, next_page_text, fill=1, font=fonts[1])
			spuxml += """
<button name="button_next" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (pos[0],pos[0]+textsize[0],pos[1],pos[1]+textsize[1])
		if self.menu_count > 1:
			prev_page_text = s.prev_page_text.getValue().decode("utf-8")
			textsize = draw_bg.textsize(prev_page_text, font=fonts[1])
			pos = ( (s_left+s_cols/2), top )
			draw_bg.text(pos, prev_page_text, fill=self.Menus.color_button, font=fonts[1])
			draw_high.text(pos, prev_page_text, fill=1, font=fonts[1])
			spuxml += """
<button name="button_prev" x0="%d" x1="%d" y0="%d" y1="%d"/>""" % (pos[0],pos[0]+textsize[0],pos[1],pos[1]+textsize[1])
		del draw_bg
		del draw_high
		fd=open(self.menubgpngfilename,"w")
		im_bg.save(fd,"PNG")
		fd.close()
		fd=open(self.highlightpngfilename,"w")
		im_high.save(fd,"PNG")
		fd.close()
		spuxml += """
</spu>
</stream>
</subpictures>"""
		f = open(self.spuxmlfilename, "w")
		f.write(spuxml)
		f.close()
		Task.processFinished(self, 0)
		#except:
			#Task.processFinished(self, 1)
	def getPosition(self, offset, left, top, right, bottom, size):
		"""Place a box of *size* inside the given rectangle: use *offset*
		when a coordinate is explicit (!= -1), otherwise center it."""
		pos = [left, top]
		if offset[0] != -1:
			pos[0] += offset[0]
		else:
			pos[0] += ( (right-left) - size[0] ) / 2
		if offset[1] != -1:
			pos[1] += offset[1]
		else:
			pos[1] += ( (bottom-top) - size[1] ) / 2
		return tuple(pos)
class Menus:
	"""Schedule the full menu pipeline for the job: image rendering, PNG
	-> YUV -> MPEG-2 conversion, muxing with the menu audio, and finally
	spumux'ing the button subpicture into each menu page."""
	def __init__(self, job):
		self.job = job
		job.Menus = self
		s = self.job.project.menutemplate.settings
		self.color_headline = tuple(s.color_headline.getValue())
		self.color_button = tuple(s.color_button.getValue())
		self.color_highlight = tuple(s.color_highlight.getValue())
		# Subpicture palette: index 0 = grey background, index 1 = highlight.
		self.spu_palette = [ 0x60, 0x60, 0x60 ] + s.color_highlight.getValue()
		ImagePrepareTask(job)
		nr_titles = len(job.project.titles)
		job.titles_per_menu = s.cols.getValue()*s.rows.getValue()
		# Ceiling division (Python 2 integer "/").
		job.nr_menus = ((nr_titles+job.titles_per_menu-1)/job.titles_per_menu)
		#a new menu_count every 4 titles (1,2,3,4->1 ; 5,6,7,8->2 etc.)
		for menu_count in range(1 , job.nr_menus+1):
			num = str(menu_count)
			spuxmlfilename = job.workspace+"/spumux"+num+".xml"
			menubgpngfilename = job.workspace+"/dvd_menubg"+num+".png"
			highlightpngfilename = job.workspace+"/dvd_highlight"+num+".png"
			MenuImageTask(job, menu_count, spuxmlfilename, menubgpngfilename, highlightpngfilename)
			png2yuvTask(job, menubgpngfilename, job.workspace+"/dvdmenubg"+num+".yuv")
			# NOTE(review): extension is ".mv2", not ".m2v" - looks like a
			# typo, but harmless since the name is only used internally.
			menubgm2vfilename = job.workspace+"/dvdmenubg"+num+".mv2"
			mpeg2encTask(job, job.workspace+"/dvdmenubg"+num+".yuv", menubgm2vfilename)
			menubgmpgfilename = job.workspace+"/dvdmenubg"+num+".mpg"
			menuaudiofilename = s.menuaudio.getValue()
			MplexTask(job, outputfile=menubgmpgfilename, inputfiles = [menubgm2vfilename, menuaudiofilename], weighting = 20)
			menuoutputfilename = job.workspace+"/dvdmenu"+num+".mpg"
			spumuxTask(job, spuxmlfilename, menubgmpgfilename, menuoutputfilename)
def CreateAuthoringXML_singleset(job):
	"""Write <workspace>/dvdauthor.xml with all titles in ONE titleset.

	Depending on the author mode, titles are reachable from the menu
	buttons ("menu*") or chained/played directly; title sources are either
	FIFOs (normal burn) or the vmgm stub (menu preview).
	"""
	nr_titles = len(job.project.titles)
	mode = job.project.settings.authormode.getValue()
	authorxml = []
	authorxml.append('<?xml version="1.0" encoding="utf-8"?>\n')
	authorxml.append(' <dvdauthor dest="' + (job.workspace+"/dvd") + '">\n')
	authorxml.append(' <vmgm>\n')
	authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
	authorxml.append(' <pgc>\n')
	authorxml.append(' <vob file="' + job.project.settings.vmgm.getValue() + '" />\n', )
	if mode.startswith("menu"):
		authorxml.append(' <post> jump titleset 1 menu; </post>\n')
	else:
		authorxml.append(' <post> jump title 1; </post>\n')
	authorxml.append(' </pgc>\n')
	authorxml.append(' </menus>\n')
	authorxml.append(' </vmgm>\n')
	authorxml.append(' <titleset>\n')
	if mode.startswith("menu"):
		authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
		authorxml.append(' <video aspect="4:3"/>\n')
		# One <pgc> per menu page, wired with prev/next navigation buttons.
		for menu_count in range(1 , job.nr_menus+1):
			if menu_count == 1:
				authorxml.append(' <pgc entry="root">\n')
			else:
				authorxml.append(' <pgc>\n')
			menu_start_title = (menu_count-1)*job.titles_per_menu + 1
			menu_end_title = (menu_count)*job.titles_per_menu + 1
			if menu_end_title > nr_titles:
				menu_end_title = nr_titles+1
			for i in range( menu_start_title , menu_end_title ):
				authorxml.append(' <button name="button' + (str(i).zfill(2)) + '"> jump title ' + str(i) +'; </button>\n')
			if menu_count > 1:
				authorxml.append(' <button name="button_prev"> jump menu ' + str(menu_count-1) + '; </button>\n')
			if menu_count < job.nr_menus:
				authorxml.append(' <button name="button_next"> jump menu ' + str(menu_count+1) + '; </button>\n')
			menuoutputfilename = job.workspace+"/dvdmenu"+str(menu_count)+".mpg"
			authorxml.append(' <vob file="' + menuoutputfilename + '" pause="inf"/>\n')
			authorxml.append(' </pgc>\n')
		authorxml.append(' </menus>\n')
	authorxml.append(' <titles>\n')
	for i in range( nr_titles ):
		chapters = ','.join(job.project.titles[i].getChapterMarks())
		title_no = i+1
		title_filename = job.workspace + "/dvd_title_%d.mpg" % (title_no)
		# Preview plays the vmgm stub; a real burn streams through a FIFO.
		if job.menupreview:
			LinkTS(job, job.project.settings.vmgm.getValue(), title_filename)
		else:
			MakeFifoNode(job, title_no)
		if mode.endswith("linked") and title_no < nr_titles:
			post_tag = "jump title %d;" % ( title_no+1 )
		elif mode.startswith("menu"):
			post_tag = "call vmgm menu 1;"
		else: post_tag = ""
		authorxml.append(' <pgc>\n')
		authorxml.append(' <vob file="' + title_filename + '" chapters="' + chapters + '" />\n')
		authorxml.append(' <post> ' + post_tag + ' </post>\n')
		authorxml.append(' </pgc>\n')
	authorxml.append(' </titles>\n')
	authorxml.append(' </titleset>\n')
	authorxml.append(' </dvdauthor>\n')
	f = open(job.workspace+"/dvdauthor.xml", "w")
	for x in authorxml:
		f.write(x)
	f.close()
def CreateAuthoringXML_multiset(job):
	"""Write <workspace>/dvdauthor.xml with one titleset PER TITLE.

	This variant allows per-title video aspect and audio track attributes;
	jumppad="yes" lets dvdauthor generate the cross-titleset jumps.
	"""
	nr_titles = len(job.project.titles)
	mode = job.project.settings.authormode.getValue()
	authorxml = []
	authorxml.append('<?xml version="1.0" encoding="utf-8"?>\n')
	authorxml.append(' <dvdauthor dest="' + (job.workspace+"/dvd") + '" jumppad="yes">\n')
	authorxml.append(' <vmgm>\n')
	authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
	authorxml.append(' <video aspect="4:3"/>\n')
	if mode.startswith("menu"):
		for menu_count in range(1 , job.nr_menus+1):
			# NOTE(review): both branches append a plain <pgc>; the
			# singleset variant marks the first menu with entry="root" -
			# confirm whether that was intended here as well.
			if menu_count == 1:
				authorxml.append(' <pgc>\n')
			else:
				authorxml.append(' <pgc>\n')
			menu_start_title = (menu_count-1)*job.titles_per_menu + 1
			menu_end_title = (menu_count)*job.titles_per_menu + 1
			if menu_end_title > nr_titles:
				menu_end_title = nr_titles+1
			for i in range( menu_start_title , menu_end_title ):
				authorxml.append(' <button name="button' + (str(i).zfill(2)) + '"> jump titleset ' + str(i) +' title 1; </button>\n')
			if menu_count > 1:
				authorxml.append(' <button name="button_prev"> jump menu ' + str(menu_count-1) + '; </button>\n')
			if menu_count < job.nr_menus:
				authorxml.append(' <button name="button_next"> jump menu ' + str(menu_count+1) + '; </button>\n')
			menuoutputfilename = job.workspace+"/dvdmenu"+str(menu_count)+".mpg"
			authorxml.append(' <vob file="' + menuoutputfilename + '" pause="inf"/>\n')
			authorxml.append(' </pgc>\n')
	else:
		authorxml.append(' <pgc>\n')
		authorxml.append(' <vob file="' + job.project.settings.vmgm.getValue() + '" />\n' )
		authorxml.append(' <post> jump titleset 1 title 1; </post>\n')
		authorxml.append(' </pgc>\n')
	authorxml.append(' </menus>\n')
	authorxml.append(' </vmgm>\n')
	for i in range( nr_titles ):
		title = job.project.titles[i]
		authorxml.append(' <titleset>\n')
		authorxml.append(' <menus lang="' + job.project.menutemplate.settings.menulang.getValue() + '">\n')
		authorxml.append(' <pgc entry="root">\n')
		authorxml.append(' <pre>\n')
		authorxml.append(' jump vmgm menu entry title;\n')
		authorxml.append(' </pre>\n')
		authorxml.append(' </pgc>\n')
		authorxml.append(' </menus>\n')
		authorxml.append(' <titles>\n')
		# Declare the active audio tracks of this title.
		for audiotrack in title.properties.audiotracks:
			active = audiotrack.active.getValue()
			if active:
				format = audiotrack.format.getValue()
				language = audiotrack.language.getValue()
				audio_tag = ' <audio format="%s"' % format
				if language != "nolang":
					audio_tag += ' lang="%s"' % language
				audio_tag += ' />\n'
				authorxml.append(audio_tag)
		aspect = title.properties.aspect.getValue()
		video_tag = ' <video aspect="'+aspect+'"'
		# NOTE(review): widescreen attribute is only emitted when the
		# widescreen property equals "4:3" - verify this condition; it
		# looks inverted at first glance.
		if title.properties.widescreen.getValue() == "4:3":
			video_tag += ' widescreen="'+title.properties.widescreen.getValue()+'"'
		video_tag += ' />\n'
		authorxml.append(video_tag)
		chapters = ','.join(title.getChapterMarks())
		title_no = i+1
		title_filename = job.workspace + "/dvd_title_%d.mpg" % (title_no)
		if job.menupreview:
			LinkTS(job, job.project.settings.vmgm.getValue(), title_filename)
		else:
			MakeFifoNode(job, title_no)
		if mode.endswith("linked") and title_no < nr_titles:
			post_tag = "jump titleset %d title 1;" % ( title_no+1 )
		elif mode.startswith("menu"):
			post_tag = "call vmgm menu 1;"
		else: post_tag = ""
		authorxml.append(' <pgc>\n')
		authorxml.append(' <vob file="' + title_filename + '" chapters="' + chapters + '" />\n')
		authorxml.append(' <post> ' + post_tag + ' </post>\n')
		authorxml.append(' </pgc>\n')
		authorxml.append(' </titles>\n')
		authorxml.append(' </titleset>\n')
	authorxml.append(' </dvdauthor>\n')
	f = open(job.workspace+"/dvdauthor.xml", "w")
	for x in authorxml:
		f.write(x)
	f.close()
def getISOfilename(isopath, volName):
	"""Return a non-existing ISO path below *isopath*: "<volName>.iso"
	first, then "<volName>001.iso", "<volName>002.iso", ..."""
	from Tools.Directories import fileExists
	candidate = isopath+'/'+volName+".iso"
	suffix = 0
	while fileExists(candidate):
		suffix += 1
		candidate = isopath+'/'+volName + str(suffix).zfill(3) + ".iso"
	return candidate
class DVDJob(Job):
	"""Top-level job: author a video DVD from the project's titles and
	burn it (or write an ISO). Uses the projectx+mplex pipeline when
	projectx is installed, otherwise falls back to replex."""
	def __init__(self, project, menupreview=False):
		Job.__init__(self, "DVDBurn Job")
		self.project = project
		from time import strftime
		from Tools.Directories import SCOPE_HDD, resolveFilename, createDir
		# Timestamped scratch directory on the HDD.
		new_workspace = resolveFilename(SCOPE_HDD) + "tmp/" + strftime("%Y%m%d%H%M%S")
		createDir(new_workspace, True)
		self.workspace = new_workspace
		self.project.workspace = self.workspace
		self.menupreview = menupreview
		self.conduct()
	def conduct(self):
		# Queue all tasks; the Job machinery runs them sequentially.
		CheckDiskspaceTask(self)
		if self.project.settings.authormode.getValue().startswith("menu") or self.menupreview:
			Menus(self)
		if self.project.settings.titlesetmode.getValue() == "multi":
			CreateAuthoringXML_multiset(self)
		else:
			CreateAuthoringXML_singleset(self)
		DVDAuthorTask(self)
		nr_titles = len(self.project.titles)
		if self.menupreview:
			PreviewTask(self, self.workspace + "/dvd/VIDEO_TS/")
		else:
			hasProjectX = os.path.exists('/usr/bin/projectx')
			print "[DVDJob] hasProjectX=", hasProjectX
			for self.i in range(nr_titles):
				self.title = self.project.titles[self.i]
				link_name = self.workspace + "/source_title_%d.ts" % (self.i+1)
				title_filename = self.workspace + "/dvd_title_%d.mpg" % (self.i+1)
				LinkTS(self, self.title.inputfile, link_name)
				if not hasProjectX:
					ReplexTask(self, outputfile=title_filename, inputfile=link_name).end = self.estimateddvdsize
				else:
					demux = DemuxTask(self, link_name)
					self.mplextask = MplexTask(self, outputfile=title_filename, demux_task=demux)
					self.mplextask.end = self.estimateddvdsize
					RemoveESFiles(self, demux)
			WaitForResidentTasks(self)
			PreviewTask(self, self.workspace + "/dvd/VIDEO_TS/")
			output = self.project.settings.output.getValue()
			volName = self.project.settings.name.getValue()
			if output == "dvd":
				self.name = _("Burn DVD")
				tool = "growisofs"
				burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
				if self.project.size/(1024*1024) > self.project.MAX_SL:
					# Oversized for single layer: slow down and add RockRidge.
					burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
			elif output == "iso":
				self.name = _("Create DVD-ISO")
				tool = "genisoimage"
				isopathfile = getISOfilename(self.project.settings.isopath.getValue(), volName)
				burnargs = [ "-o", isopathfile ]
			burnargs += [ "-dvd-video", "-publisher", "Dreambox", "-V", volName, self.workspace + "/dvd" ]
			BurnTask(self, burnargs, tool)
		RemoveDVDFolder(self)
class DVDdataJob(Job):
	"""Burn the recordings as plain data files (no DVD-Video authoring),
	either straight onto a DVD or into an ISO image."""
	def __init__(self, project):
		Job.__init__(self, "Data DVD Burn")
		self.project = project
		from time import strftime
		from Tools.Directories import SCOPE_HDD, resolveFilename, createDir
		new_workspace = resolveFilename(SCOPE_HDD) + "tmp/" + strftime("%Y%m%d%H%M%S") + "/dvd/"
		createDir(new_workspace, True)
		self.workspace = new_workspace
		self.project.workspace = self.workspace
		self.conduct()
	def conduct(self):
		if self.project.settings.output.getValue() == "iso":
			CheckDiskspaceTask(self)
		nr_titles = len(self.project.titles)
		for self.i in range(nr_titles):
			title = self.project.titles[self.i]
			filename = title.inputfile.rstrip("/").rsplit("/",1)[1]
			link_name = self.workspace + filename
			# Symlink the recording into the workspace and copy its meta
			# files; "-follow-links" below dereferences the links.
			LinkTS(self, title.inputfile, link_name)
			CopyMeta(self, title.inputfile)
		output = self.project.settings.output.getValue()
		volName = self.project.settings.name.getValue()
		tool = "growisofs"
		if output == "dvd":
			self.name = _("Burn DVD")
			burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
			if self.project.size/(1024*1024) > self.project.MAX_SL:
				burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
		elif output == "iso":
			tool = "genisoimage"
			self.name = _("Create DVD-ISO")
			isopathfile = getISOfilename(self.project.settings.isopath.getValue(), volName)
			burnargs = [ "-o", isopathfile ]
		# Filesystem format chosen in the project settings.
		if self.project.settings.dataformat.getValue() == "iso9660_1":
			burnargs += ["-iso-level", "1" ]
		elif self.project.settings.dataformat.getValue() == "iso9660_4":
			burnargs += ["-iso-level", "4", "-allow-limited-size" ]
		elif self.project.settings.dataformat.getValue() == "udf":
			burnargs += ["-udf", "-allow-limited-size" ]
		burnargs += [ "-publisher", "Dreambox", "-V", volName, "-follow-links", self.workspace ]
		BurnTask(self, burnargs, tool)
		RemoveDVDFolder(self)
class DVDisoJob(Job):
    """Job that burns an existing image to DVD: either a ready-made .iso
    file or an already-authored VIDEO_TS directory tree."""

    def __init__(self, project, imagepath):
        Job.__init__(self, _("Burn DVD"))
        self.project = project
        self.menupreview = False
        from Tools.Directories import getSize
        if imagepath.endswith(".iso"):
            PreviewTask(self, imagepath)
            # Burn the ISO image directly (growisofs "-Z device=image" form).
            burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD() + '='+imagepath, "-dvd-compat" ]
            if getSize(imagepath)/(1024*1024) > self.project.MAX_SL:
                # Larger than single layer: force early layer break, burn slowly.
                burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
        else:
            # Treat imagepath as an authored DVD directory.
            PreviewTask(self, imagepath + "/VIDEO_TS/")
            volName = self.project.settings.name.getValue()
            burnargs = [ "-Z", "/dev/" + harddiskmanager.getCD(), "-dvd-compat" ]
            if getSize(imagepath)/(1024*1024) > self.project.MAX_SL:
                burnargs += [ "-use-the-force-luke=4gms", "-speed=1", "-R" ]
            burnargs += [ "-dvd-video", "-publisher", "Dreambox", "-V", volName, imagepath ]
        tool = "growisofs"
        BurnTask(self, burnargs, tool)
|
deathping1994/sendmail-api | refs/heads/master | venv/lib/python2.7/site-packages/setuptools/command/__init__.py | 475 | __all__ = [
'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop',
'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts',
'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts',
'register', 'bdist_wininst', 'upload_docs',
]
from distutils.command.bdist import bdist
import sys
from setuptools.command import install_scripts
# Register the 'egg' format with distutils' bdist command so that
# "setup.py bdist --formats=egg" dispatches to the bdist_egg command.
if 'egg' not in bdist.format_commands:
    bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
    bdist.format_commands.append('egg')

# Keep the module namespace clean: these names are only needed above.
del bdist, sys
|
timothyclarke/ansible-modules-extras | refs/heads/devel | system/alternatives.py | 29 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage symbolic link alternatives.
(c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
(c) 2015, David Wittman <dwittman@gmail.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: alternatives
short_description: Manages alternative programs for common commands
description:
- Manages symbolic links using the 'update-alternatives' tool
- Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
version_added: "1.6"
author:
- "David Wittman (@DavidWittman)"
- "Gabe Mulley (@mulby)"
options:
name:
description:
- The generic name of the link.
required: true
path:
description:
- The path to the real executable that the link should point to.
required: true
link:
description:
- The path to the symbolic link that should point to the real executable.
- This option is required on RHEL-based distributions
required: false
priority:
description:
- The priority of the alternative
required: false
default: 50
version_added: "2.2"
requirements: [ update-alternatives ]
'''
EXAMPLES = '''
- name: correct java version selected
alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- name: alternatives link created
alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible
- name: make java 32 bit an alternative with low priority
alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-i386/jre/bin/java priority=-10
'''
import re
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
def main():
    """Ensure the requested alternative is installed and selected.

    Module arguments:
        name     -- generic name of the link group (required)
        path     -- real executable the link should point to (required)
        link     -- symlink path; needed on distros whose
                    update-alternatives lacks --query (e.g. RHEL)
        priority -- priority used when installing the alternative

    Exits via module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            path = dict(required=True, type='path'),
            link = dict(required=False, type='path'),
            priority = dict(required=False, type='int',
                            default=50),
        ),
        supports_check_mode=True,
    )

    params = module.params
    name = params['name']
    path = params['path']
    link = params['link']
    priority = params['priority']

    UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True)

    current_path = None
    all_alternatives = []

    # Run `update-alternatives --display <name>` to find existing alternatives
    (rc, display_output, _) = module.run_command(
        ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
    )
    if rc == 0:
        # Alternatives already exist for this link group.
        # Parse the output to determine the current path of the symlink and
        # the available alternatives.
        current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
                                        re.MULTILINE)
        alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)

        current_path = current_path_regex.search(display_output).group(1)
        all_alternatives = alternative_regex.findall(display_output)

        if not link:
            # Read the current symlink target from `update-alternatives --query`
            # in case we need to install the new alternative before setting it.
            #
            # This is only compatible on Debian-based systems, as the other
            # alternatives don't have --query available
            rc, query_output, _ = module.run_command(
                ['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
            )
            if rc == 0:
                for line in query_output.splitlines():
                    if line.startswith('Link:'):
                        link = line.split()[1]
                        break

    if current_path != path:
        if module.check_mode:
            module.exit_json(changed=True, current_path=current_path)
        try:
            # install the requested path if necessary
            if path not in all_alternatives:
                if not link:
                    module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
                module.run_command(
                    [UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
                    check_rc=True
                )

            # select the requested path
            module.run_command(
                [UPDATE_ALTERNATIVES, '--set', name, path],
                check_rc=True
            )

            module.exit_json(changed=True)
        except subprocess.CalledProcessError:
            # BUGFIX: the original reported `str(dir(cpe))` where `cpe` was an
            # undefined name, so a NameError masked the real failure. Report
            # the exception captured by get_exception() instead.
            e = get_exception()
            module.fail_json(msg=str(e))
    else:
        module.exit_json(changed=False)

main()
|
lecaoquochung/ddnb.django | refs/heads/master | tests/model_package/models/publication.py | 586 | from django.db import models
class Publication(models.Model):
    """Simple publication model with a single short title field."""
    # Short free-text title, capped at 30 characters.
    title = models.CharField(max_length=30)
|
lokirius/python-for-android | refs/heads/master | python3-alpha/extra_modules/pyxmpp2/xmppserializer.py | 46 | #
# (C) Copyright 2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""XMPP serializer for ElementTree data.
XMPP has specific requirements for XML serialization. Predefined
namespace prefixes must be used, including no prefix for the stanza
namespace (which may be one of, at least, two different namespaces:
'jabber:client' and 'jabber:server')"""
import threading
import re
from xml.sax.saxutils import escape, quoteattr
from .constants import STANZA_NAMESPACES, STREAM_NS, XML_NS
__docformat__ = "restructuredtext en"
STANDARD_PREFIXES = {
STREAM_NS: 'stream',
XML_NS: 'xml',
}
# C0 control characters forbidden in XML 1.0: everything below U+0020
# except TAB (U+0009), LF (U+000A) and CR (U+000D).
EVIL_CHARACTERS_RE = re.compile("[\u0000-\u0008\u000b\u000c\u000e-\u001f]",
                                                                re.UNICODE)

def remove_evil_characters(data):
    """Replace control characters (not allowed in XML) with U+FFFD."""
    return EVIL_CHARACTERS_RE.sub("\N{REPLACEMENT CHARACTER}", data)
class XMPPSerializer(object):
    """Implementation of the XMPP serializer.

    Single instance of this class should be used for a single stream and never
    reused. It will keep track of prefixes declared on the root element and
    used later.

    :Ivariables:
        - `stanza_namespace`: the default namespace of the stream
        - `_prefixes`: mapping (namespace -> prefix) of known namespace
          prefixes
        - `_root_prefixes`: prefixes declared on the root element
        - `_head_emitted`: `True` if the stream start tag has been emitted
        - `_next_id`: the next sequence number to be used in auto-generated
          prefixes.
    :Types:
        - `stanza_namespace`: `str`
        - `_prefixes`: `dict`
        - `_root_prefixes`: `dict`
        - `_head_emitted`: `bool`
        - `_next_id`: `int`
    """
    def __init__(self, stanza_namespace, extra_prefixes = None):
        """
        :Parameters:
            - `stanza_namespace`: the default namespace used for XMPP stanzas.
              E.g. 'jabber:client' for c2s connections.
            - `extra_prefixes`: mapping of namespaces to prefixes (not the
              other way) to be used on the stream. These prefixes will be
              declared on the root element and used in all descendants. That
              may be used to optimize the stream for size.
        :Types:
            - `stanza_namespace`: `str`
            - `extra_prefixes`: `str` to `str` mapping.
        """
        self.stanza_namespace = stanza_namespace
        self._prefixes = {}
        if extra_prefixes:
            self._prefixes.update(extra_prefixes)
        self._root_prefixes = None
        self._head_emitted = False
        self._next_id = 1

    def add_prefix(self, namespace, prefix):
        """Add a new namespace prefix.

        If the root element has not yet been emitted the prefix will
        be declared there, otherwise the prefix will be declared on the
        top-most element using this namespace in every stanza.

        :Parameters:
            - `namespace`: the namespace URI
            - `prefix`: the prefix string
        :Types:
            - `namespace`: `str`
            - `prefix`: `str`
        """
        if prefix == "xml" and namespace != XML_NS:
            raise ValueError("Cannot change 'xml' prefix meaning")
        self._prefixes[namespace] = prefix

    def emit_head(self, stream_from, stream_to, stream_id = None,
                    version = '1.0', language = None):
        """Return the opening tag of the stream root element.

        :Parameters:
            - `stream_from`: the 'from' attribute of the stream. May be `None`.
            - `stream_to`: the 'to' attribute of the stream. May be `None`.
            - `stream_id`: the 'id' attribute of the stream. May be `None`.
            - `version`: the 'version' of the stream.
            - `language`: the 'xml:lang' of the stream
        :Types:
            - `stream_from`: `str`
            - `stream_to`: `str`
            - `stream_id`: `str`
            - `version`: `str`
            - `language`: `str`
        """
        # pylint: disable-msg=R0913
        self._root_prefixes = dict(STANDARD_PREFIXES)
        self._root_prefixes[self.stanza_namespace] = None
        # BUGFIX: copy the extra prefixes registered in the constructor (or
        # via `add_prefix`) onto the root element. The original iterated
        # `self._root_prefixes` itself and assigned back into it, a no-op
        # that silently dropped every `extra_prefixes` entry.
        for namespace, prefix in list(self._prefixes.items()):
            if not prefix or prefix == "stream":
                continue
            if namespace in STANDARD_PREFIXES or namespace in STANZA_NAMESPACES:
                continue
            self._root_prefixes[namespace] = prefix
        tag = "<{0}:stream version={1}".format(STANDARD_PREFIXES[STREAM_NS],
                                                        quoteattr(version))
        if stream_from:
            tag += " from={0}".format(quoteattr(stream_from))
        if stream_to:
            tag += " to={0}".format(quoteattr(stream_to))
        if stream_id is not None:
            tag += " id={0}".format(quoteattr(stream_id))
        if language is not None:
            tag += " xml:lang={0}".format(quoteattr(language))
        for namespace, prefix in list(self._root_prefixes.items()):
            # the 'xml' prefix is implicit and must never be declared
            if prefix == "xml":
                continue
            if prefix:
                tag += ' xmlns:{0}={1}'.format(prefix, quoteattr(namespace))
            else:
                # default (unprefixed) namespace declaration
                tag += ' xmlns={0}'.format(quoteattr(namespace))
        tag += ">"
        self._head_emitted = True
        return tag

    def emit_tail(self):
        """Return the end tag of the stream root element."""
        return "</{0}:stream>".format(self._root_prefixes[STREAM_NS])

    def _split_qname(self, name, is_element):
        """Split an element or attribute qname into namespace and local
        name.

        :Parameters:
            - `name`: element or attribute QName
            - `is_element`: `True` for an element, `False` for an attribute
        :Types:
            - `name`: `str`
            - `is_element`: `bool`

        :Return: namespace URI, local name
        :returntype: `str`, `str`"""
        if name.startswith("{"):
            namespace, name = name[1:].split("}", 1)
            # any stanza namespace is normalized to the stream's default one
            if namespace in STANZA_NAMESPACES:
                namespace = self.stanza_namespace
        elif is_element:
            raise ValueError("Element with no namespace: {0!r}".format(name))
        else:
            # un-namespaced attribute
            namespace = None
        return namespace, name

    def _make_prefix(self, declared_prefixes):
        """Make up a new namespace prefix, which won't conflict
        with `_prefixes` and prefixes declared in the current scope.

        :Parameters:
            - `declared_prefixes`: namespace to prefix mapping for the current
              scope
        :Types:
            - `declared_prefixes`: `str` to `str` dictionary

        :Returns: a new prefix
        :Returntype: `str`
        """
        used_prefixes = set(self._prefixes.values())
        used_prefixes |= set(declared_prefixes.values())
        while True:
            prefix = "ns{0}".format(self._next_id)
            self._next_id += 1
            if prefix not in used_prefixes:
                break
        return prefix

    def _make_prefixed(self, name, is_element, declared_prefixes, declarations):
        """Return namespace-prefixed tag or attribute name.

        Add appropriate declaration to `declarations` when neccessary.

        If no prefix for an element namespace is defined, make the elements
        namespace default (no prefix). For attributes, make up a prefix in such
        case.

        :Parameters:
            - `name`: QName ('{namespace-uri}local-name')
              to convert
            - `is_element`: `True` for element, `False` for an attribute
            - `declared_prefixes`: mapping of prefixes already declared
              at this scope
            - `declarations`: XMLNS declarations on the current element.
        :Types:
            - `name`: `str`
            - `is_element`: `bool`
            - `declared_prefixes`: `str` to `str` dictionary
            - `declarations`: `str` to `str` dictionary

        :Returntype: `str`"""
        namespace, name = self._split_qname(name, is_element)
        if namespace is None:
            prefix = None
        elif namespace in declared_prefixes:
            prefix = declared_prefixes[namespace]
        elif namespace in self._prefixes:
            prefix = self._prefixes[namespace]
            declarations[namespace] = prefix
            declared_prefixes[namespace] = prefix
        else:
            if is_element:
                # make the element's namespace the default one
                prefix = None
            else:
                prefix = self._make_prefix(declared_prefixes)
            declarations[namespace] = prefix
            declared_prefixes[namespace] = prefix
        if prefix:
            return prefix + ":" + name
        else:
            return name

    @staticmethod
    def _make_ns_declarations(declarations, declared_prefixes):
        """Build namespace declarations and remove obsoleted mappings
        from `declared_prefixes`.

        :Parameters:
            - `declarations`: namespace to prefix mapping of the new
              declarations
            - `declared_prefixes`: namespace to prefix mapping of already
              declared prefixes.
        :Types:
            - `declarations`: `str` to `str` dictionary
            - `declared_prefixes`: `str` to `str` dictionary

        :Return: string of namespace declarations to be used in a start tag
        :Returntype: `str`
        """
        result = []
        for namespace, prefix in list(declarations.items()):
            if prefix:
                result.append(' xmlns:{0}={1}'.format(prefix, quoteattr(
                                                                namespace)))
            else:
                result.append(' xmlns={0}'.format(quoteattr(namespace)))
            # BUGFIX: purge the mappings obsoleted by *this* declaration.
            # The original ran this purge once, after the loop above, using
            # the leaked loop variables - and raised NameError whenever
            # `declarations` was empty (i.e. for any element introducing no
            # new namespace).
            for d_namespace, d_prefix in list(declared_prefixes.items()):
                if (not prefix and not d_prefix) or d_prefix == prefix:
                    if namespace != d_namespace:
                        del declared_prefixes[d_namespace]
        return " ".join(result)

    def _emit_element(self, element, level, declared_prefixes):
        """"Recursive XML element serializer.

        :Parameters:
            - `element`: the element to serialize
            - `level`: nest level (0 - root element, 1 - stanzas, etc.)
            - `declared_prefixes`: namespace to prefix mapping of already
              declared prefixes.
        :Types:
            - `element`: :etree:`ElementTree.Element`
            - `level`: `int`
            - `declared_prefixes`: `str` to `str` dictionary

        :Return: serialized element
        :Returntype: `str`
        """
        declarations = {}
        # copy, so declarations made here do not leak to siblings
        declared_prefixes = dict(declared_prefixes)
        name = element.tag
        prefixed = self._make_prefixed(name, True, declared_prefixes,
                                                            declarations)
        start_tag = "<{0}".format(prefixed)
        end_tag = "</{0}>".format(prefixed)
        for name, value in list(element.items()):
            prefixed = self._make_prefixed(name, False, declared_prefixes,
                                                            declarations)
            start_tag += ' {0}={1}'.format(prefixed, quoteattr(value))
        declarations = self._make_ns_declarations(declarations,
                                                            declared_prefixes)
        if declarations:
            start_tag += " " + declarations
        children = []
        for child in element:
            children.append(self._emit_element(child, level + 1,
                                                            declared_prefixes))
        if not children and not element.text:
            # empty element: use the self-closing form
            start_tag += "/>"
            end_tag = ""
            text = ""
        else:
            start_tag += ">"
            if level > 0 and element.text:
                text = escape(element.text)
            else:
                text = ""
        if level > 1 and element.tail:
            tail = escape(element.tail)
        else:
            tail = ""
        return start_tag + text + ''.join(children) + end_tag + tail

    def emit_stanza(self, element):
        """"Serialize a stanza.

        Must be called after `emit_head`.

        :Parameters:
            - `element`: the element to serialize
        :Types:
            - `element`: :etree:`ElementTree.Element`

        :Return: serialized element
        :Returntype: `str`
        """
        if not self._head_emitted:
            raise RuntimeError(".emit_head() must be called first.")
        string = self._emit_element(element, level = 1,
                                    declared_prefixes = self._root_prefixes)
        return remove_evil_characters(string)
# thread local data to store XMPPSerializer instance used by the `serialize`
# function
_THREAD = threading.local()

def serialize(element):
    """Serialize an XMPP element.

    Utility function for debugging or logging.

    :Parameters:
        - `element`: the element to serialize
    :Types:
        - `element`: :etree:`ElementTree.Element`

    :Return: serialized element
    :Returntype: `str`
    """
    # BUGFIX: attributes of a threading.local object are only visible in the
    # thread that set them. The original initialised `_THREAD.serializer`
    # once at import time, so calling serialize() from any other thread
    # raised AttributeError. Use getattr() to lazily create a per-thread
    # serializer instead.
    if getattr(_THREAD, "serializer", None) is None:
        _THREAD.serializer = XMPPSerializer("jabber:client")
        _THREAD.serializer.emit_head(None, None)
    return _THREAD.serializer.emit_stanza(element)
# vi: sts=4 et sw=4
|
xapharius/mrEnsemble | refs/heads/master | Engine/src/datahandler/image/image_training_conf.py | 2 | '''
Created on Jan 15, 2014
@author: Simon
'''
from protocol.n_image_input_protocol import NImageInputProtocol
from datahandler.abstract_job_conf import AbstractJobConf
class ImageTrainingConf(AbstractJobConf):
    """Job configuration for image-based training.

    Uses NImageInputProtocol for the input, internal and output stages, and
    the custom Hadoop input format
    'hadoopml.libfileinput.NWholeFileInputFormat'.
    """

    INPUT_PROTOCOL = NImageInputProtocol
    INTERNAL_PROTOCOL = NImageInputProtocol
    OUTPUT_PROTOCOL = NImageInputProtocol
    HADOOP_INPUT_FORMAT = 'hadoopml.libfileinput.NWholeFileInputFormat'

    def __init__(self, files_per_map):
        # Number of input files handed to each map task (forwarded to
        # Hadoop via get_job_conf()).
        self.files_per_map = files_per_map

    def get_input_protocol(self):
        """Protocol used to decode job input."""
        return self.INPUT_PROTOCOL

    def get_internal_protocol(self):
        """Protocol used between map and reduce phases."""
        return self.INTERNAL_PROTOCOL

    def get_output_protocol(self):
        """Protocol used to encode job output."""
        return self.OUTPUT_PROTOCOL

    def get_hadoop_input_format(self):
        """Fully qualified class name of the Hadoop input format."""
        return self.HADOOP_INPUT_FORMAT

    def get_job_conf(self):
        """Extra Hadoop job configuration properties."""
        return { 'hadoopml.fileinput.filespermap': self.files_per_map }
|
TateWalker/AV-Query | refs/heads/master | velocities.py | 2 | import requests
import itertools
import threading
import time
import sys
def animate():
    """Show a terminal spinner until the module-level `done` flag is set.

    Runs on a background thread; the main thread sets `done = True` when
    scraping finishes.
    """
    for c in itertools.cycle(['|', '/', '-', '\\']):
        if done:
            break
        sys.stdout.write('\rloading ' + c)
        sys.stdout.flush()
        time.sleep(0.1)
    print(chr(27) + "[2J")  # ANSI escape: clear the screen
    sys.stdout.write('\rDone!')
def getVelocities(name, link):
    """Query NED for one object and append a CSV row to Data.csv.

    The row contains the heliocentric and Virgo+GA+Shapley (VGS) velocities
    with their uncertainties and the apparent major/minor axis diameters.

    name -- object name as understood by NED (e.g. "M82")
    link -- the NED objsearch endpoint URL
    """
    inputs = {'objname': name,
              'extend': 'no',
              'hconst': '73',
              'omegam': '0.27',
              'omegav': '0.73',
              'corr_z': '1',
              'out_csys': 'Equatorial',
              'out_equinox': 'J2000.0',
              'obj_sort': "RA or Longitude",
              'of': 'pre_text',
              'zv_breaker': '30000.0',
              'list_limit': '5',
              'img_stamp': 'YES'}
    page = requests.get(link, params=inputs)
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(page.content, 'html.parser')
    # -----Get Velocities-----#
    # BUGFIX: this parsing was commented out, but Hvals/VGSVals are used in
    # the write below, which made every call fail with NameError.
    # NOTE(review): the <pre>/<table>/child indices below depend on NED's
    # HTML layout and will break if the page layout changes.
    velocities = soup.find_all('pre')[5]
    Helio = list(velocities.children)[2]
    VGS = list(velocities.children)[16]
    Helio = Helio.lstrip('\n')
    VGS = VGS.lstrip('\n')
    Hvals = [int(s) for s in Helio.split() if s.isdigit()]
    VGSVals = [int(s) for s in VGS.split() if s.isdigit()]
    # -----Get Diameters-----#
    diameters = soup.find_all('table')[22]
    # (removed a debug print that printed the bound method `at.get_text`
    # instead of any text)
    diameters = diameters.find_all('tr')[0]
    major = diameters.find_all('td')[1].get_text()
    minor = diameters.find_all('td')[2].get_text()
    # -----Append one CSV row-----#
    write_file = 'Data.csv'
    with open(write_file, 'a') as output:
        output.write(name + ',' + str(Hvals[0]) + ',' + str(Hvals[1]) + ',' + str(VGSVals[0]) + ',' + str(VGSVals[1]) + ',' + major + ',' + minor + '\n')
#-----SETUP-----#
link = "https://ned.ipac.caltech.edu/cgi-bin/objsearch?"
gals = []
can_read = False
# Collect galaxy names either interactively or from a text file
# (one name per line), looping until a valid choice is made.
while can_read == False:
    choice = input("Enter [1] to enter galaxies by hand. Enter [2] to import a .txt file of names.\n")
    if choice == '1':
        galaxies = input("Enter galaxies separated by commas: Ex. M82, M83\n")
        for x in galaxies.split(','):
            gals.append(x.strip())
        can_read = True
    elif choice == '2':
        file = input("What is the name of the file? Ex. galaxies.txt\n\n")
        with open(file) as inp:
            gals = inp.read().splitlines()
        can_read = True
    else:
        print("Please enter either [1] or [2]\n\n")
# Start the spinner thread; `done` is read by animate() and set below.
done = False
print(chr(27) + "[2J")  # ANSI escape: clear the screen
threader = threading.Thread(target=animate)
threader.start()
# Write the CSV header, then one row per galaxy.
write_file = 'Data.csv'
with open(write_file, 'w') as output:
    output.write("Name, Heliocentric Velocity (km/s), Uncertainty (km/s), VGS Velocity (km/s), Uncertainty (km/s), Apparent Major Axis (arcsec), Apparent Minor Axis (arcsec)\n")
for i in range(0,len(gals)):
    name = gals[i]
    getVelocities(name,link)
done = True
|
LeJay/android_kernel_samsung_jactiveltexx | refs/heads/cm-10.1 | tools/perf/scripts/python/netdev-times.py | 11271 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Convert the interval between two nanosecond timestamps to milliseconds.
def diff_msec(src, dst):
    delta_nsec = dst - src
    return delta_nsec / 1000000.0
# Display a process of transmitting a packet.
# `hunk` is a dict with keys dev/len/queue_t/xmit_t/free_t built by the
# tx handlers; honours the module-level `dev` name filter.
def print_transmit(hunk):
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    # columns: device, length, queue time, Qdisc latency, device latency
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format strings for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"

# Display a process of received packets and interrputs associated with
# a NET_RX softirq
# `hunk` is one entry of receive_hunk_list (see handle_irq_softirq_exit);
# all timestamps are printed relative to the first irq entry time.
def print_receive(hunk):
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed (honour the `dev` name filter)
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # hardware irq entries that raised this NET_RX softirq
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    # events recorded while the NET_RX softirq ran
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # the skb was either copied to userspace ('comm') or freed
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                            event['comm_t'])
            print PF_JOINT
def trace_begin():
    """Parse the script options ('tx', 'rx', 'dev=<name>', 'debug') into
    the module-level flags before event processing starts."""
    global show_tx
    global show_rx
    global dev
    global debug
    for i in range(len(sys.argv)):
        if i == 0:
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg =='rx':
            show_rx = 1
        elif arg.find('dev=',0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # default: show both charts when neither was requested explicitly
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    """Replay all buffered tracepoint events in timestamp order, then
    print the rx/tx charts and (optionally) buffer statistics."""
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                        b[EINFO_IDX_TIME]))
    # process all events, dispatching on the stored event name
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    if debug:
        # show how full the matching buffers got and how often they overflowed
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
# Each hook only records (name, context, cpu, time, pid, comm, ...) into
# all_event_list; the real processing happens in trace_end() after sorting.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # only NET_RX softirqs are of interest
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                            irq, irq_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                    irq, irq_name)
    all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                    napi, dev_name)
    all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                            skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                    skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                    skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                    skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                        skbaddr, skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                    skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                        skbaddr, skblen, rc, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                    skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                    skbaddr, protocol, location):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                    skbaddr, protocol, location)
    all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                    skbaddr)
    all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                    skbaddr, skblen):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                    skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    # Push a new irq record on this CPU's stack of in-flight hard irqs.
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)

def handle_irq_handler_exit(event_info):
    # Pop the matching irq record; keep it only if it accumulated events.
    # NOTE(review): assumes entry/exit events pair up; an exit without a
    # prior entry on this CPU would pop an empty list - confirm perf
    # guarantees ordering here.
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    # Record on the current hard-irq record that it raised NET_RX.
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_irq_softirq_entry(event_info):
    # Start collecting events for the NET_RX softirq running on this CPU.
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    # Close the NET_RX softirq on this CPU: combine the pending irq
    # records and the softirq's event list into one "receive hunk".
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # drop incomplete hunks (no triggering irq, or no softirq entry seen)
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)

def handle_napi_poll(event_info):
    # Attach a napi poll event to the softirq currently open on this CPU.
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                    'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)
def handle_netif_rx(event_info):
    """Attach a netif_rx event to the hard-IRQ record on top of the stack."""
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic or len(irq_dic[cpu]) == 0:
        return
    record = irq_dic[cpu].pop()
    # Lazily create the event list, then log the received skb.
    record.setdefault('event_list', []).append(
        {'time': time, 'event': 'netif_rx',
         'skbaddr': skbaddr, 'skblen': skblen, 'dev_name': dev_name})
    irq_dic[cpu].append(record)
def handle_netif_receive_skb(event_info):
    """Log an skb handed to the protocol layer during a NET_RX softirq.

    The record is shared between the softirq's event list and the bounded
    rx_skb_list history; overflow drops the oldest entry and is counted.
    """
    global of_count_rx_skb_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu not in net_rx_dic:
        return
    rec_data = {'event_name': 'netif_receive_skb',
                'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
    net_rx_dic[cpu]['event_list'].append(rec_data)
    rx_skb_list.insert(0, rec_data)
    if len(rx_skb_list) > buffer_budget:
        rx_skb_list.pop()
        of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Remember an skb queued for transmission (bounded history)."""
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    tx_queue_list.insert(0, {'dev': dev_name, 'skbaddr': skbaddr,
                             'len': skblen, 'queue_t': time})
    # Keep the history bounded; count how many entries overflowed.
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """Move a queued skb to the xmit list once the driver accepted it."""
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, rc, dev_name) = event_info
    if rc != 0:  # anything other than NETDEV_TX_OK is ignored
        return
    for idx, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] != skbaddr:
            continue
        skb['xmit_t'] = time
        tx_xmit_list.insert(0, skb)
        del tx_queue_list[idx]
        if len(tx_xmit_list) > buffer_budget:
            tx_xmit_list.pop()
            of_count_tx_xmit_list += 1
        return
def handle_kfree_skb(event_info):
    """Account for a freed/dropped skb in whichever tracking list holds it."""
    (name, context, cpu, time, pid, comm,
     skbaddr, protocol, location) = event_info
    # Still queued and never transmitted: simply forget it.
    for idx, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[idx]
            return
    # Transmitted: stamp the free time and archive the record.
    for idx, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[idx]
            return
    # Received: note which task freed it, then drop the record.
    for idx, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "kfree_skb",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[idx]
            return
def handle_consume_skb(event_info):
    """A transmitted skb was consumed: stamp its free time and archive it."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for idx, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[idx]
            return
def handle_skb_copy_datagram_iovec(event_info):
    """A received skb was copied to user space: record who consumed it."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for idx, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "skb_copy_datagram_iovec",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[idx]
            return
|
vuteam/BlackHole-New | refs/heads/master | lib/python/Screens/ChannelSelection.py | 2 | # -*- coding: utf-8 -*-
from Tools.Profile import profile
from Screen import Screen
import Screens.InfoBar
import Components.ParentalControl
from Components.Button import Button
from Components.Renderer.Picon import getPiconName
from Components.ServiceList import ServiceList, refreshServiceList
from Components.ActionMap import NumberActionMap, ActionMap, HelpableActionMap
from Components.MenuList import MenuList
from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase
profile('ChannelSelection.py 1')
from EpgSelection import EPGSelection
from enigma import eServiceReference, eEPGCache, eServiceCenter, eRCInput, eTimer, eDVBDB, iPlayableService, iServiceInformation, getPrevAsciiCode, eEnv, loadPNG
from Components.config import config, configfile, ConfigSubsection, ConfigText, ConfigYesNo
from Tools.NumericalTextInput import NumericalTextInput
profile('ChannelSelection.py 2')
from Components.NimManager import nimmanager
profile('ChannelSelection.py 2.1')
from Components.Sources.RdsDecoder import RdsDecoder
profile('ChannelSelection.py 2.2')
from Components.Sources.ServiceEvent import ServiceEvent
from Components.Sources.Event import Event
profile('ChannelSelection.py 2.3')
from Components.Input import Input
profile('ChannelSelection.py 3')
from Components.ChoiceList import ChoiceList, ChoiceEntryComponent
from Components.SystemInfo import SystemInfo
from Screens.InputBox import PinInput
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Screens.MessageBox import MessageBox
from Screens.ServiceInfo import ServiceInfo
from Screens.Hotkey import InfoBarHotkey, hotkeyActionMap, getHotkeyFunctions
profile('ChannelSelection.py 4')
from Screens.PictureInPicture import PictureInPicture
from Components.Sources.List import List
from Screens.RdsDisplay import RassInteractive
from ServiceReference import ServiceReference
from Tools.BoundFunction import boundFunction
from Tools import Notifications
from Tools.Alternatives import CompareWithAlternatives
from Tools.Directories import fileExists
from time import localtime, time
from Plugins.Plugin import PluginDescriptor
from Components.PluginComponent import plugins
from Screens.ChoiceBox import ChoiceBox
from Screens.EventView import EventViewEPGSelect
from Tools.Alternatives import GetWithAlternative
import os, unicodedata
from Blackhole.BhEpgSearch import Nab_EpgSearch, Nab_EpgSearchLast
profile('ChannelSelection.py after imports')
# Service DB flag: service was discovered by an automatic scan ("new found").
FLAG_SERVICE_NEW_FOUND = 64
# Service DB flag: service is marked as carrying dedicated 3D content.
FLAG_IS_DEDICATED_3D = 128
class BouquetSelector(Screen):
    """Small popup listing bouquets.

    Invokes selectedFunc with the chosen bouquet reference on OK;
    closes with False on cancel.
    """

    def __init__(self, session, bouquets, selectedFunc, enableWrapAround = True):
        Screen.__init__(self, session)
        self.setTitle(_('Choose bouquet'))
        self.selectedFunc = selectedFunc
        self['actions'] = ActionMap(['OkCancelActions'], {
            'ok': self.okbuttonClick,
            'cancel': self.cancelClick})
        # Keep only (name, reference) pairs for the menu.
        entries = [ (entry[0], entry[1]) for entry in bouquets ]
        self['menu'] = MenuList(entries, enableWrapAround)

    def getCurrent(self):
        # Entries are (name, ref); expose only the bouquet reference.
        selected = self['menu'].getCurrent()
        return selected and selected[1]

    def okbuttonClick(self):
        self.selectedFunc(self.getCurrent())

    def up(self):
        self['menu'].up()

    def down(self):
        self['menu'].down()

    def cancelClick(self):
        # Close with False so the caller knows nothing was chosen.
        self.close(False)
class SilentBouquetSelector():
    """Cursor over a bouquet list without any UI.

    Mirrors BouquetSelector's up/down/getCurrent interface so callers can
    step through bouquets headlessly (e.g. while zapping).
    """

    def __init__(self, bouquets, enableWrapAround = False, current = 0):
        # Keep only the bouquet references; names are not needed here.
        self.bouquets = [ entry[1] for entry in bouquets ]
        self.pos = current
        self.count = len(bouquets)
        self.enableWrapAround = enableWrapAround

    def up(self):
        # Step towards index 0; wrap to the last entry only when allowed.
        if self.enableWrapAround or self.pos > 0:
            self.pos = (self.pos - 1) % self.count

    def down(self):
        # Step towards the end; wrap to the first entry only when allowed.
        if self.enableWrapAround or self.pos < self.count - 1:
            self.pos = (self.pos + 1) % self.count

    def getCurrent(self):
        return self.bouquets[self.pos]
# Marked-edit modes used by ChannelSelection / ChannelContextMenu.
OFF = 0                # no marked edit in progress
EDIT_BOUQUET = 1       # editing the members of a bouquet
EDIT_ALTERNATIVES = 2  # editing the alternatives of a service group
def append_when_current_valid(current, menu, args, level = 0, key = ''):
    """Append a context-menu entry, but only when the selection is valid
    and the configured setup level is at least 'level'."""
    if not (current and current.valid()):
        return
    if level <= config.usage.setup_level.index:
        menu.append(ChoiceEntryComponent(key, args))
def removed_userbouquets_available():
    """Return True when at least one deleted userbouquet backup
    (userbouquet*.del) exists in /etc/enigma2."""
    # any() over a generator instead of a manual loop; also avoids the
    # original's shadowing of the (Python 2) builtin 'file'.
    return any(fname.startswith('userbouquet') and fname.endswith('.del')
               for fname in os.listdir('/etc/enigma2/'))
class ChannelContextMenu(Screen):
    """Context menu of the channel selection screen.

    Dynamically assembles a ChoiceList of actions (add/remove/rename
    services, bouquet and alternatives editing, parental control, PiP,
    startup service, move mode, ...) depending on the state of the owning
    ChannelSelection instance ('csel') and the currently highlighted entry.
    """

    def __init__(self, session, csel):
        Screen.__init__(self, session)
        self.csel = csel
        self.bsel = None
        # Optionally lock the whole context menu behind the parental PIN.
        if self.isProtected():
            self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.protectResult, PinInput, pinList=[ x.value for x in config.ParentalControl.servicepin ], triesEntry=config.ParentalControl.retries.servicepin, title=_('Please enter the correct pin code'), windowTitle=_('Enter pin code')))
        self['actions'] = ActionMap(['OkCancelActions',
         'ColorActions',
         'NumberActions',
         'MenuActions'], {'ok': self.okbuttonClick,
         'cancel': self.cancelClick,
         'blue': self.showServiceInPiP,
         'red': self.playMain,
         'menu': self.openSetup,
         '2': self.renameEntry,
         '3': self.findCurrentlyPlayed,
         '5': self.addServiceToBouquetOrAlternative,
         '6': self.toggleMoveModeSelect,
         '8': self.removeEntry})
        menu = []
        # Populated below with the add/remove callbacks appropriate for
        # the current selection; used by the numeric shortcut handlers.
        self.removeFunction = False
        self.addFunction = False
        current = csel.getCurrentSelection()
        current_root = csel.getRoot()
        current_sel_path = current.getPath()
        current_sel_flags = current.flags
        inBouquetRootList = current_root and 'FROM BOUQUET "bouquets.' in current_root.getPath()
        inAlternativeList = current_root and 'FROM BOUQUET "alternatives' in current_root.getPath()
        self.inBouquet = csel.getMutableList() is not None
        haveBouquets = config.usage.multibouquet.value
        from Components.ParentalControl import parentalControl
        self.parentalControl = parentalControl
        self.parentalControlEnabled = config.ParentalControl.servicepin[0].value and config.ParentalControl.servicepinactive.value
        # Plain playable service (not a directory/marker): offer info.
        if not (current_sel_path or current_sel_flags & (eServiceReference.isDirectory | eServiceReference.isMarker)):
            append_when_current_valid(current, menu, (_('show transponder info'), self.showServiceInformations), level=2)
        if csel.bouquet_mark_edit == OFF and not csel.entry_marked:
            if not inBouquetRootList:
                isPlayable = not current_sel_flags & (eServiceReference.isMarker | eServiceReference.isDirectory)
                if isPlayable:
                    # Entries contributed by third-party plugins.
                    for p in plugins.getPlugins(PluginDescriptor.WHERE_CHANNEL_CONTEXT_MENU):
                        append_when_current_valid(current, menu, (p.name, boundFunction(self.runPlugin, p)), key='bullet')
                    if config.servicelist.startupservice.value == current.toString():
                        append_when_current_valid(current, menu, (_('stop using as startup service'), self.unsetStartupService), level=0)
                    else:
                        append_when_current_valid(current, menu, (_('set as startup service'), self.setStartupService), level=0)
                    # Parental-control toggles for the selected service.
                    if self.parentalControlEnabled:
                        if self.parentalControl.getProtectionLevel(current.toCompareString()) == -1:
                            append_when_current_valid(current, menu, (_('add to parental protection'), boundFunction(self.addParentalProtection, current)), level=0)
                        elif self.parentalControl.isServiceProtectionBouquet(current.toCompareString()):
                            append_when_current_valid(current, menu, (_('service is in bouquet parental protection'), self.cancelClick), level=0)
                        else:
                            append_when_current_valid(current, menu, (_('remove from parental protection'), boundFunction(self.removeParentalProtection, current)), level=0)
                        if config.ParentalControl.hideBlacklist.value and not parentalControl.sessionPinCached and config.ParentalControl.storeservicepin.value != 'never':
                            append_when_current_valid(current, menu, (_('Unhide parental control services'), self.unhideParentalServices), level=0, key='1')
                    # Dedicated-3D flag toggle (requires OSD3DSetup plugin).
                    if SystemInfo['3DMode'] and fileExists('/usr/lib/enigma2/python/Plugins/SystemPlugins/OSD3DSetup/plugin.py'):
                        if eDVBDB.getInstance().getFlag(eServiceReference(current.toString())) & FLAG_IS_DEDICATED_3D:
                            append_when_current_valid(current, menu, (_('Unmark service as dedicated 3D service'), self.removeDedicated3DFlag), level=0)
                        else:
                            append_when_current_valid(current, menu, (_('Mark service as dedicated 3D service'), self.addDedicated3DFlag), level=0)
                    if haveBouquets:
                        bouquets = self.csel.getBouquetList()
                        if bouquets is None:
                            bouquetCnt = 0
                        else:
                            bouquetCnt = len(bouquets)
                        if not self.inBouquet or bouquetCnt > 1:
                            append_when_current_valid(current, menu, (_('add service to bouquet'), self.addServiceToBouquetSelected), level=0, key='5')
                            self.addFunction = self.addServiceToBouquetSelected
                        if not self.inBouquet:
                            append_when_current_valid(current, menu, (_('remove entry'), self.removeEntry), level=0, key='8')
                            self.removeFunction = self.removeSatelliteService
                    elif not self.inBouquet:
                        append_when_current_valid(current, menu, (_('add service to favourites'), self.addServiceToBouquetSelected), level=0, key='5')
                        self.addFunction = self.addServiceToBouquetSelected
                    # Picture-in-picture entries (respecting parental lock).
                    if SystemInfo['PIPAvailable']:
                        if not self.parentalControlEnabled or self.parentalControl.getProtectionLevel(current.toCompareString()) == -1:
                            if self.csel.dopipzap:
                                append_when_current_valid(current, menu, (_('play in mainwindow'), self.playMain), level=0, key='red')
                            else:
                                append_when_current_valid(current, menu, (_('play as picture in picture'), self.showServiceInPiP), level=0, key='blue')
                    append_when_current_valid(current, menu, (_('find currently played service'), self.findCurrentlyPlayed), level=0, key='3')
                else:
                    # Directory-like entry (satellite/provider/bouquet node).
                    if 'FROM SATELLITES' in current_root.getPath() and current and _('Services') in eServiceCenter.getInstance().info(current).getName(current):
                        unsigned_orbpos = current.getUnsignedData(4) >> 16
                        if unsigned_orbpos == 65535:
                            append_when_current_valid(current, menu, (_('remove cable services'), self.removeSatelliteServices), level=0)
                        elif unsigned_orbpos == 61166:
                            append_when_current_valid(current, menu, (_('remove terrestrial services'), self.removeSatelliteServices), level=0)
                        else:
                            append_when_current_valid(current, menu, (_('remove selected satellite'), self.removeSatelliteServices), level=0)
                    if haveBouquets:
                        if not self.inBouquet and 'PROVIDERS' not in current_sel_path:
                            append_when_current_valid(current, menu, (_('copy to bouquets'), self.copyCurrentToBouquetList), level=0)
                    if 'flags == %d' % FLAG_SERVICE_NEW_FOUND in current_sel_path:
                        append_when_current_valid(current, menu, (_('remove all new found flags'), self.removeAllNewFoundFlags), level=0)
                if self.inBouquet:
                    append_when_current_valid(current, menu, (_('rename entry'), self.renameEntry), level=0, key='2')
                    if not inAlternativeList:
                        append_when_current_valid(current, menu, (_('remove entry'), self.removeEntry), level=0, key='8')
                        self.removeFunction = self.removeCurrentService
                if current_root and 'flags == %d' % FLAG_SERVICE_NEW_FOUND in current_root.getPath():
                    append_when_current_valid(current, menu, (_('remove new found flag'), self.removeNewFoundFlag), level=0)
            else:
                # We are inside the bouquet root list itself.
                if self.parentalControlEnabled:
                    if self.parentalControl.getProtectionLevel(current.toCompareString()) == -1:
                        append_when_current_valid(current, menu, (_('add bouquet to parental protection'), boundFunction(self.addParentalProtection, current)), level=0)
                    else:
                        append_when_current_valid(current, menu, (_('remove bouquet from parental protection'), boundFunction(self.removeParentalProtection, current)), level=0)
                menu.append(ChoiceEntryComponent(text=(_('add bouquet'), self.showBouquetInputBox)))
                append_when_current_valid(current, menu, (_('rename entry'), self.renameEntry), level=0, key='2')
                append_when_current_valid(current, menu, (_('remove entry'), self.removeEntry), level=0, key='8')
                self.removeFunction = self.removeBouquet
                if removed_userbouquets_available():
                    append_when_current_valid(current, menu, (_('purge deleted userbouquets'), self.purgeDeletedBouquets), level=0)
                    append_when_current_valid(current, menu, (_('restore deleted userbouquets'), self.restoreDeletedBouquets), level=0)
        if self.inBouquet:
            if csel.bouquet_mark_edit == OFF:
                if csel.movemode:
                    append_when_current_valid(current, menu, (_('disable move mode'), self.toggleMoveMode), level=0, key='6')
                else:
                    append_when_current_valid(current, menu, (_('enable move mode'), self.toggleMoveMode), level=1, key='6')
                if not csel.entry_marked and not inBouquetRootList and current_root and not current_root.flags & eServiceReference.isGroup:
                    if current.type != -1:
                        menu.append(ChoiceEntryComponent(text=(_('add marker'), self.showMarkerInputBox)))
                    if not csel.movemode:
                        if haveBouquets:
                            append_when_current_valid(current, menu, (_('enable bouquet edit'), self.bouquetMarkStart), level=0)
                        else:
                            append_when_current_valid(current, menu, (_('enable favourite edit'), self.bouquetMarkStart), level=0)
                    # Alternatives handling for service groups.
                    if current_sel_flags & eServiceReference.isGroup:
                        append_when_current_valid(current, menu, (_('edit alternatives'), self.editAlternativeServices), level=2)
                        append_when_current_valid(current, menu, (_('show alternatives'), self.showAlternativeServices), level=2)
                        append_when_current_valid(current, menu, (_('remove all alternatives'), self.removeAlternativeServices), level=2)
                    elif not current_sel_flags & eServiceReference.isMarker:
                        append_when_current_valid(current, menu, (_('add alternatives'), self.addAlternativeServices), level=2)
            elif csel.bouquet_mark_edit == EDIT_BOUQUET:
                if haveBouquets:
                    append_when_current_valid(current, menu, (_('end bouquet edit'), self.bouquetMarkEnd), level=0)
                    append_when_current_valid(current, menu, (_('abort bouquet edit'), self.bouquetMarkAbort), level=0)
                else:
                    append_when_current_valid(current, menu, (_('end favourites edit'), self.bouquetMarkEnd), level=0)
                    append_when_current_valid(current, menu, (_('abort favourites edit'), self.bouquetMarkAbort), level=0)
                if current_sel_flags & eServiceReference.isMarker:
                    append_when_current_valid(current, menu, (_('rename entry'), self.renameEntry), level=0, key='2')
                    append_when_current_valid(current, menu, (_('remove entry'), self.removeEntry), level=0, key='8')
                    self.removeFunction = self.removeCurrentService
            else:
                append_when_current_valid(current, menu, (_('end alternatives edit'), self.bouquetMarkEnd), level=0)
                append_when_current_valid(current, menu, (_('abort alternatives edit'), self.bouquetMarkAbort), level=0)
        menu.append(ChoiceEntryComponent('menu', (_('Configuration...'), self.openSetup)))
        self['menu'] = ChoiceList(menu)

    def set3DMode(self, value):
        """Apply side-by-side 3D output for the current service if OSD3D is in auto mode."""
        if config.plugins.OSD3DSetup.mode.value == 'auto' and self.session.nav.currentlyPlayingServiceReference == self.csel.getCurrentSelection():
            from Plugins.SystemPlugins.OSD3DSetup.plugin import applySettings
            applySettings(value and 'sidebyside' or config.plugins.OSD3DSetup.mode.value)

    def addDedicated3DFlag(self):
        """Flag the selected service as dedicated 3D and reload bouquets."""
        eDVBDB.getInstance().addFlag(eServiceReference(self.csel.getCurrentSelection().toString()), FLAG_IS_DEDICATED_3D)
        eDVBDB.getInstance().reloadBouquets()
        self.set3DMode(True)
        self.close()

    def removeDedicated3DFlag(self):
        """Clear the dedicated-3D flag of the selected service and reload bouquets."""
        eDVBDB.getInstance().removeFlag(eServiceReference(self.csel.getCurrentSelection().toString()), FLAG_IS_DEDICATED_3D)
        eDVBDB.getInstance().reloadBouquets()
        self.set3DMode(False)
        self.close()

    def isProtected(self):
        """Return True when this menu itself must be PIN-protected."""
        return self.csel.protectContextMenu and config.ParentalControl.setuppinactive.value and config.ParentalControl.config_sections.context_menus.value

    def protectResult(self, answer):
        """PIN dialog callback: unlock the menu, or complain / close."""
        if answer:
            self.csel.protectContextMenu = False
        elif answer is not None:
            self.session.openWithCallback(self.close, MessageBox, _('The pin code you entered is wrong.'), MessageBox.TYPE_ERROR)
        else:
            self.close()

    def addServiceToBouquetOrAlternative(self):
        """Shortcut key '5': run whichever add-callback __init__ selected."""
        if self.addFunction:
            self.addFunction()
        else:
            return 0

    def getCurrentSelectionName(self):
        """Return the display name of the selected service, stripped of control chars."""
        cur = self.csel.getCurrentSelection()
        if cur and cur.valid():
            name = eServiceCenter.getInstance().info(cur).getName(cur) or ServiceReference(cur).getServiceName() or ''
            name = name.replace('\xc2\x86', '').replace('\xc2\x87', '')
            return name
        return ''

    def removeEntry(self):
        """Shortcut key '8': remove the selection, optionally after confirmation."""
        if self.removeFunction and self.csel.servicelist.getCurrent() and self.csel.servicelist.getCurrent().valid():
            if self.csel.confirmRemove:
                list = [(_('yes'), True), (_('no'), False), (_('yes') + ' ' + _('and never ask again this session again'), 'never')]
                self.session.openWithCallback(self.removeFunction, MessageBox, _('Are you sure to remove this entry?') + '\n%s' % self.getCurrentSelectionName(), list=list)
            else:
                self.removeFunction(True)
        else:
            return 0

    def removeCurrentService(self, answer):
        """Confirmation callback: remove the selected service from the bouquet."""
        if answer:
            if answer == 'never':
                # Skip further confirmations for the rest of this session.
                self.csel.confirmRemove = False
            self.csel.removeCurrentService()
            self.close()

    def removeSatelliteService(self, answer):
        """Confirmation callback: remove the selected satellite service."""
        if answer:
            if answer == 'never':
                self.csel.confirmRemove = False
            self.csel.removeSatelliteService()
            self.close()

    def removeBouquet(self, answer):
        """Confirmation callback: delete the selected bouquet and reload."""
        if answer:
            self.csel.removeBouquet()
            eDVBDB.getInstance().reloadBouquets()
            self.close()

    def purgeDeletedBouquets(self):
        """Ask before permanently deleting all *.del userbouquet backups."""
        self.session.openWithCallback(self.purgeDeletedBouquetsCallback, MessageBox, _('Are you sure to purge all deleted userbouquets?'))

    def purgeDeletedBouquetsCallback(self, answer):
        """Permanently delete every userbouquet*.del file in /etc/enigma2."""
        if answer:
            for file in os.listdir('/etc/enigma2/'):
                if file.startswith('userbouquet') and file.endswith('.del'):
                    file = '/etc/enigma2/' + file
                    print 'permantly remove file ', file
                    os.remove(file)
            self.close()

    def restoreDeletedBouquets(self):
        """Rename userbouquet*.del files back and reload the bouquet DB."""
        for file in os.listdir('/etc/enigma2/'):
            if file.startswith('userbouquet') and file.endswith('.del'):
                file = '/etc/enigma2/' + file
                print 'restore file ', file[:-4]
                os.rename(file, file[:-4])
        eDVBDBInstance = eDVBDB.getInstance()
        # Temporarily load unlinked userbouquets so the restored files appear.
        eDVBDBInstance.setLoadUnlinkedUserbouquets(True)
        eDVBDBInstance.reloadBouquets()
        eDVBDBInstance.setLoadUnlinkedUserbouquets(config.misc.load_unlinked_userbouquets.value)
        refreshServiceList()
        self.csel.showFavourites()
        self.close()

    def playMain(self):
        """While PiP-zapping: zap the main window to the selection."""
        sel = self.csel.getCurrentSelection()
        if sel and sel.valid() and self.csel.dopipzap and (not self.parentalControlEnabled or self.parentalControl.getProtectionLevel(self.csel.getCurrentSelection().toCompareString()) == -1):
            self.csel.zap()
            self.csel.setCurrentSelection(sel)
            self.close(True)
        else:
            return 0

    def okbuttonClick(self):
        # Run the callback stored in the selected ChoiceList entry.
        self['menu'].getCurrent()[0][1]()

    def openSetup(self):
        from Screens.Setup import Setup
        self.session.openWithCallback(self.cancelClick, Setup, 'userinterface')

    def cancelClick(self, dummy = False):
        self.close(False)

    def showServiceInformations(self):
        self.session.open(ServiceInfo, self.csel.getCurrentSelection())

    def setStartupService(self):
        """Ask whether the startup service applies only at boot or also after standby."""
        self.session.openWithCallback(self.setStartupServiceCallback, MessageBox, _('Set startup service'), list=[(_('Only on startup'), 'startup'), (_('Also on standby'), 'standby')])

    def setStartupServiceCallback(self, answer):
        """Persist the selection (service, path, mode) as the startup service."""
        if answer:
            config.servicelist.startupservice.value = self.csel.getCurrentSelection().toString()
            path = ';'.join([ i.toString() for i in self.csel.servicePath ])
            config.servicelist.startuproot.value = path
            config.servicelist.startupmode.value = config.servicelist.lastmode.value
            config.servicelist.startupservice_onstandby.value = answer == 'standby'
            config.servicelist.save()
            configfile.save()
            self.close()

    def unsetStartupService(self):
        """Clear the configured startup service."""
        config.servicelist.startupservice.value = ''
        config.servicelist.startupservice_onstandby.value = False
        config.servicelist.save()
        configfile.save()
        self.close()

    def showBouquetInputBox(self):
        self.session.openWithCallback(self.bouquetInputCallback, VirtualKeyBoard, title=_('Please enter a name for the new bouquet'), text='bouquetname', maxSize=False, visible_width=56, type=Input.TEXT)

    def bouquetInputCallback(self, bouquet):
        if bouquet is not None:
            self.csel.addBouquet(bouquet, None)
        self.close()

    def addParentalProtection(self, service):
        """Protect a service; immediately hide it when blacklist hiding is on."""
        self.parentalControl.protectService(service.toCompareString())
        if config.ParentalControl.hideBlacklist.value and not self.parentalControl.sessionPinCached:
            self.csel.servicelist.resetRoot()
        self.close()

    def removeParentalProtection(self, service):
        """Ask for the service PIN before unprotecting a service."""
        self.session.openWithCallback(boundFunction(self.pinEntered, service.toCompareString()), PinInput, pinList=[config.ParentalControl.servicepin[0].value], triesEntry=config.ParentalControl.retries.servicepin, title=_('Enter the service pin'), windowTitle=_('Enter pin code'))

    def pinEntered(self, service, answer):
        """PIN dialog callback for removeParentalProtection."""
        if answer:
            self.parentalControl.unProtectService(service)
            self.close()
        elif answer is not None:
            self.session.openWithCallback(self.close, MessageBox, _('The pin code you entered is wrong.'), MessageBox.TYPE_ERROR)
        else:
            self.close()

    def unhideParentalServices(self):
        """Temporarily unhide blacklisted services (PIN-gated when required)."""
        if self.csel.protectContextMenu:
            self.session.openWithCallback(self.unhideParentalServicesCallback, PinInput, pinList=[config.ParentalControl.servicepin[0].value], triesEntry=config.ParentalControl.retries.servicepin, title=_('Enter the service pin'), windowTitle=_('Enter pin code'))
        else:
            self.unhideParentalServicesCallback(True)

    def unhideParentalServicesCallback(self, answer):
        """Cache the session PIN, rebuild the list and restore the cursor."""
        if answer:
            service = self.csel.servicelist.getCurrent()
            self.parentalControl.setSessionPinCached()
            self.parentalControl.hideBlacklist()
            self.csel.servicelist.resetRoot()
            self.csel.servicelist.setCurrent(service)
            self.close()
        elif answer is not None:
            self.session.openWithCallback(self.close, MessageBox, _('The pin code you entered is wrong.'), MessageBox.TYPE_ERROR)
        else:
            self.close()

    def showServiceInPiP(self):
        """Start the selected service in a (new) picture-in-picture window."""
        if self.csel.dopipzap or self.parentalControlEnabled and not self.parentalControl.getProtectionLevel(self.csel.getCurrentSelection().toCompareString()) == -1:
            return 0
        # Replace any existing PiP instance with a fresh one.
        if self.session.pipshown:
            del self.session.pip
        self.session.pip = self.session.instantiateDialog(PictureInPicture)
        self.session.pip.setAnimationMode(0)
        self.session.pip.show()
        newservice = self.csel.servicelist.getCurrent()
        currentBouquet = self.csel.servicelist and self.csel.servicelist.getRoot()
        if newservice and newservice.valid():
            if self.session.pip.playService(newservice):
                self.session.pipshown = True
                self.session.pip.servicePath = self.csel.getCurrentServicePath()
                self.session.pip.servicePath[1] = currentBouquet
                self.close(True)
            else:
                self.session.pipshown = False
                del self.session.pip
                self.session.openWithCallback(self.close, MessageBox, _('Could not open Picture in Picture'), MessageBox.TYPE_ERROR)
        else:
            return 0

    def addServiceToBouquetSelected(self):
        """Add the selection to a bouquet; ask which one when several exist."""
        bouquets = self.csel.getBouquetList()
        if bouquets is None:
            cnt = 0
        else:
            cnt = len(bouquets)
        if cnt > 1:
            self.bsel = self.session.openWithCallback(self.bouquetSelClosed, BouquetSelector, bouquets, self.addCurrentServiceToBouquet)
        elif cnt == 1:
            self.addCurrentServiceToBouquet(bouquets[0][1], closeBouquetSelection=False)

    def bouquetSelClosed(self, recursive):
        self.bsel = None
        if recursive:
            self.close(False)

    def removeSatelliteServices(self):
        self.csel.removeSatelliteServices()
        self.close()

    def copyCurrentToBouquetList(self):
        self.csel.copyCurrentToBouquetList()
        self.close()

    def showMarkerInputBox(self):
        self.session.openWithCallback(self.markerInputCallback, VirtualKeyBoard, title=_('Please enter a name for the new marker'), text='markername', maxSize=False, visible_width=56, type=Input.TEXT)

    def markerInputCallback(self, marker):
        if marker is not None:
            self.csel.addMarker(marker)
        self.close()

    def addCurrentServiceToBouquet(self, dest, closeBouquetSelection = True):
        """Add the selection to bouquet 'dest' and close the selector/menu."""
        self.csel.addServiceToBouquet(dest)
        if self.bsel is not None:
            self.bsel.close(True)
        else:
            self.close(closeBouquetSelection)

    def renameEntry(self):
        """Shortcut key '2': rename the selected bouquet entry."""
        if self.inBouquet and self.csel.servicelist.getCurrent() and self.csel.servicelist.getCurrent().valid() and not self.csel.entry_marked:
            self.csel.renameEntry()
            self.close()
        else:
            return 0

    def toggleMoveMode(self):
        if self.inBouquet and self.csel.servicelist.getCurrent() and self.csel.servicelist.getCurrent().valid():
            self.csel.toggleMoveMode()
            self.close()
        else:
            return 0

    def toggleMoveModeSelect(self):
        """Shortcut key '6': enter move mode with the current entry selected."""
        if self.inBouquet and self.csel.servicelist.getCurrent() and self.csel.servicelist.getCurrent().valid():
            self.csel.toggleMoveMode(True)
            self.close()
        else:
            return 0

    def bouquetMarkStart(self):
        self.csel.startMarkedEdit(EDIT_BOUQUET)
        self.close()

    def bouquetMarkEnd(self):
        self.csel.endMarkedEdit(abort=False)
        self.close()

    def bouquetMarkAbort(self):
        self.csel.endMarkedEdit(abort=True)
        self.close()

    def removeNewFoundFlag(self):
        eDVBDB.getInstance().removeFlag(self.csel.getCurrentSelection(), FLAG_SERVICE_NEW_FOUND)
        self.close()

    def removeAllNewFoundFlags(self):
        """Strip the new-found flag from all services of the current satellite."""
        curpath = self.csel.getCurrentSelection().getPath()
        # Parse the orbital position out of the fav/query path string.
        idx = curpath.find('satellitePosition == ')
        if idx != -1:
            tmp = curpath[idx + 21:]
            idx = tmp.find(')')
            if idx != -1:
                satpos = int(tmp[:idx])
                eDVBDB.getInstance().removeFlags(FLAG_SERVICE_NEW_FOUND, -1, -1, -1, satpos)
        self.close()

    def editAlternativeServices(self):
        self.csel.startMarkedEdit(EDIT_ALTERNATIVES)
        self.close()

    def showAlternativeServices(self):
        self.csel['Service'].editmode = True
        self.csel.enterPath(self.csel.getCurrentSelection())
        self.close()

    def removeAlternativeServices(self):
        self.csel.removeAlternativeServices()
        self.close()

    def addAlternativeServices(self):
        self.csel.addAlternativeServices()
        self.csel.startMarkedEdit(EDIT_ALTERNATIVES)
        self.close()

    def findCurrentlyPlayed(self):
        """Shortcut key '3': jump the cursor to the currently playing service."""
        sel = self.csel.getCurrentSelection()
        if sel and sel.valid() and not self.csel.entry_marked:
            currentPlayingService = hasattr(self.csel, 'dopipzap') and self.csel.dopipzap and self.session.pip.getCurrentService() or self.session.nav.getCurrentlyPlayingServiceOrGroup()
            self.csel.servicelist.setCurrent(currentPlayingService, adjust=False)
            if self.csel.getCurrentSelection() != currentPlayingService:
                self.csel.setCurrentSelection(sel)
            self.close()
        else:
            return 0

    def runPlugin(self, plugin):
        plugin(session=self.session, service=self.csel.getCurrentSelection())
        self.close()
class SelectionEventInfo():
    """Mixin for channel-selection screens.

    Keeps the 'Service'/'ServiceEvent'/'Event' GUI sources in sync with the
    highlighted service, debounced through a 100 ms one-shot timer.
    """

    def __init__(self):
        service_source = ServiceEvent()
        # Both names refer to the same source object.
        self['Service'] = service_source
        self['ServiceEvent'] = service_source
        self['Event'] = Event()
        self.servicelist.connectSelChanged(self.__selectionChanged)
        self.timer = eTimer()
        self.timer.callback.append(self.updateEventInfo)
        self.onShown.append(self.__selectionChanged)

    def __selectionChanged(self):
        # Debounce rapid cursor movement: (re)start a 100 ms single-shot.
        if self.execing:
            self.timer.start(100, True)

    def updateEventInfo(self):
        current = self.getCurrentSelection()
        service_source = self['Service']
        service_source.newService(current)
        self['Event'].newEvent(service_source.event)
class ChannelSelectionEPG(InfoBarHotkey):
def __init__(self):
self.hotkeys = [('Info (EPG)', 'info', 'Infobar/openEventView'),
('Info (EPG) ' + _('long'), 'info_long', 'Infobar/showEventInfoPlugins'),
('Epg/Guide', 'epg', 'Plugins/Extensions/GraphMultiEPG/1'),
('Epg/Guide ' + _('long'), 'epg_long', 'Infobar/showEventInfoPlugins')]
self['ChannelSelectEPGActions'] = hotkeyActionMap(['ChannelSelectEPGActions'], dict(((x[1], self.hotkeyGlobal) for x in self.hotkeys)))
self.eventViewEPG = self.start_bouquet = self.epg_bouquet = None
self.currentSavedPath = []
def getKeyFunctions(self, key):
selection = eval('config.misc.hotkey.' + key + ".value.split(',')")
selected = []
for x in selection:
function = list((function for function in getHotkeyFunctions() if function[1] == x and function[2] == 'EPG'))
if function:
selected.append(function[0])
return selected
def runPlugin(self, plugin):
Screens.InfoBar.InfoBar.instance.runPlugin(plugin)
def getEPGPluginList(self, getAll = False):
pluginlist = [ (p.name, boundFunction(self.runPlugin, p), p.path) for p in plugins.getPlugins(where=PluginDescriptor.WHERE_EVENTINFO) if 'selectedevent' not in p.__call__.func_code.co_varnames ] or []
from Components.ServiceEventTracker import InfoBarCount
if getAll or InfoBarCount == 1:
pluginlist.append((_('Show EPG for current channel...'), self.openSingleServiceEPG, 'current_channel'))
pluginlist.append((_('Multi EPG'), self.openMultiServiceEPG, 'multi_epg'))
pluginlist.append((_('Current event EPG'), self.openEventView, 'event_epg'))
return pluginlist
def showEventInfoPlugins(self):
pluginlist = self.getEPGPluginList()
if pluginlist:
self.session.openWithCallback(self.EventInfoPluginChosen, ChoiceBox, title=_('Please choose an extension...'), list=pluginlist, skin_name='EPGExtensionsList')
else:
self.openSingleServiceEPG()
def EventInfoPluginChosen(self, answer):
if answer is not None:
answer[1]()
def openEventView(self):
epglist = []
self.epglist = epglist
ref = self.getCurrentSelection()
epg = eEPGCache.getInstance()
now_event = epg.lookupEventTime(ref, -1, 0)
if now_event:
epglist.append(now_event)
next_event = epg.lookupEventTime(ref, -1, 1)
if next_event:
epglist.append(next_event)
if epglist:
self.eventViewEPG = self.session.openWithCallback(self.eventViewEPGClosed, EventViewEPGSelect, epglist[0], ServiceReference(ref), self.eventViewEPGCallback, self.openSingleServiceEPG, self.openMultiServiceEPG, self.openSimilarList)
def eventViewEPGCallback(self, setEvent, setService, val):
epglist = self.epglist
if len(epglist) > 1:
tmp = epglist[0]
epglist[0] = epglist[1]
epglist[1] = tmp
setEvent(epglist[0])
def eventViewEPGClosed(self, ret = False):
self.eventViewEPG = None
if ret:
self.close()
def openMultiServiceEPG(self):
ref = self.getCurrentSelection()
if ref:
self.start_bouquet = self.epg_bouquet = self.servicelist.getRoot()
self.savedService = ref
self.currentSavedPath = self.servicePath[:]
services = self.getServicesList(self.servicelist.getRoot())
self.session.openWithCallback(self.SingleMultiEPGClosed, EPGSelection, services, self.zapToService, None, bouquetChangeCB=self.changeBouquetForMultiEPG)
def openSingleServiceEPG(self):
ref = self.getCurrentSelection()
if ref:
self.start_bouquet = self.epg_bouquet = self.servicelist.getRoot()
self.savedService = ref
self.currentSavedPath = self.servicePath[:]
self.session.openWithCallback(self.SingleMultiEPGClosed, EPGSelection, ref, self.zapToService, serviceChangeCB=self.changeServiceCB, bouquetChangeCB=self.changeBouquetForSingleEPG)
def openSimilarList(self, eventid, refstr):
    """Show events similar to *eventid* on the service given by *refstr*."""
    self.session.open(EPGSelection, refstr, None, eventid)
def getServicesList(self, root):
    """Return the playable services under *root* as ServiceReference objects (directories and markers skipped)."""
    services = []
    servicelist = root and eServiceCenter.getInstance().list(root)
    if servicelist is not None:
        while True:
            service = servicelist.getNext()
            if not service.valid():
                break  # end of list
            if service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker):
                continue  # sub-bouquets and markers are not zappable
            services.append(ServiceReference(service))
    return services
def SingleMultiEPGClosed(self, ret = False):
    """EPG screen closed: zap away if a service was chosen, otherwise restore the saved bouquet/position."""
    if ret:
        # the user selected a service inside the EPG
        service = self.getCurrentSelection()
        if self.eventViewEPG:
            self.eventViewEPG.close(service)
        elif service is not None:
            self.close()
    else:
        # restore the bouquet path as it was before the EPG was opened
        if self.start_bouquet != self.epg_bouquet and len(self.currentSavedPath) > 0:
            self.clearPath()
            self.enterPath(self.bouquet_root)
            self.epg_bouquet = self.start_bouquet
            self.enterPath(self.epg_bouquet)
        self.setCurrentSelection(self.savedService)
def changeBouquetForSingleEPG(self, direction, epg):
    """Bouquet +/- while the single-service EPG is open: step to the neighbouring bouquet and retune the EPG."""
    if config.usage.multibouquet.value:
        inBouquet = self.getMutableList() is not None
        if inBouquet and len(self.servicePath) > 1:
            self.pathUp()
            if direction < 0:
                self.moveUp()
            else:
                self.moveDown()
            cur = self.getCurrentSelection()
            self.enterPath(cur)
            self.epg_bouquet = self.servicelist.getRoot()
            epg.setService(ServiceReference(self.getCurrentSelection()))
def changeBouquetForMultiEPG(self, direction, epg):
    """Bouquet +/- while the multi EPG is open: step to the neighbouring bouquet and feed its services to the EPG."""
    if config.usage.multibouquet.value:
        inBouquet = self.getMutableList() is not None
        if inBouquet and len(self.servicePath) > 1:
            self.pathUp()
            if direction < 0:
                self.moveUp()
            else:
                self.moveDown()
            cur = self.getCurrentSelection()
            self.enterPath(cur)
            self.epg_bouquet = self.servicelist.getRoot()
            services = self.getServicesList(self.epg_bouquet)
            epg.setServices(services)
def changeServiceCB(self, direction, epg):
    """Channel +/- while the single-service EPG is open: skip markers and retune the EPG to the new service."""
    beg = self.getCurrentSelection()
    while True:
        if direction > 0:
            self.moveDown()
        else:
            self.moveUp()
        cur = self.getCurrentSelection()
        # stop when back at the start (wrapped around) or on a real service
        if cur == beg or not cur.flags & eServiceReference.isMarker:
            break
    epg.setService(ServiceReference(self.getCurrentSelection()))
def zapToService(self, service, preview = False, zapback = False):
    """Zap to *service* from an EPG screen, honouring preview and zap-back semantics."""
    if self.startServiceRef is None:
        # remember where we came from so a later zap-back can return there
        self.startServiceRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
    if service is not None:
        if self.servicelist.getRoot() != self.epg_bouquet:
            # align the channel list with the bouquet the EPG was browsing
            self.servicelist.clearPath()
            if self.servicelist.bouquet_root != self.epg_bouquet:
                self.servicelist.enterPath(self.servicelist.bouquet_root)
            self.servicelist.enterPath(self.epg_bouquet)
        self.servicelist.setCurrent(service)
    if not zapback or preview:
        self.zap(enable_pipzap=True)
    if (self.dopipzap or zapback) and not preview:
        self.zapBack()
    if not preview:
        # a real zap finalizes: forget the saved origin
        self.startServiceRef = None
        self.startRoot = None
        self.revertMode = None
class ChannelSelectionEdit():
    """Mixin adding bouquet/favourites editing (rename, markers, move mode, delete) to the channel selection screen."""

    def __init__(self):
        self.entry_marked = False       # True while an entry is "picked up" in move mode
        self.bouquet_mark_edit = OFF    # OFF / EDIT_BOUQUET / EDIT_ALTERNATIVES
        self.mutableList = None         # cached editable handle of the current list
        self.__marked = []              # refs that were marked when a marked-edit session started
        self.saved_title = None
        self.saved_root = None
        self.current_ref = None
        self.editMode = False
        self.confirmRemove = True       # ask before deleting; can be disabled for the session

        class ChannelSelectionEditActionMap(ActionMap):
            """ActionMap that routes 'cancel' to the edit-abort handler and swallows 'ok'."""

            def __init__(self, csel, contexts = [], actions = {}, prio = 0):
                ActionMap.__init__(self, contexts, actions, prio)
                self.csel = csel

            def action(self, contexts, action):
                if action == 'cancel':
                    self.csel.handleEditCancel()
                    return 0
                elif action == 'ok':
                    return 0  # 'ok' handled elsewhere; consume it here
                else:
                    return ActionMap.action(self, contexts, action)

        self['ChannelSelectEditActions'] = ChannelSelectionEditActionMap(self, ['ChannelSelectEditActions', 'OkCancelActions'], {'contextMenu': self.doContext})
def getMutableList(self, root = eServiceReference()):
    """Return an editable handle for *root* (default: the current root), or None when not editable.

    NOTE(review): the default eServiceReference() is created once at definition
    time, but it is only read via .valid(), never mutated, so sharing is harmless.
    """
    if self.mutableList is not None:
        return self.mutableList
    serviceHandler = eServiceCenter.getInstance()
    if not root.valid():
        root = self.getRoot()
    list = root and serviceHandler.list(root)
    if list is not None:
        return list.startEdit()
def buildBouquetID(self, name):
    """Turn *name* into a filesystem-safe, unique userbouquet file stem."""
    # Decompose accents, drop non-ASCII and characters illegal in filenames.
    name = unicodedata.normalize('NFKD', unicode(name, 'utf_8', errors='ignore')).encode('ASCII', 'ignore').translate(None, '<>:"/\\|?*() ')
    # Bump a '_<n>' suffix until no userbouquet file with that stem exists.
    while os.path.isfile((self.mode == MODE_TV and '/etc/enigma2/userbouquet.%s.tv' or '/etc/enigma2/userbouquet.%s.radio') % name):
        name = name.rsplit('_', 1)
        name = '_'.join((name[0], len(name) == 2 and name[1].isdigit() and str(int(name[1]) + 1) or '1'))
    return name
def renameEntry(self):
    """Open a virtual keyboard to rename the selected entry."""
    self.editMode = True
    cur = self.getCurrentSelection()
    if cur and cur.valid():
        name = eServiceCenter.getInstance().info(cur).getName(cur) or ServiceReference(cur).getServiceName() or ''
        # strip enigma2's 0x86/0x87 name-highlight control bytes
        name = name.replace('\xc2\x86', '').replace('\xc2\x87', '')
        if name:
            self.session.openWithCallback(self.renameEntryCallback, VirtualKeyBoard, title=_('Please enter new name:'), text=name)
    else:
        return 0
def renameEntryCallback(self, name):
    """Apply the new *name*: re-add the renamed service at its old index in both the file and the widget."""
    if name:
        mutableList = self.getMutableList()
        if mutableList:
            current = self.servicelist.getCurrent()
            current.setName(name)
            index = self.servicelist.getCurrentIndex()
            # remove + add + move keeps the entry at its original position
            mutableList.removeService(current, False)
            mutableList.addService(current)
            mutableList.moveService(current, index)
            mutableList.flushChanges()
            # mirror the change in the on-screen list
            self.servicelist.addService(current, True)
            self.servicelist.removeCurrent()
            if not self.servicelist.atEnd():
                self.servicelist.moveUp()
def addMarker(self, name):
    """Insert a marker entry called *name* before the current selection (or append when nothing is selected)."""
    current = self.servicelist.getCurrent()
    mutableList = self.getMutableList()
    cnt = 0
    while mutableList:
        # 1:64 is the marker service type; cnt disambiguates equal names
        str = '1:64:%d:0:0:0:0:0:0:0::%s' % (cnt, name)
        ref = eServiceReference(str)
        if current and current.valid():
            if not mutableList.addService(ref, current):
                self.servicelist.addService(ref, True)
                mutableList.flushChanges()
                break
        elif not mutableList.addService(ref):
            self.servicelist.addService(ref, True)
            mutableList.flushChanges()
            break
        cnt += 1
def addAlternativeServices(self):
    """Replace the selected service with a new alternatives group containing it."""
    cur_service = ServiceReference(self.getCurrentSelection())
    end = self.atEnd()
    root = self.getRoot()
    cur_root = root and ServiceReference(root)
    mutableBouquet = cur_root.list().startEdit()
    if mutableBouquet:
        name = cur_service.getServiceName()
        # alternatives file stem: the service ref with ':' replaced by '_'
        refstr = '_'.join(cur_service.ref.toString().split(':'))
        if self.mode == MODE_TV:
            str = '1:134:1:0:0:0:0:0:0:0:FROM BOUQUET "alternatives.%s.tv" ORDER BY bouquet' % refstr
        else:
            str = '1:134:2:0:0:0:0:0:0:0:FROM BOUQUET "alternatives.%s.radio" ORDER BY bouquet' % refstr
        new_ref = ServiceReference(str)
        # insert the group in front of the service, then drop the plain service
        if not mutableBouquet.addService(new_ref.ref, cur_service.ref):
            mutableBouquet.removeService(cur_service.ref)
            mutableBouquet.flushChanges()
            eDVBDB.getInstance().reloadBouquets()
            mutableAlternatives = new_ref.list().startEdit()
            if mutableAlternatives:
                mutableAlternatives.setListName(name)
                if mutableAlternatives.addService(cur_service.ref):
                    print 'add', cur_service.ref.toString(), 'to new alternatives failed'
                mutableAlternatives.flushChanges()
                self.servicelist.addService(new_ref.ref, True)
                self.servicelist.removeCurrent()
                if not end:
                    self.servicelist.moveUp()
                # keep "last service" and zap-origin bookkeeping pointing at the group
                if cur_service.ref.toString() == self.lastservice.value:
                    self.saveChannel(new_ref.ref)
                if self.startServiceRef and cur_service.ref == self.startServiceRef:
                    self.startServiceRef = new_ref.ref
            else:
                print 'get mutable list for new created alternatives failed'
        else:
            print 'add', str, 'to', cur_root.getServiceName(), 'failed'
    else:
        print 'bouquetlist is not editable'
def addBouquet(self, bName, services):
serviceHandler = eServiceCenter.getInstance()
mutableBouquetList = serviceHandler.list(self.bouquet_root).startEdit()
if mutableBouquetList:
bName = self.buildBouquetID(bName)
new_bouquet_ref = eServiceReference((self.mode == MODE_TV and '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "userbouquet.%s.tv" ORDER BY bouquet' or '1:7:2:0:0:0:0:0:0:0:FROM BOUQUET "userbouquet.%s.radio" ORDER BY bouquet') % bName)
if not mutableBouquetList.addService(new_bouquet_ref):
mutableBouquetList.flushChanges()
eDVBDB.getInstance().reloadBouquets()
mutableBouquet = serviceHandler.list(new_bouquet_ref).startEdit()
if mutableBouquet:
mutableBouquet.setListName(bName)
if services is not None:
for service in services:
if mutableBouquet.addService(service):
print 'add', service.toString(), 'to new bouquet failed'
mutableBouquet.flushChanges()
else:
print 'get mutable list for new created bouquet failed'
cur_root = self.getRoot()
str1 = cur_root and cur_root.toString()
pos1 = str1 and str1.find('FROM BOUQUET') or -1
pos2 = self.bouquet_rootstr.find('FROM BOUQUET')
if pos1 != -1 and pos2 != -1 and str1[pos1:] == self.bouquet_rootstr[pos2:]:
self.servicelist.addService(new_bouquet_ref)
self.servicelist.resetRoot()
else:
print 'add', str, 'to bouquets failed'
else:
print 'bouquetlist is not editable'
def copyCurrentToBouquetList(self):
    """Create a new bouquet holding all services of the currently selected provider."""
    src = ServiceReference(self.getCurrentSelection())
    handler = eServiceCenter.getInstance()
    content = handler.list(src.ref)
    # 'R' content: raw service references (recursive)
    self.addBouquet(src.getServiceName(), content and content.getContent('R', True))
def removeAlternativeServices(self):
    """Dissolve the selected alternatives group, replacing it with its first member service."""
    cur_service = ServiceReference(self.getCurrentSelection())
    end = self.atEnd()
    root = self.getRoot()
    cur_root = root and ServiceReference(root)
    list = cur_service.list()
    first_in_alternative = list and list.getNext()
    if first_in_alternative:
        edit_root = cur_root and cur_root.list().startEdit()
        if edit_root:
            # insert the first alternative in front of the group
            if not edit_root.addService(first_in_alternative, cur_service.ref):
                self.servicelist.addService(first_in_alternative, True)
                if cur_service.ref.toString() == self.lastservice.value:
                    self.saveChannel(first_in_alternative)
                if self.startServiceRef and cur_service.ref == self.startServiceRef:
                    self.startServiceRef = first_in_alternative
            else:
                print "couldn't add first alternative service to current root"
        else:
            print "couldn't edit current root!!"
    else:
        print 'remove empty alternative list !!'
    # finally remove the (now redundant) alternatives bouquet itself
    self.removeBouquet()
    if not end:
        self.servicelist.moveUp()
def removeBouquet(self):
refstr = self.getCurrentSelection().toString()
print 'removeBouquet', refstr
pos = refstr.find('FROM BOUQUET "')
filename = None
self.removeCurrentService(bouquet=True)
def removeSatelliteService(self):
    """Permanently delete the selected service from the service database."""
    current = self.getCurrentSelection()
    eDVBDB.getInstance().removeService(current)
    refreshServiceList()
    if not self.atEnd():
        self.servicelist.moveUp()
def removeSatelliteServices(self):
    """Ask for confirmation before wiping all services of the selected satellite/cable/terrestrial group."""
    current = self.getCurrentSelection()
    # orbital position is encoded in the upper 16 bits of the namespace word;
    # 0xFFFF marks cable, 0xEEEE marks terrestrial
    unsigned_orbpos = current.getUnsignedData(4) >> 16
    if unsigned_orbpos == 65535:
        messageText = _('Are you sure to remove all cable services?')
    elif unsigned_orbpos == 61166:
        messageText = _('Are you sure to remove all terrestrial services?')
    else:
        if unsigned_orbpos > 1800:
            # positions > 180.0 deg are stored as 3600 - west position
            unsigned_orbpos = 3600 - unsigned_orbpos
            direction = _('W')
        else:
            direction = _('E')
        messageText = _('Are you sure to remove all %d.%d%s%s services?') % (unsigned_orbpos / 10,
         unsigned_orbpos % 10,
         '\xc2\xb0',
         direction)
    self.session.openWithCallback(self.removeSatelliteServicesCallback, MessageBox, messageText)
def removeSatelliteServicesCallback(self, answer):
    """After confirmation, delete all services of the chosen satellite/cable/terrestrial group and refresh the list."""
    if answer:
        currentIndex = self.servicelist.getCurrentIndex()
        current = self.getCurrentSelection()
        unsigned_orbpos = current.getUnsignedData(4) >> 16
        if unsigned_orbpos == 65535:
            # cable namespace, passed as a signed 32-bit value
            eDVBDB.getInstance().removeServices(int('0xFFFF0000', 16) - 4294967296L)
        elif unsigned_orbpos == 61166:
            # terrestrial namespace, passed as a signed 32-bit value
            eDVBDB.getInstance().removeServices(int('0xEEEE0000', 16) - 4294967296L)
        else:
            # extract 'satellitePosition == <n>' from the query path
            curpath = current.getPath()
            idx = curpath.find('satellitePosition == ')
            if idx != -1:
                tmp = curpath[idx + 21:]
                idx = tmp.find(')')
                if idx != -1:
                    satpos = int(tmp[:idx])
                    eDVBDB.getInstance().removeServices(-1, -1, -1, satpos)
        refreshServiceList()
        if hasattr(self, 'showSatellites'):
            self.showSatellites()
            self.servicelist.moveToIndex(currentIndex)
            if currentIndex != self.servicelist.getCurrentIndex():
                self.servicelist.instance.moveSelection(self.servicelist.instance.moveEnd)
def startMarkedEdit(self, type):
    """Begin a marked-edit session (EDIT_BOUQUET or EDIT_ALTERNATIVES) on the current list."""
    self.savedPath = self.servicePath[:]
    if type == EDIT_ALTERNATIVES:
        # editing happens inside the alternatives group itself
        self.current_ref = self.getCurrentSelection()
        self.enterPath(self.current_ref)
    self.mutableList = self.getMutableList()
    self.clearMarks()
    self.saved_title = self.getTitle()
    pos = self.saved_title.find(')')
    new_title = self.saved_title[:pos + 1]
    if type == EDIT_ALTERNATIVES:
        self.bouquet_mark_edit = EDIT_ALTERNATIVES
        new_title += ' ' + _('[alternative edit]')
    else:
        self.bouquet_mark_edit = EDIT_BOUQUET
        if config.usage.multibouquet.value:
            new_title += ' ' + _('[bouquet edit]')
        else:
            new_title += ' ' + _('[favourite edit]')
    self.setTitle(new_title)
    # pre-mark every service already in the list; the diff is applied on end
    self.__marked = self.servicelist.getRootServices()
    for x in self.__marked:
        self.servicelist.addMarked(eServiceReference(x))
    self['Service'].editmode = True
def endMarkedEdit(self, abort):
    """Finish a marked-edit session; unless aborted, apply the mark/unmark diff to the list."""
    if not abort and self.mutableList is not None:
        new_marked = set(self.servicelist.getMarked())
        old_marked = set(self.__marked)
        removed = old_marked - new_marked
        added = new_marked - old_marked
        changed = False
        for x in removed:
            changed = True
            self.mutableList.removeService(eServiceReference(x))
        for x in added:
            changed = True
            self.mutableList.addService(eServiceReference(x))
        if changed:
            if self.bouquet_mark_edit == EDIT_ALTERNATIVES and not new_marked and self.__marked:
                # an alternatives group must not end up empty; keep its first original member
                self.mutableList.addService(eServiceReference(self.__marked[0]))
            self.mutableList.flushChanges()
    # tear down the edit session state and restore title/path/selection
    self.__marked = []
    self.clearMarks()
    self.bouquet_mark_edit = OFF
    self.mutableList = None
    self.setTitle(self.saved_title)
    self.saved_title = None
    del self.servicePath[:]
    self.servicePath += self.savedPath
    del self.savedPath
    self.setRoot(self.servicePath[-1])
    if self.current_ref:
        self.setCurrentSelection(self.current_ref)
        self.current_ref = None
def clearMarks(self):
    """Remove all marks from the on-screen service list."""
    self.servicelist.clearMarks()
def doMark(self):
    """Toggle the mark state of the currently highlighted service."""
    sl = self.servicelist
    current = sl.getCurrent()
    if sl.isMarked(current):
        sl.removeMarked(current)
    else:
        sl.addMarked(current)
def removeCurrentEntry(self, bouquet = False):
    """Delete the selected entry (or bouquet), optionally asking for confirmation first."""
    if self.confirmRemove:
        # NOTE(review): the duplicated 'again' is part of the original
        # translatable string; changing the msgid would break translations.
        list = [(_('yes'), True), (_('no'), False), (_('yes') + ' ' + _('and never ask again this session again'), 'never')]
        self.session.openWithCallback(boundFunction(self.removeCurrentEntryCallback, bouquet), MessageBox, _('Are you sure to remove this entry?'), list=list)
    else:
        self.removeCurrentEntryCallback(bouquet, True)
def removeCurrentEntryCallback(self, bouquet, answer):
    """Act on the delete confirmation; 'never' also disables future prompts this session."""
    if not answer:
        return
    if answer == 'never':
        self.confirmRemove = False
    if bouquet:
        self.removeBouquet()
    else:
        self.removeCurrentService()
def removeCurrentService(self, bouquet = False):
    """Remove the selected service from the current editable list (also used to delete bouquets)."""
    self.editMode = True
    ref = self.servicelist.getCurrent()
    mutableList = self.getMutableList()
    if ref.valid() and mutableList is not None:
        if not mutableList.removeService(ref):
            mutableList.flushChanges()
            self.servicelist.removeCurrent()
            self.servicelist.resetRoot()
            playingref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
            if not bouquet and playingref and ref == playingref:
                # the playing service was deleted; pick a replacement without closing
                self.channelSelected(doClose=False)
def addServiceToBouquet(self, dest, service = None):
    """Append *service* (default: the current selection) to the bouquet *dest*."""
    mutableList = self.getMutableList(dest)
    if mutableList is not None:
        if service is None:
            service = self.servicelist.getCurrent()
        if not mutableList.addService(service):
            mutableList.flushChanges()
            # if we are currently viewing the destination bouquet, update the widget too
            cur_root = self.getRoot()
            # NOTE(review): when cur_root is None, str1 becomes -1 and the
            # following .find() would raise; presumably cur_root is always set here.
            str1 = cur_root and cur_root.toString() or -1
            str2 = dest.toString()
            pos1 = str1.find('FROM BOUQUET')
            pos2 = str2.find('FROM BOUQUET')
            if pos1 != -1 and pos2 != -1 and str1[pos1:] == str2[pos2:]:
                self.servicelist.addService(service)
            self.servicelist.resetRoot()
def toggleMoveMode(self, select = False):
    """Enter or leave move mode; leaving flushes any pending reordering to the bouquet."""
    self.editMode = True
    if self.movemode:
        if self.entry_marked:
            self.toggleMoveMarked()  # drop a still-held entry first
        self.movemode = False
        self.mutableList.flushChanges()  # persist all moves done while in move mode
        self.mutableList = None
        self.setTitle(self.saved_title)
        self.saved_title = None
        self.servicelist.resetRoot()
        self.servicelist.l.setHideNumberMarker(config.usage.hide_number_markers.value)
        self.setCurrentSelection(self.servicelist.getCurrent())
    else:
        self.mutableList = self.getMutableList()
        self.movemode = True
        select and self.toggleMoveMarked()  # optionally pick up the current entry right away
        self.saved_title = self.getTitle()
        pos = self.saved_title.find(')')
        self.setTitle(self.saved_title[:pos + 1] + ' ' + _('[move mode]') + self.saved_title[pos + 1:])
        self.servicelist.l.setHideNumberMarker(False)
        self.setCurrentSelection(self.servicelist.getCurrent())
    self['Service'].editmode = True
def handleEditCancel(self):
    """Abort whichever edit mode is active: move mode, or a marked-edit session."""
    if self.movemode:
        self.toggleMoveMode()
        return
    if self.bouquet_mark_edit != OFF:
        self.endMarkedEdit(True)  # True = abort, discard the diff
def toggleMoveMarked(self):
    """Pick up or drop the current entry in move mode; path changes stay locked while an entry is held."""
    grab = not self.entry_marked
    self.servicelist.setCurrentMarked(grab)
    self.entry_marked = grab
    self.pathChangeDisabled = grab
def doContext(self):
    """Open the channel context menu for the current selection."""
    self.session.openWithCallback(self.exitContext, ChannelContextMenu, self)
def exitContext(self, close = False):
    """Context menu closed; optionally cancel the whole channel selection."""
    if close:
        self.cancel()
# List modes: the channel selection shows either TV or radio services.
MODE_TV = 0
MODE_RADIO = 1
# eServiceReference query strings enumerating every known TV / radio service type.
service_types_tv = '1:7:1:0:0:0:0:0:0:0:(type == 1) || (type == 17) || (type == 22) || (type == 25) || (type == 31) || (type == 134) || (type == 195)'
service_types_radio = '1:7:2:0:0:0:0:0:0:0:(type == 2) || (type == 10)'
class ChannelSelectionBase(Screen):
    """Base screen of the channel list: mode handling, path navigation and key bindings."""

    def __init__(self, session):
        Screen.__init__(self, session)
        self.setScreenPathMode(None)
        self['key_red'] = Button(_('All'))
        self['key_green'] = Button(_('Satellites'))
        self['key_yellow'] = Button(_('Provider'))
        self['key_blue'] = Button(_('Favourites'))
        self['list'] = ServiceList(self)
        self.servicelist = self['list']
        self.numericalTextInput = NumericalTextInput(handleTimeout=False)
        self.numericalTextInput.setUseableChars(u'1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ')
        self.servicePathTV = []     # navigation stack while in TV mode
        self.servicePathRadio = []  # navigation stack while in radio mode
        self.servicePath = []       # alias of the active stack
        self.history = []
        self.rootChanged = False
        self.startRoot = None
        self.selectionNumber = ''   # digits collected so far for number zap
        self.clearNumberSelectionNumberTimer = eTimer()
        self.clearNumberSelectionNumberTimer.callback.append(self.clearNumberSelectionNumber)
        self.protectContextMenu = True
        self.mode = MODE_TV
        self.dopipzap = False
        self.pathChangeDisabled = False
        self.movemode = False
        self.showSatDetails = False
        self['ChannelSelectBaseActions'] = NumberActionMap(['ChannelSelectBaseActions', 'NumberActions', 'InputAsciiActions'], {'showFavourites': self.showFavourites,
         'showAllServices': self.showAllServices,
         'showProviders': self.showProviders,
         'showSatellites': boundFunction(self.showSatellites, changeMode=True),
         'nextBouquet': self.nextBouquet,
         'prevBouquet': self.prevBouquet,
         'nextMarker': self.nextMarker,
         'prevMarker': self.prevMarker,
         'gotAsciiCode': self.keyAsciiCode,
         'keyLeft': self.keyLeft,
         'keyRight': self.keyRight,
         'keyRecord': self.keyRecord,
         '1': self.keyNumberGlobal,
         '2': self.keyNumberGlobal,
         '3': self.keyNumberGlobal,
         '4': self.keyNumberGlobal,
         '5': self.keyNumberGlobal,
         '6': self.keyNumberGlobal,
         '7': self.keyNumberGlobal,
         '8': self.keyNumberGlobal,
         '9': self.keyNumberGlobal,
         '0': self.keyNumber0}, -2)
        self.maintitle = _('Channel selection')
        self.recallBouquetMode()
def getBouquetNumOffset(self, bouquet):
    """Return the channel-number offset of *bouquet* (first channel number - 1) in multibouquet mode, else 0."""
    if not config.usage.multibouquet.value:
        return 0
    str = bouquet.toString()  # NOTE(review): computed but unused in this version
    offset = 0
    if 'userbouquet.' in bouquet.toCompareString():
        serviceHandler = eServiceCenter.getInstance()
        servicelist = serviceHandler.list(bouquet)
        if servicelist is not None:
            while True:
                serviceIterator = servicelist.getNext()
                if not serviceIterator.valid():
                    break
                # the first entry with a real channel number defines the offset
                number = serviceIterator.getChannelNum()
                if number > 0:
                    offset = number - 1
                    break
    return offset
def recallBouquetMode(self):
    """Rebuild self.service_types / self.bouquet_root(str) for the active mode.

    Must be called whenever self.mode or the multibouquet setting changes.
    """
    if self.mode == MODE_TV:
        self.service_types = service_types_tv
        if config.usage.multibouquet.value:
            self.bouquet_rootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
        else:
            self.bouquet_rootstr = '%s FROM BOUQUET "userbouquet.favourites.tv" ORDER BY bouquet' % self.service_types
    else:
        self.service_types = service_types_radio
        if config.usage.multibouquet.value:
            # BUGFIX: the radio bouquet root must use service type 2 ('1:7:2'),
            # consistent with service_types_radio; it previously said '1:7:1' (TV).
            self.bouquet_rootstr = '1:7:2:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'
        else:
            self.bouquet_rootstr = '%s FROM BOUQUET "userbouquet.favourites.radio" ORDER BY bouquet' % self.service_types
    self.bouquet_root = eServiceReference(self.bouquet_rootstr)
def setTvMode(self):
    """Switch the list to TV mode and retag the window title with ' (TV)'."""
    self.mode = MODE_TV
    self.servicePath = self.servicePathTV
    self.recallBouquetMode()
    title = self.maintitle
    pos = title.find(' (')
    if pos != -1:
        title = title[:pos]  # drop any previous ' (TV)'/' (Radio)' suffix
    title += _(' (TV)')
    self.setTitle(title)
def setRadioMode(self):
    """Switch the list to radio mode and retag the window title with ' (Radio)'."""
    self.mode = MODE_RADIO
    self.servicePath = self.servicePathRadio
    self.recallBouquetMode()
    title = self.maintitle
    pos = title.find(' (')
    if pos != -1:
        title = title[:pos]  # drop any previous ' (TV)'/' (Radio)' suffix
    title += _(' (Radio)')
    self.setTitle(title)
def setRoot(self, root, justSet = False):
    """Make *root* the displayed list root, choosing favourites vs normal list mode."""
    if self.startRoot is None:
        self.startRoot = self.getRoot()  # remember origin for a possible revert
    path = root.getPath()
    isBouquet = 'FROM BOUQUET' in path and root.flags & eServiceReference.isDirectory
    inBouquetRootList = 'FROM BOUQUET "bouquets.' in path
    if not inBouquetRootList and isBouquet:
        self.servicelist.setMode(ServiceList.MODE_FAVOURITES)
    else:
        self.servicelist.setMode(ServiceList.MODE_NORMAL)
    self.servicelist.setRoot(root, justSet)
    self.rootChanged = True
    self.buildTitleString()
def removeModeStr(self, str):
    """Strip the mode suffix (' (TV)' / ' (Radio)') from *str*, if present."""
    if self.mode == MODE_TV:
        pos = str.find(_(' (TV)'))
    else:
        pos = str.find(_(' (Radio)'))
    if pos != -1:
        return str[:pos]
    return str
def getServiceName(self, ref):
    """Human-readable name of *ref*; falls back to list-type labels for special query roots."""
    str = self.removeModeStr(ServiceReference(ref).getServiceName())
    if 'User - bouquets' in str:
        return _('User - bouquets')
    if not str:
        # nameless refs: classify by their query path
        pathstr = ref.getPath()
        if 'FROM PROVIDERS' in pathstr:
            return _('Provider')
        if 'FROM SATELLITES' in pathstr:
            return _('Satellites')
        if ') ORDER BY name' in pathstr:
            return _('All')
    return str
def buildTitleString(self):
    """Append the current path ('base/../leaf') to the window title after its mode tag."""
    titleStr = self.getTitle()
    # keep everything up to the closing ']' (or ')') of the mode/edit tag
    pos = titleStr.find(']')
    if pos == -1:
        pos = titleStr.find(')')
    if pos != -1:
        titleStr = titleStr[:pos + 1]
        Len = len(self.servicePath)
        if Len > 0:
            base_ref = self.servicePath[0]
            if Len > 1:
                end_ref = self.servicePath[Len - 1]
            else:
                end_ref = None
            nameStr = self.getServiceName(base_ref)
            titleStr += ' - ' + nameStr
            if end_ref is not None:
                if Len > 2:
                    titleStr += '/../'  # intermediate levels elided
                else:
                    titleStr += '/'
                nameStr = self.getServiceName(end_ref)
                titleStr += nameStr
    self.setTitle(titleStr)
def moveUp(self):
    """Move the list cursor one entry up."""
    self.servicelist.moveUp()
def moveDown(self):
    """Move the list cursor one entry down."""
    self.servicelist.moveDown()
def clearPath(self):
    """Empty the navigation stack in place (keeps the TV/radio alias intact)."""
    del self.servicePath[:]
def enterPath(self, ref, justSet = False):
    """Descend into *ref*: push it onto the navigation stack and make it the root."""
    self.servicePath.append(ref)
    self.setRoot(ref, justSet)
def enterUserbouquet(self, root, save_root = True):
    """Jump directly into the userbouquet *root*, rebuilding the path from the bouquet root."""
    self.clearPath()
    self.recallBouquetMode()
    if self.bouquet_root:
        self.enterPath(self.bouquet_root)
    self.enterPath(root)
    self.startRoot = None
    if save_root:
        self.saveRoot()
def pathUp(self, justSet = False):
    """Pop one level off the navigation stack, re-root to the parent and return the popped ref."""
    leaving = self.servicePath.pop()
    if self.servicePath:
        self.setRoot(self.servicePath[-1], justSet)
        if not justSet:
            # reselect the entry we just came out of
            self.setCurrentSelection(leaving)
    return leaving
def isBasePathEqual(self, ref):
    """True when we are at least one level deep and the bottom of the path is *ref*."""
    return len(self.servicePath) > 1 and self.servicePath[0] == ref
def isPrevPathEqual(self, ref):
    """True when the next-to-last entry of the navigation stack is *ref*."""
    return len(self.servicePath) > 1 and self.servicePath[-2] == ref
def preEnterPath(self, refstr):
    """Hook for subclasses to intercept path changes; the base never intercepts."""
    return False
def showAllServices(self):
    """Red key: show the flat 'All services' list and select the playing service in it."""
    if not self.pathChangeDisabled:
        refstr = '%s ORDER BY name' % self.service_types
        if not self.preEnterPath(refstr):
            ref = eServiceReference(refstr)
            currentRoot = self.getRoot()
            if currentRoot is None or currentRoot != ref:
                self.clearPath()
                self.enterPath(ref)
                playingref = self.session.nav.getCurrentlyPlayingServiceReference()
                if playingref:
                    self.setCurrentSelectionAlternative(playingref)
def showSatellites(self, changeMode = False):
    """Green key: show the satellites overview; *changeMode* toggles the detailed (providers/new) view."""
    if not self.pathChangeDisabled:
        refstr = '%s FROM SATELLITES ORDER BY satellitePosition' % self.service_types
        if not self.preEnterPath(refstr):
            ref = eServiceReference(refstr)
            justSet = False
            prev = None
            if self.isBasePathEqual(ref):
                if self.isPrevPathEqual(ref):
                    justSet = True
                prev = self.pathUp(justSet)
            else:
                currentRoot = self.getRoot()
                if currentRoot is None or currentRoot != ref:
                    justSet = True
                    self.clearPath()
                    self.enterPath(ref, True)
                if changeMode and currentRoot and currentRoot == ref:
                    # pressing the key again while already here toggles details
                    self.showSatDetails = not self.showSatDetails
                    justSet = True
                    self.clearPath()
                    self.enterPath(ref, True)
            if justSet:
                # the list is filled manually so the entries can be renamed/sorted
                addCableAndTerrestrialLater = []
                serviceHandler = eServiceCenter.getInstance()
                servicelist = serviceHandler.list(ref)
                if servicelist is not None:
                    while True:
                        service = servicelist.getNext()
                        if not service.valid():
                            break
                        unsigned_orbpos = service.getUnsignedData(4) >> 16
                        orbpos = service.getData(4) >> 16
                        if orbpos < 0:
                            orbpos += 3600
                        if 'FROM PROVIDER' in service.getPath():
                            service_type = self.showSatDetails and _('Providers')
                        elif 'flags == %d' % FLAG_SERVICE_NEW_FOUND in service.getPath():
                            service_type = self.showSatDetails and _('New')
                        else:
                            service_type = _('Services')
                        if service_type:
                            if unsigned_orbpos == 65535:
                                # cable namespace
                                service_name = _('Cable')
                                addCableAndTerrestrialLater.append(('%s - %s' % (service_name, service_type), service.toString()))
                            elif unsigned_orbpos == 61166:
                                # terrestrial namespace
                                service_name = _('Terrestrial')
                                addCableAndTerrestrialLater.append(('%s - %s' % (service_name, service_type), service.toString()))
                            else:
                                try:
                                    service_name = str(nimmanager.getSatDescription(orbpos))
                                except:
                                    # unknown satellite: build a 'd.d E/W' label
                                    if orbpos > 1800:
                                        orbpos = 3600 - orbpos
                                        h = _('W')
                                    else:
                                        h = _('E')
                                    service_name = ('%d.%d' + h) % (orbpos / 10, orbpos % 10)
                                service.setName('%s - %s' % (service_name, service_type))
                                self.servicelist.addService(service)
                    cur_ref = self.session.nav.getCurrentlyPlayingServiceReference()
                    self.servicelist.l.sort()
                    if cur_ref:
                        # synthesize a 'Current transponder' entry for the playing channel
                        pos = self.service_types.rfind(':')
                        refstr = '%s (channelID == %08x%04x%04x) && %s ORDER BY name' % (self.service_types[:pos + 1],
                         cur_ref.getUnsignedData(4),
                         cur_ref.getUnsignedData(2),
                         cur_ref.getUnsignedData(3),
                         self.service_types[pos + 1:])
                        ref = eServiceReference(refstr)
                        ref.setName(_('Current transponder'))
                        self.servicelist.addService(ref, beforeCurrent=True)
                    for service_name, service_ref in addCableAndTerrestrialLater:
                        ref = eServiceReference(service_ref)
                        ref.setName(service_name)
                        self.servicelist.addService(ref, beforeCurrent=True)
                    self.servicelist.l.FillFinished()
                    if prev is not None:
                        self.setCurrentSelection(prev)
                    elif cur_ref:
                        # preselect the satellite of the playing service (field 7 = namespace)
                        refstr = cur_ref.toString()
                        op = ''.join(refstr.split(':', 10)[6:7])
                        if len(op) >= 4:
                            # NOTE(review): op[:-4] is empty when len(op) == 4,
                            # which would make int() raise; presumably real
                            # namespaces are longer — TODO confirm
                            hop = int(op[:-4], 16)
                            if len(op) >= 7 and not op.endswith('0000'):
                                op = op[:-4] + '0000'
                            refstr = '1:7:0:0:0:0:%s:0:0:0:(satellitePosition == %s) && %s ORDER BY name' % (op, hop, self.service_types[self.service_types.rfind(':') + 1:])
                            self.setCurrentSelectionAlternative(eServiceReference(refstr))
def showProviders(self):
    """Yellow key: show the providers overview and preselect the playing service's provider."""
    if not self.pathChangeDisabled:
        refstr = '%s FROM PROVIDERS ORDER BY name' % self.service_types
        if not self.preEnterPath(refstr):
            ref = eServiceReference(refstr)
            if self.isBasePathEqual(ref):
                self.pathUp()
            else:
                currentRoot = self.getRoot()
                if currentRoot is None or currentRoot != ref:
                    self.clearPath()
                    self.enterPath(ref)
                    service = self.session.nav.getCurrentService()
                    if service:
                        info = service.info()
                        if info:
                            provider = info.getInfoString(iServiceInformation.sProvider)
                            refstr = '1:7:0:0:0:0:0:0:0:0:(provider == "%s") && %s ORDER BY name:%s' % (provider, self.service_types[self.service_types.rfind(':') + 1:], provider)
                            self.setCurrentSelectionAlternative(eServiceReference(refstr))
def changeBouquet(self, direction):
    """Step one bouquet up (*direction* < 0) or down and enter it."""
    if not self.pathChangeDisabled:
        if len(self.servicePath) > 1:
            ref = eServiceReference('%s FROM SATELLITES ORDER BY satellitePosition' % self.service_types)
            if self.isBasePathEqual(ref):
                # inside the satellites view, bouquet +/- returns to the overview
                self.showSatellites()
            else:
                self.pathUp()
                if direction < 0:
                    self.moveUp()
                else:
                    self.moveDown()
                ref = self.getCurrentSelection()
                self.enterPath(ref)
def inBouquet(self):
    """True when the current navigation path is rooted at the bouquet/favourites root."""
    return bool(self.servicePath) and self.servicePath[0] == self.bouquet_root
def atBegin(self):
    """True when the cursor is on the first list entry."""
    return self.servicelist.atBegin()
def atEnd(self):
    """True when the cursor is on the last list entry."""
    return self.servicelist.atEnd()
def nextBouquet(self):
    """Bouquet+ key: page up in old-style mode, otherwise change bouquet (direction per user setting)."""
    if self.shown and config.usage.oldstyle_channel_select_controls.value:
        self.servicelist.instance.moveSelection(self.servicelist.instance.pageUp)
    elif 'reverseB' in config.usage.servicelist_cursor_behavior.value:
        self.changeBouquet(-1)
    else:
        self.changeBouquet(+1)
def prevBouquet(self):
    """Bouquet- key: page down in old-style mode, otherwise change bouquet (direction per user setting)."""
    if self.shown and config.usage.oldstyle_channel_select_controls.value:
        self.servicelist.instance.moveSelection(self.servicelist.instance.pageDown)
    elif 'reverseB' in config.usage.servicelist_cursor_behavior.value:
        self.changeBouquet(+1)
    else:
        self.changeBouquet(-1)
def keyLeft(self):
    """Left key: previous bouquet in old-style mode, page up otherwise."""
    if config.usage.oldstyle_channel_select_controls.value:
        self.changeBouquet(-1)
    else:
        self.servicelist.instance.moveSelection(self.servicelist.instance.pageUp)
def keyRight(self):
    """Right key: next bouquet in old-style mode, page down otherwise."""
    if config.usage.oldstyle_channel_select_controls.value:
        self.changeBouquet(+1)
    else:
        self.servicelist.instance.moveSelection(self.servicelist.instance.pageDown)
def keyRecord(self):
    """Record key: start an instant record of the selected service (not for markers/directories)."""
    ref = self.getCurrentSelection()
    if ref and not ref.flags & (eServiceReference.isMarker | eServiceReference.isDirectory):
        Screens.InfoBar.InfoBar.instance.instantRecord(serviceRef=ref)
def showFavourites(self):
    """Blue key: enter the bouquet/favourites root (or go back up when already inside it)."""
    if not self.pathChangeDisabled:
        if not self.preEnterPath(self.bouquet_rootstr):
            if self.isBasePathEqual(self.bouquet_root):
                self.pathUp()
            else:
                currentRoot = self.getRoot()
                if currentRoot is None or currentRoot != self.bouquet_root:
                    self.clearPath()
                    self.enterPath(self.bouquet_root)
def keyNumber0(self, number):
    """'0' goes up one level while browsing inside a bouquet; otherwise it joins the number-zap input."""
    if self.selectionNumber or len(self.servicePath) <= 1:
        self.keyNumberGlobal(number)
        return
    self.keyGoUp()
def keyNumberGlobal(self, number):
    """Digit key: with the EPG-buttons option enabled, 1-4 open EPG helpers; otherwise jump alphabetically."""
    unichar = self.numericalTextInput.getKey(number)
    charstr = unichar.encode('utf-8')
    # NOTE(review): indentation reconstructed — the final branch is assumed to
    # pair with the deliteepgbuttons check, so alphabetic jumping still works
    # when that option is disabled; confirm against the original file.
    if config.misc.deliteepgbuttons.value:
        if unichar == '1':
            self.session.openWithCallback(self.ShowsearchNab, VirtualKeyBoard, title='Enter event to search', text='')
        elif unichar == '2':
            self.Show2Nab()
        elif unichar == '3':
            self.Show3Nab()
        elif unichar == '4':
            self.session.open(Nab_EpgSearchLast)
    elif len(charstr) == 1:
        self.servicelist.moveToChar(charstr[0])
def ShowsearchNab(self, cmd):
    """Virtual-keyboard callback: open the EPG search screen with the entered text."""
    if cmd is not None:
        self.session.open(Nab_EpgSearch, cmd)
def Show2Nab(self):
    """Open the EPG for the selected service, or report when no EPG data exists."""
    ref = self.getCurrentSelection()
    ptr = eEPGCache.getInstance()
    if ptr.startTimeQuery(ref) != -1:
        self.session.open(EPGSelection, ref)
    else:
        self.session.open(MessageBox, 'Sorry no epg currently available for this service.', MessageBox.TYPE_INFO)
def Show3Nab(self):
    """Launch an installed EVENTINFO plugin whose name contains 'EPG'."""
    myplugin = ''
    for p in plugins.getPlugins(where=PluginDescriptor.WHERE_EVENTINFO):
        nam = p.name
        pos = nam.find('EPG')
        if pos != -1:
            myplugin = p  # no break: the last matching plugin wins
    if myplugin:
        myplugin(session=self.session, servicelist=self)
def numberSelectionActions(self, number):
    """Collect digits for number zap and select the matching channel; disabled in move mode."""
    if not (hasattr(self, 'movemode') and self.movemode):
        if len(self.selectionNumber) > 4:
            self.clearNumberSelectionNumber()  # too many digits: start over
        self.selectionNumber = self.selectionNumber + str(number)
        ref, bouquet = Screens.InfoBar.InfoBar.instance.searchNumber(int(self.selectionNumber), bouquet=self.getRoot())
        if ref:
            if not ref.flags & eServiceReference.isMarker:
                self.enterUserbouquet(bouquet, save_root=False)
                self.setCurrentSelection(ref)
            # restart the timeout that clears the collected digits
            self.clearNumberSelectionNumberTimer.start(1000, True)
        else:
            self.clearNumberSelectionNumber()
def clearNumberSelectionNumber(self):
    """Reset the number-zap input (also the timeout-timer callback)."""
    self.clearNumberSelectionNumberTimer.stop()
    self.selectionNumber = ''
def keyAsciiCode(self):
    """Jump to the first service starting with the typed ASCII character."""
    unichar = unichr(getPrevAsciiCode())
    charstr = unichar.encode('utf-8')
    if len(charstr) == 1:  # only single-byte characters are usable for jumping
        self.servicelist.moveToChar(charstr[0])
def getRoot(self):
    """Return the current list root reference."""
    return self.servicelist.getRoot()
def getCurrentSelection(self):
    """Return the currently highlighted service reference."""
    return self.servicelist.getCurrent()
def setCurrentSelection(self, service):
    """Highlight *service* in the list without adjusting the visible window."""
    if service:
        self.servicelist.setCurrent(service, adjust=False)
def setCurrentSelectionAlternative(self, ref):
    """Select *ref*; in alternatives-edit mode prefer selecting one of its marked alternatives instead."""
    if self.bouquet_mark_edit == EDIT_ALTERNATIVES and not ref.flags & eServiceReference.isDirectory:
        for markedService in self.servicelist.getMarked():
            markedService = eServiceReference(markedService)
            self.setCurrentSelection(markedService)
            if markedService == self.getCurrentSelection():
                return  # a marked alternative was selectable; done
    self.setCurrentSelection(ref)
def getBouquetList(self):
    """Return [(name, ref)] of all visible bouquets (or just the favourites root in single-bouquet mode)."""
    bouquets = []
    serviceHandler = eServiceCenter.getInstance()
    if config.usage.multibouquet.value:
        list = serviceHandler.list(self.bouquet_root)
        if list:
            while True:
                s = list.getNext()
                if not s.valid():
                    break
                if s.flags & eServiceReference.isDirectory and not s.flags & eServiceReference.isInvisible:
                    info = serviceHandler.info(s)
                    if info:
                        bouquets.append((info.getName(s), s))
            return bouquets
        # falls through returning None when the bouquet root cannot be listed
    else:
        info = serviceHandler.info(self.bouquet_root)
        if info:
            bouquets.append((info.getName(self.bouquet_root), self.bouquet_root))
        return bouquets
def keyGoUp(self):
    """Step one level up in the service-list hierarchy.

    Depending on which base listing we are in, re-open the matching
    top-level view (favourites, satellites, providers or all services).
    Does nothing at the top level.
    """
    if len(self.servicePath) <= 1:
        return
    if self.isBasePathEqual(self.bouquet_root):
        self.showFavourites()
        return
    satellites_root = eServiceReference('%s FROM SATELLITES ORDER BY satellitePosition' % self.service_types)
    if self.isBasePathEqual(satellites_root):
        self.showSatellites()
        return
    providers_root = eServiceReference('%s FROM PROVIDERS ORDER BY name' % self.service_types)
    if self.isBasePathEqual(providers_root):
        self.showProviders()
        return
    self.showAllServices()
def nextMarker(self):
    """Jump the cursor forward to the next marker entry in the list."""
    service_list = self.servicelist
    service_list.moveToNextMarker()
def prevMarker(self):
    """Jump the cursor backward to the previous marker entry in the list."""
    service_list = self.servicelist
    service_list.moveToPrevMarker()
def gotoCurrentServiceOrProvider(self, ref):
    """Move the cursor to the currently playing service, or -- when the
    providers listing was just opened -- to the provider of the playing
    service.
    """
    # Renamed from 'str': the original shadowed the builtin.
    refstr = ref.toString()
    if _('Providers') in refstr:
        service = self.session.nav.getCurrentService()
        if service:
            info = service.info()
            if info:
                provider = info.getInfoString(iServiceInformation.sProvider)
                # Orbital position is hex-encoded in field 7 of the
                # playing service reference (namespace minus low 16 bits).
                op = int(self.session.nav.getCurrentlyPlayingServiceOrGroup().toString().split(':')[6][:-4] or '0', 16)
                providerstr = '1:7:0:0:0:0:0:0:0:0:(provider == "%s") && (satellitePosition == %s) && %s ORDER BY name:%s' % (provider,
                 op,
                 self.service_types[self.service_types.rfind(':') + 1:],
                 provider)
                self.setCurrentSelection(eServiceReference(providerstr))
    elif not self.isBasePathEqual(self.bouquet_root) or self.bouquet_mark_edit == EDIT_ALTERNATIVES:
        playingref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if playingref:
            self.setCurrentSelectionAlternative(playingref)
# Maximum number of zap-history entries kept per mode.
HISTORYSIZE = 20

# Persisted per-mode state: last played service and last list root.
config.tv = ConfigSubsection()
config.tv.lastservice = ConfigText()
config.tv.lastroot = ConfigText()
config.radio = ConfigSubsection()
config.radio.lastservice = ConfigText()
config.radio.lastroot = ConfigText()

# Service-list wide settings: last used mode and optional fixed startup
# service/bouquet (used instead of the remembered last service).
config.servicelist = ConfigSubsection()
config.servicelist.lastmode = ConfigText(default='tv')
config.servicelist.startupservice = ConfigText()
config.servicelist.startupservice_onstandby = ConfigYesNo(default=False)
config.servicelist.startuproot = ConfigText()
config.servicelist.startupmode = ConfigText(default='tv')
class ChannelSelection(ChannelSelectionBase, ChannelSelectionEdit, ChannelSelectionEPG, SelectionEventInfo):
    """The main TV/radio channel list screen.

    Combines the base list with edit, EPG and event-info behaviour and
    adds zapping, zap history, TV/radio mode switching and PiP zap
    ("pipzap") handling.
    """

    def __init__(self, session):
        ChannelSelectionBase.__init__(self, session)
        ChannelSelectionEdit.__init__(self)
        ChannelSelectionEPG.__init__(self)
        SelectionEventInfo.__init__(self)
        self['actions'] = ActionMap(['OkCancelActions', 'TvRadioActions'], {'cancel': self.cancel,
         'ok': self.channelSelected,
         'keyRadio': self.doRadioButton,
         'keyTV': self.doTVButton})
        self.__event_tracker = ServiceEventTracker(screen=self, eventmap={iPlayableService.evStart: self.__evServiceStart,
         iPlayableService.evEnd: self.__evServiceEnd})
        # Service that was playing when the list was opened (for zapBack).
        self.startServiceRef = None
        # Zap history: list of service paths; history_pos indexes into it.
        self.history = []
        self.history_pos = 0
        # Index from which forward history is pruned on the next zap.
        self.delhistpoint = None
        # Optional fixed startup service overrides the remembered one.
        if config.servicelist.startupservice.value and config.servicelist.startuproot.value:
            config.servicelist.lastmode.value = config.servicelist.startupmode.value
            if config.servicelist.lastmode.value == 'tv':
                config.tv.lastservice.value = config.servicelist.startupservice.value
                config.tv.lastroot.value = config.servicelist.startuproot.value
            elif config.servicelist.lastmode.value == 'radio':
                config.radio.lastservice.value = config.servicelist.startupservice.value
                config.radio.lastroot.value = config.servicelist.startuproot.value
        self.lastservice = config.tv.lastservice
        self.lastroot = config.tv.lastroot
        # Mode to revert to when a preview/parental zap is aborted.
        self.revertMode = None
        config.usage.multibouquet.addNotifier(self.multibouquet_config_changed)
        self.new_service_played = False
        # True while the list is zapping the picture-in-picture window.
        self.dopipzap = False
        self.onExecBegin.append(self.asciiOn)
        self.mainScreenMode = None
        self.mainScreenRoot = None
        # Deferred startup zap (run once, 100 ms after construction).
        self.lastChannelRootTimer = eTimer()
        self.lastChannelRootTimer.callback.append(self.__onCreate)
        self.lastChannelRootTimer.start(100, True)
        self.pipzaptimer = eTimer()

    def asciiOn(self):
        """Switch the remote keyboard into ASCII mode while the list is shown."""
        rcinput = eRCInput.getInstance()
        rcinput.setKeyboardMode(rcinput.kmAscii)

    def asciiOff(self):
        """Restore the default (non-ASCII) remote keyboard mode."""
        rcinput = eRCInput.getInstance()
        rcinput.setKeyboardMode(rcinput.kmNone)

    def multibouquet_config_changed(self, val):
        """React to a multibouquet setting change by re-reading the bouquet mode."""
        self.recallBouquetMode()

    def __evServiceStart(self):
        """On service start: grey out the now-playing entry in the list."""
        if self.dopipzap and hasattr(self.session, 'pip'):
            self.servicelist.setPlayableIgnoreService(self.session.pip.getCurrentServiceReference() or eServiceReference())
        else:
            service = self.session.nav.getCurrentService()
            if service:
                info = service.info()
                if info:
                    refstr = info.getInfoString(iServiceInformation.sServiceref)
                    self.servicelist.setPlayableIgnoreService(eServiceReference(refstr))

    def __evServiceEnd(self):
        """On service end: no entry is 'now playing' any more."""
        self.servicelist.setPlayableIgnoreService(eServiceReference())

    def setMode(self):
        """Re-open the saved root for the active mode and select its last service."""
        self.rootChanged = True
        self.restoreRoot()
        lastservice = eServiceReference(self.lastservice.value)
        if lastservice.valid():
            self.setCurrentSelection(lastservice)

    def doTVButton(self):
        """TV key: confirm selection if already in TV mode, else switch to TV."""
        if self.mode == MODE_TV:
            self.channelSelected(doClose=False)
        else:
            self.setModeTv()

    def setModeTv(self):
        """Switch the list to TV mode (remembering the mode to revert to)."""
        if self.revertMode is None:
            self.revertMode = self.mode
        self.lastservice = config.tv.lastservice
        self.lastroot = config.tv.lastroot
        config.servicelist.lastmode.value = 'tv'
        self.setTvMode()
        self.setMode()

    def doRadioButton(self):
        """Radio key: confirm selection if already in radio mode, else switch."""
        if self.mode == MODE_RADIO:
            self.channelSelected(doClose=False)
        else:
            self.setModeRadio()

    def setModeRadio(self):
        """Switch the list to radio mode (remembering the mode to revert to)."""
        if self.revertMode is None:
            self.revertMode = self.mode
        if config.usage.e1like_radio_mode.value:
            self.lastservice = config.radio.lastservice
            self.lastroot = config.radio.lastroot
        config.servicelist.lastmode.value = 'radio'
        self.setRadioMode()
        self.setMode()

    def __onCreate(self):
        """Deferred startup: restore the last mode and zap to the last service."""
        if config.usage.e1like_radio_mode.value:
            if config.servicelist.lastmode.value == 'tv':
                self.setModeTv()
            else:
                self.setModeRadio()
        else:
            self.setModeTv()
        lastservice = eServiceReference(self.lastservice.value)
        if lastservice.valid():
            self.zap()

    def channelSelected(self, doClose = True):
        """OK key handler: enter a directory, toggle a mark, or zap.

        With channel preview enabled, selecting a service other than the
        playing one previews it (list stays open, doClose forced False).
        """
        playingref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if config.usage.channelselection_preview.value and (playingref is None or self.getCurrentSelection() and self.getCurrentSelection() != playingref):
            doClose = False
        if not self.startServiceRef and not doClose:
            # Remember where we started so a cancel can zap back.
            self.startServiceRef = playingref
        ref = self.getCurrentSelection()
        if self.movemode and (self.isBasePathEqual(self.bouquet_root) or 'userbouquet.' in ref.toString()):
            self.toggleMoveMarked()
        elif ref.flags & eServiceReference.flagDirectory == eServiceReference.flagDirectory:
            # Entering a protected bouquet may be deferred to the
            # parental-control PIN callback.
            if Components.ParentalControl.parentalControl.isServicePlayable(ref, self.bouquetParentalControlCallback, self.session):
                self.enterPath(ref)
                self.gotoCurrentServiceOrProvider(ref)
            self.revertMode = None
        elif self.bouquet_mark_edit != OFF:
            if not (self.bouquet_mark_edit == EDIT_ALTERNATIVES and ref.flags & eServiceReference.isGroup):
                self.doMark()
        elif not (ref.flags & eServiceReference.isMarker or ref.type == -1):
            root = self.getRoot()
            if not root or not root.flags & eServiceReference.isGroup:
                self.zap(enable_pipzap=doClose, preview_zap=not doClose)
                self.asciiOff()
                if doClose:
                    if self.dopipzap:
                        self.zapBack()
                    self.startServiceRef = None
                    self.startRoot = None
                    self.correctChannelNumber()
                    self.movemode and self.toggleMoveMode()
                    self.editMode = False
                    self.protectContextMenu = True
                    self.close(ref)

    def bouquetParentalControlCallback(self, ref):
        """Called after a successful parental-control PIN to enter *ref*."""
        self.enterPath(ref)
        self.gotoCurrentServiceOrProvider(ref)
        self.revertMode = None

    def togglePipzap(self):
        """Toggle whether this list zaps the PiP window or the main screen."""
        title = self.instance.getTitle()
        pos = title.find(' (')
        if pos != -1:
            title = title[:pos]
        if self.dopipzap:
            # Leave pipzap mode; tear down PiP if it has no service.
            self.hidePipzapMessage()
            self.dopipzap = False
            if self.session.pip.pipservice is None:
                self.session.pipshown = False
                del self.session.pip
            self.__evServiceStart()
            # Move the cursor back to the main-screen service.
            lastservice = eServiceReference(self.lastservice.value)
            if lastservice.valid() and self.getCurrentSelection() != lastservice:
                self.setCurrentSelection(lastservice)
            if self.getCurrentSelection() != lastservice:
                self.servicelist.setCurrent(lastservice)
            title += _(' (TV)')
        else:
            # Enter pipzap mode; cursor follows the PiP service.
            self.showPipzapMessage()
            self.dopipzap = True
            self.__evServiceStart()
            self.setCurrentSelection(self.session.pip.getCurrentService())
            title += _(' (PiP)')
        self.setTitle(title)
        self.buildTitleString()

    def showPipzapMessage(self):
        """Show the PiP 'active' indicator, auto-hiding after the infobar timeout."""
        time = config.usage.infobar_timeout.index
        if time:
            self.pipzaptimer.callback.append(self.hidePipzapMessage)
            self.pipzaptimer.startLongTimer(time)
        self.session.pip.active()

    def hidePipzapMessage(self):
        """Hide the PiP 'active' indicator and cancel its timer."""
        if self.pipzaptimer.isActive():
            self.pipzaptimer.callback.remove(self.hidePipzapMessage)
            self.pipzaptimer.stop()
        self.session.pip.inactive()

    def zap(self, enable_pipzap = False, preview_zap = False, checkParentalControl = True, ref = None):
        """Zap to *ref* (or the current selection).

        In pipzap mode the PiP window is zapped directly; otherwise a
        running timeshift is checked first and the actual zap happens in
        zapCheckTimeshiftCallback.  preview_zap suppresses history/root
        persistence.
        """
        self.curRoot = self.startRoot
        nref = ref or self.getCurrentSelection()
        ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if enable_pipzap and self.dopipzap:
            ref = self.session.pip.getCurrentService()
            if ref is None or ref != nref:
                nref = self.session.pip.resolveAlternatePipService(nref)
                if nref and (not checkParentalControl or Components.ParentalControl.parentalControl.isServicePlayable(nref, boundFunction(self.zap, enable_pipzap=True, checkParentalControl=False))):
                    self.session.pip.playService(nref)
                    self.__evServiceStart()
                    self.showPipzapMessage()
                else:
                    self.setStartRoot(self.curRoot)
                    self.setCurrentSelection(ref)
        elif ref is None or ref != nref:
            Screens.InfoBar.InfoBar.instance.checkTimeshiftRunning(boundFunction(self.zapCheckTimeshiftCallback, enable_pipzap, preview_zap, nref))
        elif not preview_zap:
            # Re-selecting the already playing service: just persist state.
            self.saveRoot()
            self.saveChannel(nref)
            config.servicelist.lastmode.save()
            self.setCurrentSelection(nref)
            if self.startServiceRef is None or nref != self.startServiceRef:
                self.addToHistory(nref)
            self.rootChanged = False
            self.revertMode = None

    def zapCheckTimeshiftCallback(self, enable_pipzap, preview_zap, nref, answer):
        """Second zap stage, after the 'stop timeshift?' query.

        answer True performs the zap (persisting state unless previewing);
        answer False restores the previous root and selection.
        """
        if answer:
            self.new_service_played = True
            self.session.nav.playService(nref)
            if not preview_zap:
                self.saveRoot()
                self.saveChannel(nref)
                config.servicelist.lastmode.save()
                if self.startServiceRef is None or nref != self.startServiceRef:
                    self.addToHistory(nref)
                if self.dopipzap:
                    self.setCurrentSelection(self.session.pip.getCurrentService())
                else:
                    self.mainScreenMode = config.servicelist.lastmode.value
                    self.mainScreenRoot = self.getRoot()
                self.revertMode = None
            else:
                Notifications.RemovePopup('Parental control')
                self.setCurrentSelection(nref)
        else:
            self.setStartRoot(self.curRoot)
            self.setCurrentSelection(self.session.nav.getCurrentlyPlayingServiceOrGroup())
        if not preview_zap:
            self.hide()

    def newServicePlayed(self):
        """Return (and clear) whether a new service was played since last asked."""
        ret = self.new_service_played
        self.new_service_played = False
        return ret

    def addToHistory(self, ref):
        """Append the current service path plus *ref* to the zap history."""
        # Prune forward entries left over from navigating back in history.
        if self.delhistpoint is not None:
            x = self.delhistpoint
            while x <= len(self.history)-1:
                del self.history[x]
        self.delhistpoint = None
        if self.servicePath is not None:
            tmp = self.servicePath[:]
            tmp.append(ref)
            try:
                del self.history[self.history_pos + 1:]
            except:
                pass
            self.history.append(tmp)
            hlen = len(self.history)
            # Cap the history at HISTORYSIZE, dropping the oldest entry.
            if hlen > HISTORYSIZE:
                del self.history[0]
                hlen -= 1
            self.history_pos = hlen - 1

    def historyBack(self):
        """Zap one step back in history (recording the playing service first)."""
        hlen = len(self.history)
        currentPlayedRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if hlen > 0 and currentPlayedRef and self.history[self.history_pos][-1] != currentPlayedRef:
            self.addToHistory(currentPlayedRef)
            hlen = len(self.history)
        if hlen > 1 and self.history_pos > 0:
            self.history_pos -= 1
            self.setHistoryPath()
        # Mark everything after the new position as prunable forward history.
        self.delhistpoint = self.history_pos+1

    def historyNext(self):
        """Zap one step forward in history."""
        hlen = len(self.history)
        if hlen > 1 and self.history_pos < hlen - 1:
            self.history_pos += 1
            self.setHistoryPath()

    def setHistoryPath(self, doZap = True):
        """Restore the bouquet path and service of the current history entry."""
        path = self.history[self.history_pos][:]
        # Each history entry is the bouquet path with the service appended.
        ref = path.pop()
        del self.servicePath[:]
        self.servicePath += path
        self.saveRoot()
        root = path[-1]
        cur_root = self.getRoot()
        if cur_root and cur_root != root:
            self.setRoot(root)
        if doZap:
            self.session.nav.playService(ref, adjust=False)
        if self.dopipzap:
            self.setCurrentSelection(self.session.pip.getCurrentService())
        else:
            self.setCurrentSelection(ref)
        self.saveChannel(ref)

    def historyClear(self):
        """Drop all history entries except the most recent one."""
        if self and self.servicelist:
            for i in range(0, len(self.history)-1):
                del self.history[0]
            self.history_pos = len(self.history)-1
            return True
        return False

    def historyZap(self, direction):
        """Open the history-zap selector, pre-selecting the entry at
        history_pos + direction (clamped to the valid range)."""
        hlen = len(self.history)
        if hlen < 1: return
        mark = self.history_pos
        selpos = self.history_pos + direction
        if selpos < 0: selpos = 0
        if selpos > hlen-1: selpos = hlen-1
        serviceHandler = eServiceCenter.getInstance()
        historylist = [ ]
        for x in self.history:
            info = serviceHandler.info(x[-1])
            if info: historylist.append((info.getName(x[-1]), x[-1]))
        self.session.openWithCallback(self.historyMenuClosed, HistoryZapSelector, historylist, selpos, mark, invert_items=True, redirect_buttons=True, wrap_around=True)

    def historyMenuClosed(self, retval):
        """Zap to the history entry chosen in the history-zap selector."""
        if not retval: return
        hlen = len(self.history)
        pos = 0
        # Find the history entry whose service matches the returned ref.
        for x in self.history:
            if x[-1] == retval: break
            pos += 1
        self.delhistpoint = pos+1
        if pos < hlen and pos != self.history_pos:
            tmp = self.history[pos]
            # self.history.append(tmp)
            # del self.history[pos]
            self.history_pos = pos
            self.setHistoryPath()

    def saveRoot(self):
        """Persist the current bouquet path, fixing a TV/radio mode mismatch."""
        path = ''
        for i in self.servicePath:
            path += i.toString()
            path += ';'
        if path and path != self.lastroot.value:
            # Guard against a radio path being saved while in TV mode etc.
            if self.mode == MODE_RADIO and 'FROM BOUQUET "bouquets.tv"' in path:
                self.setModeTv()
            elif self.mode == MODE_TV and 'FROM BOUQUET "bouquets.radio"' in path:
                self.setModeRadio()
            self.lastroot.value = path
            self.lastroot.save()

    def restoreRoot(self):
        """Rebuild the bouquet path from the persisted lastroot value."""
        tmp = [ x for x in self.lastroot.value.split(';') if x != '' ]
        current = [ x.toString() for x in self.servicePath ]
        if tmp != current or self.rootChanged:
            self.clearPath()
            cnt = 0
            for i in tmp:
                self.servicePath.append(eServiceReference(i))
                cnt += 1
            if cnt:
                path = self.servicePath.pop()
                self.enterPath(path)
            else:
                self.showFavourites()
                self.saveRoot()
            self.rootChanged = False

    def preEnterPath(self, refstr):
        """If *refstr* belongs to the persisted path, restore that path
        instead of entering it fresh.  Returns True when handled."""
        if self.servicePath and self.servicePath[0] != eServiceReference(refstr):
            pathstr = self.lastroot.value
            if pathstr is not None and refstr in pathstr:
                self.restoreRoot()
                lastservice = eServiceReference(self.lastservice.value)
                if lastservice.valid():
                    self.setCurrentSelection(lastservice)
                return True
        return False

    def saveChannel(self, ref):
        """Persist *ref* as the last played service (skipping protected ones)."""
        if ref is not None:
            refstr = ref.toString()
        else:
            refstr = ''
        if refstr != self.lastservice.value and not Components.ParentalControl.parentalControl.isProtected(ref):
            self.lastservice.value = refstr
            self.lastservice.save()

    def setCurrentServicePath(self, path, doZap = True):
        """Make *path* the current history entry and activate it."""
        hlen = len(self.history)
        # NOTE(review): with hlen == 0 the first branch appends, but
        # execution then falls into the else branch below (hlen is still
        # 0), which removes the entry again -- looks crash-prone; confirm
        # against upstream whether 'if hlen == 1' should be 'elif'.
        if not hlen:
            self.history.append(path)
            self.history_pos = 0
        if hlen == 1:
            self.history[self.history_pos] = path
        else:
            # De-duplicate: move an existing identical path to the end.
            if path in self.history:
                self.history.remove(path)
                self.history_pos -= 1
            tmp = self.history[self.history_pos][:]
            self.history.append(tmp)
            self.history_pos += 1
            self.history[self.history_pos] = path
        self.setHistoryPath(doZap)

    def getCurrentServicePath(self):
        """Return the active history entry (path + service), or None."""
        if self.history:
            return self.history[self.history_pos]

    def recallPrevService(self):
        """Swap the two most recent history entries and zap ('last service')."""
        hlen = len(self.history)
        currentPlayedRef = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if hlen > 0 and currentPlayedRef and self.history[self.history_pos][-1] != currentPlayedRef:
            self.addToHistory(currentPlayedRef)
            hlen = len(self.history)
        if hlen > 1:
            if self.history_pos == hlen - 1:
                tmp = self.history[self.history_pos]
                self.history[self.history_pos] = self.history[self.history_pos - 1]
                self.history[self.history_pos - 1] = tmp
            else:
                tmp = self.history[self.history_pos + 1]
                self.history[self.history_pos + 1] = self.history[self.history_pos]
                self.history[self.history_pos] = tmp
            self.setHistoryPath()

    def cancel(self):
        """Exit key: restore the previous root/selection and zap back."""
        if self.revertMode is None:
            self.restoreRoot()
            if self.dopipzap:
                self.setCurrentSelection(self.session.pip.getCurrentService())
            else:
                lastservice = eServiceReference(self.lastservice.value)
                if lastservice.valid() and self.getCurrentSelection() != lastservice:
                    self.setCurrentSelection(lastservice)
        self.asciiOff()
        self.zapBack()
        self.correctChannelNumber()
        self.editMode = False
        self.protectContextMenu = True
        self.close(None)

    def zapBack(self):
        """Return to the service that was playing when the list was opened."""
        playingref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if self.startServiceRef and (playingref is None or playingref != self.startServiceRef):
            self.setStartRoot(self.startRoot)
            self.new_service_played = True
            self.session.nav.playService(self.startServiceRef)
            self.saveChannel(self.startServiceRef)
        else:
            self.restoreMode()
        self.startServiceRef = None
        self.startRoot = None
        if self.dopipzap:
            # This unfortunately won't work with subservices
            self.setCurrentSelection(self.session.pip.getCurrentService())
        else:
            lastservice = eServiceReference(self.lastservice.value)
            if lastservice.valid() and self.getCurrentSelection() == lastservice:
                pass
            else:
                self.setCurrentSelection(playingref)

    def setStartRoot(self, root):
        """Re-enter *root*, first reverting any temporary mode switch."""
        if root:
            if self.revertMode == MODE_TV:
                self.setModeTv()
            elif self.revertMode == MODE_RADIO:
                self.setModeRadio()
            self.revertMode = None
            self.enterUserbouquet(root)

    def restoreMode(self):
        """Revert a temporary TV/radio mode switch, if one is pending."""
        if self.revertMode == MODE_TV:
            self.setModeTv()
        elif self.revertMode == MODE_RADIO:
            self.setModeRadio()
        self.revertMode = None

    def correctChannelNumber(self):
        """Re-align the displayed channel number with the playing service,
        restoring pipzap mode/root/title afterwards when PiP is active."""
        current_ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        if self.dopipzap:
            tmp_mode = config.servicelist.lastmode.value
            tmp_root = self.getRoot()
            tmp_ref = self.getCurrentSelection()
            pip_ref = self.session.pip.getCurrentService()
            if tmp_ref and pip_ref and tmp_ref != pip_ref:
                self.revertMode = None
                return
            if self.mainScreenMode == 'tv':
                self.setModeTv()
            elif self.mainScreenMode == 'radio':
                self.setModeRadio()
            if self.mainScreenRoot:
                self.setRoot(self.mainScreenRoot)
                self.setCurrentSelection(current_ref)
        selected_ref = self.getCurrentSelection()
        if selected_ref and current_ref and selected_ref.getChannelNum() != current_ref.getChannelNum():
            oldref = self.session.nav.currentlyPlayingServiceReference
            if oldref and selected_ref == oldref or oldref != current_ref and selected_ref == current_ref:
                # Force a re-announce so displays pick up the new number.
                self.session.nav.currentlyPlayingServiceOrGroup = selected_ref
                self.session.nav.pnav.navEvent(iPlayableService.evStart)
        if self.dopipzap:
            if tmp_mode == 'tv':
                self.setModeTv()
            elif tmp_mode == 'radio':
                self.setModeRadio()
            self.enterUserbouquet(tmp_root)
            title = self.instance.getTitle()
            pos = title.find(' (')
            if pos != -1:
                title = title[:pos]
            title += _(' (PiP)')
            self.setTitle(title)
            self.buildTitleString()
            if tmp_ref and pip_ref and tmp_ref.getChannelNum() != pip_ref.getChannelNum():
                self.session.pip.currentService = tmp_ref
            self.setCurrentSelection(tmp_ref)
        self.revertMode = None
class RadioInfoBar(Screen):
    """Minimal info-bar screen shown while the radio channel list is open;
    exposes the RDS decoder source for the skin."""

    def __init__(self, session):
        Screen.__init__(self, session)
        self['RdsDecoder'] = RdsDecoder(self.session.nav)
class ChannelSelectionRadio(ChannelSelectionBase, ChannelSelectionEdit, ChannelSelectionEPG, InfoBarBase, SelectionEventInfo):
    """Radio-mode channel list with its own info bar and RDS/Rass support."""
    ALLOW_SUSPEND = True

    def __init__(self, session, infobar):
        ChannelSelectionBase.__init__(self, session)
        ChannelSelectionEdit.__init__(self)
        ChannelSelectionEPG.__init__(self)
        InfoBarBase.__init__(self)
        SelectionEventInfo.__init__(self)
        self.infobar = infobar
        self.startServiceRef = None
        self.onLayoutFinish.append(self.onCreate)
        # Fixed: the dialog must be created BEFORE setAnimationMode is
        # called on it (the original called self.info.setAnimationMode(0)
        # one line before self.info was assigned -> AttributeError).
        self.info = session.instantiateDialog(RadioInfoBar)
        self.info.setAnimationMode(0)
        self['actions'] = ActionMap(['OkCancelActions', 'TvRadioActions'], {'keyTV': self.cancel,
         'keyRadio': self.cancel,
         'cancel': self.cancel,
         'ok': self.channelSelected})
        self.__event_tracker = ServiceEventTracker(screen=self, eventmap={iPlayableService.evStart: self.__evServiceStart,
         iPlayableService.evEnd: self.__evServiceEnd})
        # (removed a duplicate 'self.infobar = infobar' assignment)
        self['RdsDecoder'] = self.info['RdsDecoder']
        self['RdsActions'] = HelpableActionMap(self, 'InfobarRdsActions', {'startRassInteractive': (self.startRassInteractive, _('View Rass interactive...'))}, -1)
        self['RdsActions'].setEnabled(False)
        infobar.rds_display.onRassInteractivePossibilityChanged.append(self.RassInteractivePossibilityChanged)
        self.onClose.append(self.__onClose)
        self.onExecBegin.append(self.__onExecBegin)
        self.onExecEnd.append(self.__onExecEnd)

    def __onClose(self):
        """Leaving radio mode: resume the last TV service."""
        lastservice = eServiceReference(config.tv.lastservice.value)
        self.session.nav.playService(lastservice)

    def startRassInteractive(self):
        """Open the Rass interactive screen, hiding the radio info bar."""
        self.info.hide()
        self.infobar.rass_interactive = self.session.openWithCallback(self.RassInteractiveClosed, RassInteractive)

    def RassInteractiveClosed(self):
        """Rass interactive closed: restore the info bar and slide picture."""
        self.info.show()
        self.infobar.rass_interactive = None
        self.infobar.RassSlidePicChanged()

    def RassInteractivePossibilityChanged(self, state):
        """Enable/disable the Rass key binding as availability changes."""
        self['RdsActions'].setEnabled(state)

    def __onExecBegin(self):
        self.info.show()

    def __onExecEnd(self):
        self.info.hide()

    def cancel(self):
        """Close the radio list, detaching the Rass availability callback."""
        self.infobar.rds_display.onRassInteractivePossibilityChanged.remove(self.RassInteractivePossibilityChanged)
        self.info.hide()
        self.close(None)

    def __evServiceStart(self):
        """On service start: grey out the now-playing entry in the list."""
        service = self.session.nav.getCurrentService()
        if service:
            info = service.info()
            if info:
                refstr = info.getInfoString(iServiceInformation.sServiceref)
                self.servicelist.setPlayableIgnoreService(eServiceReference(refstr))

    def __evServiceEnd(self):
        self.servicelist.setPlayableIgnoreService(eServiceReference())

    def saveRoot(self):
        """Persist the current radio bouquet path."""
        path = ''
        for i in self.servicePathRadio:
            path += i.toString()
            path += ';'
        if path and path != config.radio.lastroot.value:
            config.radio.lastroot.value = path
            config.radio.lastroot.save()

    def restoreRoot(self):
        """Rebuild the radio bouquet path from the persisted lastroot value."""
        tmp = [x for x in config.radio.lastroot.value.split(';') if x != '']
        # NOTE(review): compares against self.servicePath but appends to
        # self.servicePathRadio -- presumably the base class aliases them
        # in radio mode; confirm against ChannelSelectionBase.
        current = [x.toString() for x in self.servicePath]
        if tmp != current or self.rootChanged:
            cnt = 0
            for i in tmp:
                self.servicePathRadio.append(eServiceReference(i))
                cnt += 1
            if cnt:
                path = self.servicePathRadio.pop()
                self.enterPath(path)
            else:
                self.showFavourites()
                self.saveRoot()
            self.rootChanged = False

    def preEnterPath(self, refstr):
        """If *refstr* belongs to the persisted radio path, restore that
        path instead of entering it fresh.  Returns True when handled."""
        if self.servicePathRadio and self.servicePathRadio[0] != eServiceReference(refstr):
            pathstr = config.radio.lastroot.value
            if pathstr is not None and refstr in pathstr:
                self.restoreRoot()
                lastservice = eServiceReference(config.radio.lastservice.value)
                if lastservice.valid():
                    self.setCurrentSelection(lastservice)
                return True
        return False

    def onCreate(self):
        """Layout finished: restore root and resume the last radio service."""
        self.setRadioMode()
        self.restoreRoot()
        lastservice = eServiceReference(config.radio.lastservice.value)
        if lastservice.valid():
            self.servicelist.setCurrent(lastservice)
            self.session.nav.playService(lastservice)
        else:
            self.session.nav.stopService()
        self.info.show()

    def channelSelected(self, doClose = False):
        """OK key handler: enter a directory, toggle a mark, or play."""
        ref = self.getCurrentSelection()
        if self.movemode:
            self.toggleMoveMarked()
        elif ref.flags & eServiceReference.flagDirectory == eServiceReference.flagDirectory:
            self.enterPath(ref)
            self.gotoCurrentServiceOrProvider(ref)
        elif self.bouquet_mark_edit != OFF:
            if not (self.bouquet_mark_edit == EDIT_ALTERNATIVES and ref.flags & eServiceReference.isGroup):
                self.doMark()
        elif not ref.flags & eServiceReference.isMarker:
            cur_root = self.getRoot()
            if not cur_root or not cur_root.flags & eServiceReference.isGroup:
                playingref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
                if playingref is None or playingref != ref:
                    self.session.nav.playService(ref)
                    config.radio.lastservice.value = ref.toString()
                    config.radio.lastservice.save()
                self.saveRoot()

    def zapBack(self):
        """Radio list has no zap history: re-confirm the current selection."""
        self.channelSelected()
class SimpleChannelSelection(ChannelSelectionBase, SelectionEventInfo):
    """A picker variant of the channel list: instead of zapping, it closes
    with the chosen service (and optionally its bouquet).

    Used by timers, plugins etc. to let the user select a service.
    """

    def __init__(self, session, title, currentBouquet = False, returnBouquet = False, setService = None, setBouquet = None):
        # currentBouquet: open in the bouquet currently shown by the main
        #   channel list; returnBouquet: also pass the bouquet to close();
        # setService/setBouquet: pre-select a service / open a bouquet.
        ChannelSelectionBase.__init__(self, session)
        SelectionEventInfo.__init__(self)
        self['actions'] = ActionMap(['OkCancelActions', 'TvRadioActions'], {'cancel': self.close,
         'ok': self.channelSelected,
         'keyRadio': self.setModeRadio,
         'keyTV': self.setModeTv})
        self.bouquet_mark_edit = OFF
        self.title = title
        self.currentBouquet = currentBouquet
        self.returnBouquet = returnBouquet
        self.setService = setService
        self.setBouquet = setBouquet
        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        """Open the requested bouquet and pre-select the requested service."""
        self.setModeTv()
        if self.currentBouquet or self.setBouquet:
            ref = self.setBouquet or Screens.InfoBar.InfoBar.instance.servicelist.getRoot()
            if ref:
                self.enterPath(ref)
                self.gotoCurrentServiceOrProvider(ref)
        if self.setService:
            self.setCurrentSelection(self.setService)

    def saveRoot(self):
        # Intentionally a no-op: the picker must not change persisted roots.
        pass

    def keyRecord(self):
        # Recording makes no sense from the picker; swallow the key.
        return 0

    def channelSelected(self):
        """OK key: descend into directories, otherwise close with the choice."""
        ref = self.getCurrentSelection()
        if ref.flags & eServiceReference.flagDirectory == eServiceReference.flagDirectory:
            self.enterPath(ref)
            self.gotoCurrentServiceOrProvider(ref)
        elif not ref.flags & eServiceReference.isMarker:
            ref = self.getCurrentSelection()
            if self.returnBouquet and len(self.servicePath):
                self.close(ref, self.servicePath[-1])
            else:
                self.close(ref)

    def setModeTv(self):
        """Switch the picker to the TV favourites listing."""
        self.setTvMode()
        self.showFavourites()

    def setModeRadio(self):
        """Switch the picker to the radio favourites listing."""
        self.setRadioMode()
        self.showFavourites()
class HistoryZapSelector(Screen):
    """Menu listing the zap history; each row shows service name, current
    event, description, remaining time, picon and orbital position.

    Closes with the selected service reference (or None on cancel).
    """

    def __init__(self, session, items=None, sel_item=0, mark_item=0, invert_items=False, redirect_buttons=False, wrap_around=True):
        Screen.__init__(self, session)
        # Fixed: mutable default argument ([] was shared across calls).
        items = items or []
        self.redirectButton = redirect_buttons
        self.invertItems = invert_items
        if self.invertItems:
            self.currentPos = len(items) - sel_item - 1
        else:
            self.currentPos = sel_item
        self["actions"] = ActionMap(["OkCancelActions", "InfobarCueSheetActions"],
            {
                "ok": self.okbuttonClick,
                "cancel": self.cancelClick,
                "jumpPreviousMark": self.prev,
                "jumpNextMark": self.next,
                "toggleMark": self.okbuttonClick,
            })
        self.setTitle(_("History zap..."))
        self.list = []
        cnt = 0
        serviceHandler = eServiceCenter.getInstance()
        # Each item is a (display name, service reference) pair.
        for x in items:
            info = serviceHandler.info(x[-1])
            if info:
                orbpos = self.getOrbitalPos(ServiceReference(x[1]))
                # (removed unused local 'serviceName'; the row shows x[0])
                eventName = ""
                descriptionName = ""
                durationTime = ""
                event = info.getEvent(x[-1])
                if event:
                    eventName = event.getEventName()
                    if eventName is None:
                        eventName = ""
                    else:
                        # Strip age-rating suffixes from the event title.
                        eventName = eventName.replace('(18+)', '').replace('18+', '').replace('(16+)', '').replace('16+', '').replace('(12+)', '').replace('12+', '').replace('(7+)', '').replace('7+', '').replace('(6+)', '').replace('6+', '').replace('(0+)', '').replace('0+', '')
                    descriptionName = event.getShortDescription()
                    if descriptionName is None or descriptionName == "":
                        descriptionName = event.getExtendedDescription()
                        if descriptionName is None:
                            descriptionName = ""
                    begin = event.getBeginTime()
                    if begin is not None:
                        end = begin + event.getDuration()
                        # '//' keeps the Python 2 integer-division behavior
                        # explicit (and correct under Python 3).
                        remaining = (end - int(time())) // 60
                        prefix = ""
                        if remaining > 0:
                            prefix = "+"
                        local_begin = localtime(begin)
                        local_end = localtime(end)
                        durationTime = _("%02d.%02d - %02d.%02d (%s%d min)") % (local_begin[3], local_begin[4], local_end[3], local_end[4], prefix, remaining)
                png = ""
                picon = getPiconName(str(ServiceReference(x[1])))
                if picon != "":
                    png = loadPNG(picon)
                # Row layout: (ref, mark glyph, name, event, description,
                # duration, picon, orbital position).
                entry = (x[1], "»" if cnt == mark_item else "", x[0], eventName, descriptionName, durationTime, png, orbpos)
                if self.invertItems:
                    self.list.insert(0, entry)
                else:
                    self.list.append(entry)
                cnt += 1
        self["menu"] = List(self.list, enableWrapAround=wrap_around)
        self.onShown.append(self.__onShown)

    def __onShown(self):
        # Position the cursor on the requested entry once visible.
        self["menu"].index = self.currentPos

    def prev(self):
        """'Previous' key; direction is flipped when redirect_buttons is set."""
        if self.redirectButton:
            self.down()
        else:
            self.up()

    def next(self):
        """'Next' key; direction is flipped when redirect_buttons is set."""
        if self.redirectButton:
            self.up()
        else:
            self.down()

    def up(self):
        self["menu"].selectPrevious()

    def down(self):
        self["menu"].selectNext()

    def getCurrent(self):
        """Return the service reference of the highlighted row, or a falsy value."""
        cur = self["menu"].current
        return cur and cur[0]

    def okbuttonClick(self):
        self.close(self.getCurrent())

    def cancelClick(self):
        self.close(None)

    def getOrbitalPos(self, ref):
        """Return a human-readable source tag for *ref*: 'Stream', 'DVB-T',
        'DVB-C' or an orbital position such as 19.2 degrees E/W."""
        if hasattr(ref, 'sref'):
            refstr = str(ref.sref)
        else:
            refstr = str(ref)
        refstr = refstr and GetWithAlternative(refstr)
        # (removed a stray Python 2 debug print of refstr)
        if '%3a//' in refstr:
            return "%s" % _("Stream")
        # Sentinel namespaces mark terrestrial/cable services.
        op = int(refstr.split(':', 10)[6][:-4] or "0", 16)
        if op == 0xeeee:
            return "%s" % _("DVB-T")
        if op == 0xffff:
            return "%s" % _("DVB-C")
        direction = 'E'
        if op > 1800:
            op = 3600 - op
            direction = 'W'
        return ("%d.%d\xc2\xb0%s") % (op // 10, op % 10, direction)
|
rombie/contrail-controller | refs/heads/master | src/container/mesos-manager/mesos_manager/vnc/vnc_common.py | 1 | #
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
from vnc_mesos_config import VncMesosConfig as vnc_mesos_config
from vnc_api.vnc_api import (KeyValuePair,KeyValuePairs)
from config_db import DBBaseMM
class VncCommon(object):
    """Shared base for VNC Mesos objects.

    Records the Mesos object kind in an annotation map that derived
    classes can attach to VNC API objects.
    """

    def __init__(self, mesos_obj_kind):
        # Seed the annotation map with the object's kind.
        self.annotations = {'kind': mesos_obj_kind}

    def get_annotations(self):
        """Return this object's annotation dictionary."""
        return self.annotations
|
royalharsh/grpc | refs/heads/master | tools/buildgen/plugins/make_fuzzer_tests.py | 14 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Create tests for each fuzzer"""
import copy
import glob
def mako_plugin(dictionary):
    """For every fuzzer build target, add a '<name>_one_entry' test target
    plus one test entry per file found in its corpus directories.

    Mutates dictionary['targets'] and dictionary['tests'] in place.
    """
    targets = dictionary['targets']
    tests = dictionary['tests']
    # New targets are appended while iterating; the copies carry
    # build == 'test', so they are never expanded a second time.
    for target in targets:
        if target['build'] != 'fuzzer':
            continue
        one_entry = copy.deepcopy(target)
        one_entry['build'] = 'test'
        one_entry['name'] += '_one_entry'
        one_entry['run'] = False
        one_entry['src'].append('test/core/util/one_corpus_entry_fuzzer.c')
        one_entry['own_src'].append('test/core/util/one_corpus_entry_fuzzer.c')
        targets.append(one_entry)
        # One test per corpus file, sorted within each corpus directory.
        for corpus in one_entry['corpus_dirs']:
            for entry in sorted(glob.glob('%s/*' % corpus)):
                tests.append({
                    'name': one_entry['name'],
                    'args': [entry],
                    'exclude_iomgrs': ['uv'],
                    'exclude_configs': ['tsan'],
                    'uses_polling': False,
                    'platforms': ['mac', 'linux'],
                    'ci_platforms': ['linux'],
                    'flaky': False,
                    'language': 'c',
                    'cpu_cost': 0.1,
                })
|
Avantol13/music_generator | refs/heads/master | mgen/__init__.py | 2 | from mgen.create import MusicGenerator
from mgen.style import Style
from mgen.style import DEFAULT_CFG_FILE
from mgen.style import JAZZ_CFG_FILE
from mgen import cfg_import
|
schlueter/ansible | refs/heads/devel | lib/ansible/modules/cloud/azure/azure_rm_containerregistry.py | 21 | #!/usr/bin/python
#
# Copyright (c) 2017 Yawei Wang, <yaweiw@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_containerregistry
version_added: "2.5"
short_description: Manage an Azure Container Registry.
description:
- Create, update and delete an Azure Container Registry.
options:
resource_group:
description:
- Name of a resource group where the Container Registry exists or will be created.
required: true
name:
description:
- Name of the Container Registry.
default: null
required: true
state:
description:
- Assert the state of the container registry. Use 'present' to create or update an container registry and 'absent' to delete it.
default: present
choices:
- absent
- present
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
admin_user_enabled:
description:
- If enabled, you can use the registry name as username and admin user access key as password to docker login to your container registry.
default: false
sku:
description:
- Specifies the SKU to use. Currently can be either Basic, Standard or Premium.
default: Standard
choices:
- Basic
- Standard
- Premium
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Yawei Wang (@yaweiw)"
'''
EXAMPLES = '''
- name: Create an azure container registry
azure_rm_containerregistry:
name: testacr1
location: eastus
resource_group: testrg
state: present
admin_user_enabled: true
sku: Premium
tags:
Release: beta1
Environment: Production
- name: Remove an azure container registry
azure_rm_containerregistry:
name: testacr2
resource_group: testrg
state: absent
'''
RETURN = '''
id:
description:
- Resource ID
returned: always
type: str
sample: /subscriptions/00000000-0000-0000-0000-000000000/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registries/myRegistry
name:
description:
- Registry name
returned: always
type: str
sample: myregistry
location:
description:
- Resource location
returned: always
type: str
sample: westus
admin_user_enabled:
description:
- Is admin user enabled
returned: always
type: bool
sample: true
sku:
description:
- SKU
returned: always
type: str
sample: Standard
provisioning_state:
description:
- Provisioning state
returned: always
type: str
sample: Succeeded
login_server:
description:
- Registry login server
returned: always
type: str
sample: myregistry.azurecr.io
credentials:
description:
- Passwords defined for the registry
returned: always
type: complex
contains:
password:
description:
- password value
returned: when registry exists and C(admin_user_enabled) is set
type: str
sample: pass1value
password2:
description:
- password2 value
returned: when registry exists and C(admin_user_enabled) is set
type: str
sample: pass2value
tags:
description:
- Tags
returned: always
type: dict
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.containerregistry.models import (
Registry,
RegistryUpdateParameters,
StorageAccountProperties,
Sku,
SkuName,
SkuTier,
ProvisioningState,
PasswordName,
WebhookCreateParameters,
WebhookUpdateParameters,
WebhookAction,
WebhookStatus
)
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
except ImportError as exc:
# This is handled in azure_rm_common
pass
def create_containerregistry_dict(registry, credentials):
    '''
    Helper method to deserialize a ContainerRegistry to a dict
    :param: registry: return container registry object from Azure rest API call
    :param: credentials: return credential objects from Azure rest API call
    :return: dict of return container registry and it's credentials
    '''
    if registry is None:
        # No registry object: every scalar field degrades to the empty string.
        results = dict(
            id="",
            name="",
            location="",
            admin_user_enabled="",
            sku="",
            provisioning_state="",
            login_server="",
            credentials=dict(),
            tags=""
        )
    else:
        results = dict(
            id=registry.id,
            name=registry.name,
            location=registry.location,
            admin_user_enabled=registry.admin_user_enabled,
            sku=registry.sku.name,
            provisioning_state=registry.provisioning_state,
            login_server=registry.login_server,
            credentials=dict(),
            tags=registry.tags
        )
    # Only populated when admin access is enabled and credentials were fetched.
    if credentials:
        results['credentials'] = dict(
            password=credentials.passwords[0].value,
            password2=credentials.passwords[1].value
        )
    return results
class Actions:
    """Symbolic action codes used by exec_module to decide what to do."""
    NoAction = 0
    Create = 1
    Update = 2
class AzureRMContainerRegistry(AzureRMModuleBase):
    """Configuration class for an Azure RM container registry resource.

    Reconciles the requested 'present'/'absent' state against the actual
    registry via the azure-mgmt-containerregistry SDK.
    """

    def __init__(self):
        # Argument schema exposed to playbooks.
        # NOTE(review): the effective default for 'sku' is 'Basic' although
        # DOCUMENTATION above advertises 'Standard'; left unchanged here for
        # backward compatibility with existing playbooks.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True
            ),
            state=dict(
                type='str',
                required=False,
                default='present',
                choices=['present', 'absent']
            ),
            location=dict(
                type='str',
                required=False
            ),
            admin_user_enabled=dict(
                type='bool',
                required=False,
                default=False
            ),
            sku=dict(
                type='str',
                required=False,
                default='Basic',
                choices=['Basic', 'Standard', 'Premium']
            )
        )

        # Populated from kwargs inside exec_module().
        self.resource_group = None
        self.name = None
        self.location = None
        self.state = None
        self.sku = None
        self.tags = None
        self._containerregistry_mgmt_client = None

        self.results = dict(changed=False, state=dict())

        super(AzureRMContainerRegistry, self).__init__(
            derived_arg_spec=self.module_arg_spec,
            supports_check_mode=True,
            supports_tags=True)

    def exec_module(self, **kwargs):
        """Main module execution method.

        Determines whether a create, update, or no-op is required and
        returns the standard Ansible result dict.
        """
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        response = None
        to_do = Actions.NoAction

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Default to the resource group's location when none was given.
            self.location = resource_group.location

        # Check if the container registry instance is already present in the RG
        if self.state == 'present':
            response = self.get_containerregistry()

            if not response:
                to_do = Actions.Create
            else:
                self.log('Results : {0}'.format(response))
                self.results.update(response)
                # Only reconcile against a registry that finished provisioning.
                if response['provisioning_state'] == "Succeeded":
                    to_do = Actions.NoAction
                    if (self.location is not None) and self.location != response['location']:
                        to_do = Actions.Update
                    # Bug fix: compare the requested SKU against the current
                    # SKU. Previously this compared self.location against
                    # response['sku'], so SKU changes were never applied.
                    elif (self.sku is not None) and self.sku != response['sku']:
                        to_do = Actions.Update
                else:
                    to_do = Actions.NoAction

            self.log("Create / Update the container registry instance")
            if self.check_mode:
                return self.results

            self.results.update(self.create_update_containerregistry(to_do))
            if to_do != Actions.NoAction:
                self.results['changed'] = True
            else:
                self.results['changed'] = False

            self.log("Container registry instance created or updated")
        elif self.state == 'absent':
            if self.check_mode:
                return self.results

            # NOTE(review): 'changed' is left False after a deletion; confirm
            # whether callers rely on this or expect changed=True here.
            self.delete_containerregistry()
            self.log("Container registry instance deleted")

        return self.results

    def create_update_containerregistry(self, to_do):
        '''
        Creates or updates a container registry.

        :param to_do: one of the Actions codes decided by exec_module()
        :return: deserialized container registry instance state dictionary
        '''
        self.log("Creating / Updating the container registry instance {0}".format(self.name))
        try:
            if to_do != Actions.NoAction:
                if to_do == Actions.Create:
                    # Registry names are globally scoped; validate before creating.
                    name_status = self.containerregistry_mgmt_client.registries.check_name_availability(self.name)
                    if name_status.name_available:
                        poller = self.containerregistry_mgmt_client.registries.create(
                            resource_group_name=self.resource_group,
                            registry_name=self.name,
                            registry=Registry(
                                location=self.location,
                                sku=Sku(
                                    name=self.sku
                                ),
                                tags=self.tags,
                                admin_user_enabled=self.admin_user_enabled
                            )
                        )
                    else:
                        raise Exception("Invalid registry name. reason: " + name_status.reason + " message: " + name_status.message)
                else:
                    registry = self.containerregistry_mgmt_client.registries.get(self.resource_group, self.name)
                    if registry is not None:
                        poller = self.containerregistry_mgmt_client.registries.update(
                            resource_group_name=self.resource_group,
                            registry_name=self.name,
                            registry_update_parameters=RegistryUpdateParameters(
                                sku=Sku(
                                    name=self.sku
                                ),
                                tags=self.tags,
                                admin_user_enabled=self.admin_user_enabled
                            )
                        )
                    else:
                        raise Exception("Update registry failed as registry '" + self.name + "' doesn't exist.")
                response = self.get_poller_result(poller)
                if self.admin_user_enabled:
                    credentials = self.containerregistry_mgmt_client.registries.list_credentials(self.resource_group, self.name)
                else:
                    self.log('Cannot perform credential operations as admin user is disabled')
                    credentials = None
            else:
                response = None
                credentials = None
        except (CloudError, Exception) as exc:
            self.log('Error attempting to create / update the container registry instance.')
            self.fail("Error creating / updating the container registry instance: {0}".format(str(exc)))
        return create_containerregistry_dict(response, credentials)

    def delete_containerregistry(self):
        '''
        Deletes the specified container registry in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the container registry instance {0}".format(self.name))
        try:
            # .wait() blocks until the long-running delete operation completes.
            self.containerregistry_mgmt_client.registries.delete(self.resource_group, self.name).wait()
        except CloudError as e:
            self.log('Error attempting to delete the container registry instance.')
            self.fail("Error deleting the container registry instance: {0}".format(str(e)))
        return True

    def get_containerregistry(self):
        '''
        Gets the properties of the specified container registry.

        :return: deserialized container registry state dictionary, or None
                 when the registry does not exist
        '''
        self.log("Checking if the container registry instance {0} is present".format(self.name))
        found = False
        try:
            response = self.containerregistry_mgmt_client.registries.get(self.resource_group, self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Container registry instance : {0} found".format(response.name))
        except CloudError as e:
            # Absence is expected; anything else is a genuine failure.
            if e.error.error == 'ResourceNotFound':
                self.log('Did not find the container registry instance: {0}'.format(str(e)))
            else:
                self.fail('Error while trying to get container registry instance: {0}'.format(str(e)))
            response = None
        if found is True and self.admin_user_enabled is True:
            try:
                credentials = self.containerregistry_mgmt_client.registries.list_credentials(self.resource_group, self.name)
            except CloudError as e:
                self.fail('List registry credentials failed: {0}'.format(str(e)))
                credentials = None
        elif found is True and self.admin_user_enabled is False:
            credentials = None
        else:
            return None
        return create_containerregistry_dict(response, credentials)

    @property
    def containerregistry_mgmt_client(self):
        # Lazily construct and cache the management client.
        self.log('Getting container registry mgmt client')
        if not self._containerregistry_mgmt_client:
            self._containerregistry_mgmt_client = self.get_mgmt_svc_client(
                ContainerRegistryManagementClient,
                base_url=self._cloud_environment.endpoints.resource_manager,
                api_version='2017-10-01'
            )
        return self._containerregistry_mgmt_client
def main():
    """Module entry point: constructing the class runs the Ansible module."""
    AzureRMContainerRegistry()
if __name__ == '__main__':
main()
|
olemis/brython | refs/heads/master | www/tests/test_list.py | 12 | # list examples
# Basic list type checks.
z=[1,2,3]
assert z.__class__ == list
assert isinstance(z,list)
assert str(z)=="[1, 2, 3]"
a=['spam','eggs',100,1234]
# Slicing, concatenation and repetition.
print(a[:2]+['bacon',2*2])
print(3*a[:3]+['Boo!'])
print(a[:])
# Item assignment.
a[2]=a[2]+23
print(a)
# Slice assignment: replace, delete, insert.
a[0:2]=[1,12]
print(a)
a[0:2]=[]
print(a)
a[1:1]=['bletch','xyzzy']
print(a)
# Insert (a copy of) the list at its own head.
a[:0]=a
print(a)
# Clear the list in place.
a[:]=[]
print(a)
# extend() accepts any iterable: a string adds its characters one by one.
a.extend('ab')
print(a)
a.extend([1,2,33])
print(a)
# tuple
t = (1,8)
assert t.__class__ == tuple
assert isinstance(t,tuple)
assert str(t)=='(1, 8)'
|
vamsirajendra/nupic | refs/heads/master | tests/unit/nupic/engine/network_test.py | 22 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from mock import Mock
from mock import patch
import unittest2 as unittest
from nupic import engine
class NetworkTest(unittest.TestCase):
  """Python 2 tests for the nupic engine Network/Region API: error handling,
  dimensions, typed parameters, arrays, linking, and node specs."""

  def testErrorHandling(self):
    """Errors from addRegion/initialize/compute/getSpec surface as exceptions
    with the expected messages."""
    n = engine.Network()
    # Test trying to add non-existent node
    with self.assertRaises(Exception) as cm:
      n.addRegion('r', 'py.NonExistingNode', '')
    self.assertEqual(cm.exception.message, "Matching Python module for NonExistingNode not found.")
    orig_import = __import__
    def import_mock(name, *args):
      if name == "nupic.regions.UnimportableNode":
        raise SyntaxError("invalid syntax (UnimportableNode.py, line 5)")
      return orig_import(name, *args)
    with patch('__builtin__.__import__', side_effect=import_mock):
      # Test failure during import
      with self.assertRaises(Exception) as cm:
        n.addRegion('r', 'py.UnimportableNode', '')
      self.assertEqual(cm.exception.message, "invalid syntax (UnimportableNode.py, line 5)")
    # Test failure in the __init__() method
    with self.assertRaises(Exception) as cm:
      n.addRegion('r', 'py.TestNode', '{ failInInit: 1 }')
    self.assertEqual(cm.exception.message, "TestNode.__init__() Failing on purpose as requested")
    # Test failure inside the compute() method
    with self.assertRaises(Exception) as cm:
      r = n.addRegion('r', 'py.TestNode', '{ failInCompute: 1 }')
      r.dimensions = engine.Dimensions([4, 4])
      n.initialize()
      n.run(1)
    self.assertEqual(str(cm.exception),
                     'TestNode.compute() Failing on purpose as requested')
    # Test failure in the static getSpec
    from nupic.regions.TestNode import TestNode
    TestNode._failIngetSpec = True
    with self.assertRaises(Exception) as cm:
      TestNode.getSpec()
    self.assertEqual(str(cm.exception),
                     'Failing in TestNode.getSpec() as requested')
    del TestNode._failIngetSpec

  def testGetSpecFromType(self):
    """Region.getSpecFromType returns a spec with readable parameters."""
    ns = engine.Region.getSpecFromType('py.SPRegion')
    p = ns.parameters['breakPdb']
    self.assertEqual(p.accessMode, 'ReadWrite')

  def testOneRegionNetwork(self):
    """End-to-end exercise of a single-region network: dimensions, typed
    get/setParameter, Array/numpy interop, and zero-copy output access."""
    n = engine.Network()
    print "Number of regions in new network: %d" % len(n.regions)
    self.assertEqual(len(n.regions), 0)
    print "Adding level1SP"
    level1SP = n.addRegion("level1SP", "TestNode", "")
    print "Current dimensions are: %s" % level1SP.dimensions
    print "Number of regions in network: %d" % len(n.regions)
    self.assertEqual(len(n.regions), 1)
    self.assertEqual(len(n.regions), len(n.regions))
    print 'Node type: ', level1SP.type
    print("Attempting to initialize net when "
          "one region has unspecified dimensions")
    print "Current dimensions are: %s" % level1SP.dimensions
    with self.assertRaises(Exception):
      n.initialize()
    # Test Dimensions
    level1SP.dimensions = engine.Dimensions([4, 4])
    print "Set dimensions of level1SP to %s" % str(level1SP.dimensions)
    n.initialize()
    # Test Array
    a = engine.Array('Int32', 10)
    self.assertEqual(a.getType(), 'Int32')
    self.assertEqual(len(a), 10)
    import nupic
    self.assertEqual(type(a), nupic.bindings.engine_internal.Int32Array)
    for i in range(len(a)):
      a[i] = i
    for i in range(len(a)):
      self.assertEqual(type(a[i]), int)
      self.assertEqual(a[i], i)
      print i,
    print
    # --- Test Numpy Array
    print 'Testing Numpy Array'
    a = engine.Array('Byte', 15)
    print len(a)
    for i in range(len(a)):
      a[i] = ord('A') + i
    for i in range(len(a)):
      print a[i], ord('A') + i
      self.assertEqual(ord(a[i]), ord('A') + i)
    print
    print 'before asNumpyarray()'
    na = a.asNumpyArray()
    print 'after asNumpyarray()'
    self.assertEqual(na.shape, (15,))
    print 'na.shape:', na.shape
    na = na.reshape(5, 3)
    self.assertEqual(na.shape, (5, 3))
    print 'na.shape:', na.shape
    for i in range(5):
      for j in range(3):
        print chr(na[i, j]), ' ',
      print
    print
    # --- Test get/setParameter for Int64 and Real64
    print '---'
    print 'Testing get/setParameter for Int64/Real64'
    val = level1SP.getParameterInt64('int64Param')
    rval = level1SP.getParameterReal64('real64Param')
    print 'level1SP.int64Param = ', val
    print 'level1SP.real64Param = ', rval
    val = 20
    level1SP.setParameterInt64('int64Param', val)
    val = 0
    val = level1SP.getParameterInt64('int64Param')
    print 'level1SP.int64Param = ', val, ' after setting to 20'
    rval = 30.1
    level1SP.setParameterReal64('real64Param', rval)
    rval = 0.0
    rval = level1SP.getParameterReal64('real64Param')
    print 'level1SP.real64Param = ', rval, ' after setting to 30.1'
    # --- Test array parameter
    # Array a will be allocated inside getParameter
    print '---'
    print 'Testing get/setParameterArray'
    a = engine.Array('Int64', 4)
    level1SP.getParameterArray("int64ArrayParam", a)
    print 'level1SP.int64ArrayParam size = ', len(a)
    print 'level1SP.int64ArrayParam = [ ',
    for i in range(len(a)):
      print a[i],
    print ']'
    #
    # --- test setParameter of an Int64 Array ---
    print 'Setting level1SP.int64ArrayParam to [ 1 2 3 4 ]'
    a2 = engine.Array('Int64', 4)
    for i in range(4):
      a2[i] = i + 1
    level1SP.setParameterArray('int64ArrayParam', a2)
    # get the value of int64ArrayParam after the setParameter call.
    # The array a owns its buffer, so we can call releaseBuffer if we
    # want, but the buffer should be reused if we just pass it again.
    #// a.releaseBuffer();
    level1SP.getParameterArray('int64ArrayParam', a)
    print 'level1SP.int64ArrayParam size = ', len(a)
    print 'level1SP.int64ArrayParam = [ ',
    for i in range(len(a)):
      print a[i],
    print ']'
    level1SP.compute()
    print "Running for 2 iteraitons"
    n.run(2)
    # --- Test input/output access
    #
    # Getting access via zero-copy
    with self.assertRaises(Exception):
      level1SP.getOutputData('doesnotexist')
    output = level1SP.getOutputData('bottomUpOut')
    print 'Element count in bottomUpOut is ', len(output)
    # set the actual output
    output[11] = 7777
    output[12] = 54321
    # Create a reshaped view of the numpy array
    # original output is 32x1 -- 16 nodes, 2 elements per node
    # Reshape to 8 rows, 4 columns
    numpy_output2 = output.reshape(8, 4)
    # Make sure the original output, the numpy array and the reshaped numpy view
    # are all in sync and access the same underlying memory.
    numpy_output2[1, 0] = 5555
    self.assertEqual(output[4], 5555)
    output[5] = 3333
    self.assertEqual(numpy_output2[1, 1], 3333)
    numpy_output2[1, 2] = 4444
    # --- Test doc strings
    # TODO: commented out because I'm not sure what to do with these
    # now that regions have been converted to the Collection class.
    # print
    # print "Here are some docstrings for properties and methods:"
    # for name in ('regionCount', 'getRegionCount', 'getRegionByName'):
    #   x = getattr(engine.Network, name)
    #   if isinstance(x, property):
    #     print 'property Network.{0}: "{1}"'.format(name, x.__doc__)
    #   else:
    #     print 'method Network.{0}(): "{1}"'.format(name, x.__doc__)
    # Typed methods should return correct type
    print "real64Param: %.2f" % level1SP.getParameterReal64("real64Param")
    # Uncomment to get performance for getParameter
    if 0:
      import time
      t1 = time.time()
      t1 = time.time()
      for i in xrange(0, 1000000):
        # x = level1SP.getParameterInt64("int64Param") # buffered
        x = level1SP.getParameterReal64("real64Param") # unbuffered
      t2 = time.time()
      print "Time for 1M getParameter calls: %.2f seconds" % (t2 - t1)

  def testTwoRegionNetwork(self):
    """Linked regions: phases, dimension inference through TestFanIn2, and
    rejection of re-dimensioning after initialize."""
    n = engine.Network()
    region1 = n.addRegion("region1", "TestNode", "")
    region2 = n.addRegion("region2", "TestNode", "")
    names = []
    for name in n.regions:
      names.append(name)
    self.assertEqual(names, ['region1', 'region2'])
    print n.getPhases('region1')
    self.assertEqual(n.getPhases('region1'), (0,))
    self.assertEqual(n.getPhases('region2'), (1,))
    n.link("region1", "region2", "TestFanIn2", "")
    print "Initialize should fail..."
    with self.assertRaises(Exception):
      n.initialize()
    print "Setting region1 dims"
    r1dims = engine.Dimensions([6, 4])
    region1.setDimensions(r1dims)
    print "Initialize should now succeed"
    n.initialize()
    r2dims = region2.dimensions
    self.assertEqual(len(r2dims), 2)
    self.assertEqual(r2dims[0], 3)
    self.assertEqual(r2dims[1], 2)
    # Negative test
    with self.assertRaises(Exception):
      region2.setDimensions(r1dims)

  def testInputsAndOutputs(self):
    """Data flows from region1's output to region2's input across a link."""
    n = engine.Network()
    region1 = n.addRegion("region1", "TestNode", "")
    region2 = n.addRegion("region2", "TestNode", "")
    region1.setDimensions(engine.Dimensions([6, 4]))
    n.link("region1", "region2", "TestFanIn2", "")
    n.initialize()
    r1_output = region1.getOutputData("bottomUpOut")
    region1.compute()
    print "Region 1 output after first iteration:"
    print "r1_output:", r1_output
    region2.prepareInputs()
    r2_input = region2.getInputData("bottomUpIn")
    print "Region 2 input after first iteration:"
    print 'r2_input:', r2_input

  def testNodeSpec(self):
    """A C++ TestNode region exposes a printable spec."""
    n = engine.Network()
    r = n.addRegion("region", "TestNode", "")
    print r.getSpec()

  def testPyNodeGetSetParameter(self):
    """Typed Real64 get/setParameter round-trips on a Python region."""
    n = engine.Network()
    r = n.addRegion("region", "py.TestNode", "")
    print "Setting region1 dims"
    r.dimensions = engine.Dimensions([6, 4])
    print "Initialize should now succeed"
    n.initialize()
    result = r.getParameterReal64('real64Param')
    self.assertEqual(result, 64.1)
    r.setParameterReal64('real64Param', 77.7)
    result = r.getParameterReal64('real64Param')
    self.assertEqual(result, 77.7)

  def testPyNodeGetNodeSpec(self):
    """A Python region's spec reports its declared inputs and outputs."""
    n = engine.Network()
    r = n.addRegion("region", "py.TestNode", "")
    print "Setting region1 dims"
    r.setDimensions(engine.Dimensions([6, 4]))
    print "Initialize should now succeed"
    n.initialize()
    ns = r.spec
    self.assertEqual(len(ns.inputs), 1)
    i = ns.inputs['bottomUpIn']
    self.assertEqual(i.description, 'Primary input for the node')
    self.assertEqual(len(ns.outputs), 1)
    i = ns.outputs['bottomUpOut']
    self.assertEqual(i.description, 'Primary output for the node')

  def testTwoRegionPyNodeNetwork(self):
    """Same as testTwoRegionNetwork but with Python (py.TestNode) regions."""
    n = engine.Network()
    region1 = n.addRegion("region1", "py.TestNode", "")
    region2 = n.addRegion("region2", "py.TestNode", "")
    n.link("region1", "region2", "TestFanIn2", "")
    print "Initialize should fail..."
    with self.assertRaises(Exception):
      n.initialize()
    print "Setting region1 dims"
    r1dims = engine.Dimensions([6, 4])
    region1.setDimensions(r1dims)
    print "Initialize should now succeed"
    n.initialize()
    r2dims = region2.dimensions
    self.assertEqual(len(r2dims), 2)
    self.assertEqual(r2dims[0], 3)
    self.assertEqual(r2dims[1], 2)
if __name__ == "__main__":
unittest.main()
|
batxes/4c2vhic | refs/heads/master | SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/Representative.py | 6 | import _surface
# Chimera scene setup: import the Chimera/VolumePath APIs and resolve a
# factory for creating marker sets across different VolumePath versions.
import chimera
try:
    # Optional; only needed when driving Chimera through runCommand.
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    # Newer VolumePath exposes Marker_Set directly.
    from VolumePath import Marker_Set, Link
    new_marker_set=Marker_Set
except:
    # Older VolumePath: fall back to the volume path dialog's factory.
    from VolumePath import volume_path_dialog
    d= volume_path_dialog(True)
    new_marker_set= d.new_marker_set
# Registries of marker sets and surfaces created by this script.
marker_sets={}
surf_sets={}
# The original straight-line code repeated the same 5-line stanza for every
# particle; the marker data is tabulated here and placed in one loop.
# Each entry is (xyz position, rgb color, radius) for particles 0..46.
_PARTICLE_MARKERS = [
    ((272.681, 2031.23, 4894.15), (0.7, 0.7, 0.7), 182.271),
    ((11.9787, 1805.5, 4583.66), (0.7, 0.7, 0.7), 258.199),
    ((165.783, 1802.37, 4191.96), (0.7, 0.7, 0.7), 123.897),
    ((-203.732, 1940, 4318.69), (0.7, 0.7, 0.7), 146.739),
    ((-563.334, 2198.42, 4408.3), (0.7, 0.7, 0.7), 179.098),
    ((-81.1605, 2377.75, 4139.09), (0.7, 0.7, 0.7), 148.854),
    ((293.457, 2583.57, 3858.36), (0.7, 0.7, 0.7), 196.357),
    ((-220.838, 2641.36, 3758.42), (0.7, 0.7, 0.7), 166.873),
    ((-735.245, 2784.55, 3671.78), (0.7, 0.7, 0.7), 95.4711),
    ((-335.327, 2877.29, 3612.87), (0.7, 0.7, 0.7), 185.401),
    ((128.963, 2843.88, 3709.71), (0.7, 0.7, 0.7), 151.984),
    ((701.993, 2720.74, 3839.32), (0.7, 0.7, 0.7), 185.612),
    ((901.242, 2766.71, 4182.9), (0.7, 0.7, 0.7), 210.273),
    ((585.987, 2722.12, 4088.83), (0.7, 0.7, 0.7), 106.892),
    ((530.168, 2806.68, 4423.78), (0.7, 0.7, 0.7), 202.025),
    ((499.576, 2674.44, 4877.45), (0.7, 0.7, 0.7), 192.169),
    ((576.245, 2513.83, 5410.48), (0.7, 0.7, 0.7), 241.11),
    ((893.441, 2344.88, 5784.07), (0.7, 0.7, 0.7), 128.465),
    ((1199.48, 2053.52, 6180.66), (0.7, 0.7, 0.7), 217.38),
    ((1254.09, 1638.2, 6748.5), (0.7, 0.7, 0.7), 184.555),
    ((1654.24, 1760.66, 6240.57), (0.7, 0.7, 0.7), 140.055),
    ((2082.7, 1987.16, 6166.57), (0.7, 0.7, 0.7), 169.708),
    ((2595.37, 2092.3, 6205.16), (0.7, 0.7, 0.7), 184.639),
    ((2949.26, 2042.32, 5978.4), (0.7, 0.7, 0.7), 119.286),
    ((3173.7, 1910.53, 5758.37), (0.7, 0.7, 0.7), 147.754),
    ((3068.57, 1814.67, 5635.95), (0.7, 0.7, 0.7), 171.4),
    ((2764.07, 2063.56, 5493.29), (0.7, 0.7, 0.7), 156.341),
    ((2403.27, 2081.19, 5015.64), (0.7, 0.7, 0.7), 186.501),
    ((2127.14, 2134.53, 4510.58), (0.7, 0.7, 0.7), 308.325),
    ((1825.14, 2334.59, 4209.64), (0.7, 0.7, 0.7), 138.617),
    ((1755.45, 2563.12, 4000.77), (0.7, 0.7, 0.7), 130.03),
    ((1899.48, 2523.91, 4278.26), (0.7, 0.7, 0.7), 156.552),
    ((1811.76, 2233.18, 4303.99), (0.7, 0.7, 0.7), 183.244),
    ((1757.4, 1966.25, 4325.16), (0.7, 0.7, 0.7), 181.382),
    ((1887.84, 1808.14, 4329.07), (0.7, 0.7, 0.7), 101.943),
    ((1935.68, 1532.66, 4556.85), (1, 0.7, 0), 138.913),
    ((890.908, 1645.9, 4393.45), (0.7, 0.7, 0.7), 221.737),
    ((125.16, 1512.11, 4062.51), (0.7, 0.7, 0.7), 256.38),
    ((-51.3612, 1409.51, 3448.69), (0.7, 0.7, 0.7), 221.694),
    ((370.316, 1759.8, 3014.58), (0.7, 0.7, 0.7), 259.341),
    ((1004.5, 2266.29, 3066.26), (0.7, 0.7, 0.7), 117.89),
    ((1708.11, 2493.89, 3497.35), (0.7, 0.7, 0.7), 116.071),
    ((2043.01, 2171.35, 3621.79), (0.7, 0.7, 0.7), 268.224),
    ((1932.79, 1971.07, 3396.12), (0.7, 0.7, 0.7), 386.918),
    ((1560.78, 2094.73, 2928.5), (0.7, 0.7, 0.7), 121.316),
    ((1378.03, 1818.24, 2597.77), (0.7, 0.7, 0.7), 138.363),
    ((1644.73, 1502.03, 3165.69), (1, 0.7, 0), 175.207),
]
for _idx, (_pos, _color, _radius) in enumerate(_PARTICLE_MARKERS):
    _name = 'particle_%d geometry' % _idx
    # Create each named marker set on first use, as the original code did.
    if _name not in marker_sets:
        marker_sets[_name] = new_marker_set(_name)
    s = marker_sets[_name]
    mark = s.place_marker(_pos, _color, _radius)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((1376.58, 1204.22, 2583.22), (0.7, 0.7, 0.7), 131.468)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((1232.41, 708.97, 2067.83), (0.7, 0.7, 0.7), 287.894)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((1723.68, 922.822, 2165.43), (0.7, 0.7, 0.7), 88.1109)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((1956.82, 1350.46, 2514.07), (0.7, 0.7, 0.7), 145.385)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((2073.24, 1545.39, 2535.6), (0.7, 0.7, 0.7), 155.452)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((2062.97, 1241.77, 1990.92), (0.7, 0.7, 0.7), 145.512)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((2031.42, 1039.31, 1512.45), (0.7, 0.7, 0.7), 99.9972)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((1930.17, 1010.9, 1075.98), (0.7, 0.7, 0.7), 327.529)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((1952.39, 1632.65, 1225.68), (0.7, 0.7, 0.7), 137.983)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((2148.99, 1857.11, 1634.27), (0.7, 0.7, 0.7), 83.3733)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((2329.6, 2014.01, 2147.36), (0.7, 0.7, 0.7), 101.562)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((2389.09, 2078.23, 2661.6), (0.7, 0.7, 0.7), 165.689)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((2409.87, 1781.96, 2728.7), (0.7, 0.7, 0.7), 136.925)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((2419.34, 1659.37, 2724.61), (0.7, 0.7, 0.7), 123.389)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((2503.08, 1684.94, 2283.98), (0.7, 0.7, 0.7), 184.47)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((2744.52, 1635.2, 1526.96), (0.7, 0.7, 0.7), 148.473)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((3078.17, 1486.06, 610.413), (0.7, 0.7, 0.7), 241.406)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((2601.95, 1834.07, 913.616), (0.7, 0.7, 0.7), 182.736)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((2281.71, 1913.85, 1234.97), (0.7, 0.7, 0.7), 166.62)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((2408.93, 1769.75, 1460.44), (0.7, 0.7, 0.7), 113.872)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((2339.8, 1728.12, 1774.24), (0.7, 0.7, 0.7), 110.065)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((2215.36, 1490.27, 2051.3), (0.7, 0.7, 0.7), 150.08)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((2046.33, 1121.21, 2300.53), (0.7, 0.7, 0.7), 118.525)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((2011.67, 613.167, 2485.13), (0.7, 0.7, 0.7), 163.955)
if "particle_71 geometry" not in marker_sets:
s=new_marker_set('particle_71 geometry')
marker_sets["particle_71 geometry"]=s
s= marker_sets["particle_71 geometry"]
mark=s.place_marker((2193.04, 324.606, 2304.31), (0.7, 0.7, 0.7), 170.131)
if "particle_72 geometry" not in marker_sets:
s=new_marker_set('particle_72 geometry')
marker_sets["particle_72 geometry"]=s
s= marker_sets["particle_72 geometry"]
mark=s.place_marker((2449.81, 628.547, 1662.25), (0.7, 0.7, 0.7), 78.2127)
if "particle_73 geometry" not in marker_sets:
s=new_marker_set('particle_73 geometry')
marker_sets["particle_73 geometry"]=s
s= marker_sets["particle_73 geometry"]
mark=s.place_marker((2631.94, 1056.21, 974.271), (0.7, 0.7, 0.7), 251.896)
if "particle_74 geometry" not in marker_sets:
s=new_marker_set('particle_74 geometry')
marker_sets["particle_74 geometry"]=s
s= marker_sets["particle_74 geometry"]
mark=s.place_marker((2610.74, 1536.49, 486.053), (0.7, 0.7, 0.7), 167.55)
if "particle_75 geometry" not in marker_sets:
s=new_marker_set('particle_75 geometry')
marker_sets["particle_75 geometry"]=s
s= marker_sets["particle_75 geometry"]
mark=s.place_marker((2470.23, 1915.07, 349.495), (0.7, 0.7, 0.7), 167.846)
if "particle_76 geometry" not in marker_sets:
s=new_marker_set('particle_76 geometry')
marker_sets["particle_76 geometry"]=s
s= marker_sets["particle_76 geometry"]
mark=s.place_marker((2936.28, 1754.23, 332.292), (0.7, 0.7, 0.7), 259.68)
if "particle_77 geometry" not in marker_sets:
s=new_marker_set('particle_77 geometry')
marker_sets["particle_77 geometry"]=s
s= marker_sets["particle_77 geometry"]
mark=s.place_marker((3037.31, 1313.74, 447.311), (0.7, 0.7, 0.7), 80.2854)
if "particle_78 geometry" not in marker_sets:
s=new_marker_set('particle_78 geometry')
marker_sets["particle_78 geometry"]=s
s= marker_sets["particle_78 geometry"]
mark=s.place_marker((3028.51, 1178.07, 288.069), (0.7, 0.7, 0.7), 82.4427)
if "particle_79 geometry" not in marker_sets:
s=new_marker_set('particle_79 geometry')
marker_sets["particle_79 geometry"]=s
s= marker_sets["particle_79 geometry"]
mark=s.place_marker((3305.56, 1232.53, 36.7666), (0.7, 0.7, 0.7), 212.811)
if "particle_80 geometry" not in marker_sets:
s=new_marker_set('particle_80 geometry')
marker_sets["particle_80 geometry"]=s
s= marker_sets["particle_80 geometry"]
mark=s.place_marker((3708, 1753.7, 391.192), (0.7, 0.7, 0.7), 176.391)
if "particle_81 geometry" not in marker_sets:
s=new_marker_set('particle_81 geometry')
marker_sets["particle_81 geometry"]=s
s= marker_sets["particle_81 geometry"]
mark=s.place_marker((3600.8, 2098.29, 1028.86), (0.7, 0.7, 0.7), 99.3204)
if "particle_82 geometry" not in marker_sets:
s=new_marker_set('particle_82 geometry')
marker_sets["particle_82 geometry"]=s
s= marker_sets["particle_82 geometry"]
mark=s.place_marker((3432.67, 2048.04, 1608.39), (0.7, 0.7, 0.7), 166.62)
if "particle_83 geometry" not in marker_sets:
s=new_marker_set('particle_83 geometry')
marker_sets["particle_83 geometry"]=s
s= marker_sets["particle_83 geometry"]
mark=s.place_marker((3560.7, 1982.38, 1893.66), (0.7, 0.7, 0.7), 102.831)
if "particle_84 geometry" not in marker_sets:
s=new_marker_set('particle_84 geometry')
marker_sets["particle_84 geometry"]=s
s= marker_sets["particle_84 geometry"]
mark=s.place_marker((4014.78, 1933.01, 1134.03), (0.7, 0.7, 0.7), 65.0997)
if "particle_85 geometry" not in marker_sets:
s=new_marker_set('particle_85 geometry')
marker_sets["particle_85 geometry"]=s
s= marker_sets["particle_85 geometry"]
mark=s.place_marker((3498.94, 1916.69, 944.697), (0.7, 0.7, 0.7), 92.1294)
if "particle_86 geometry" not in marker_sets:
s=new_marker_set('particle_86 geometry')
marker_sets["particle_86 geometry"]=s
s= marker_sets["particle_86 geometry"]
mark=s.place_marker((2917.07, 1943.69, 1085.1), (0.7, 0.7, 0.7), 194.791)
if "particle_87 geometry" not in marker_sets:
s=new_marker_set('particle_87 geometry')
marker_sets["particle_87 geometry"]=s
s= marker_sets["particle_87 geometry"]
mark=s.place_marker((2478.5, 2053.06, 1117.41), (0.7, 0.7, 0.7), 120.766)
if "particle_88 geometry" not in marker_sets:
s=new_marker_set('particle_88 geometry')
marker_sets["particle_88 geometry"]=s
s= marker_sets["particle_88 geometry"]
mark=s.place_marker((2526.09, 2187.07, 567.965), (0.7, 0.7, 0.7), 217.803)
if "particle_89 geometry" not in marker_sets:
s=new_marker_set('particle_89 geometry')
marker_sets["particle_89 geometry"]=s
s= marker_sets["particle_89 geometry"]
mark=s.place_marker((2634.02, 1821.08, 639.707), (0.7, 0.7, 0.7), 115.775)
if "particle_90 geometry" not in marker_sets:
s=new_marker_set('particle_90 geometry')
marker_sets["particle_90 geometry"]=s
s= marker_sets["particle_90 geometry"]
mark=s.place_marker((2542.95, 1508.24, 896.631), (0.7, 0.7, 0.7), 115.648)
if "particle_91 geometry" not in marker_sets:
s=new_marker_set('particle_91 geometry')
marker_sets["particle_91 geometry"]=s
s= marker_sets["particle_91 geometry"]
mark=s.place_marker((2475.17, 1672.11, 1177.28), (0.7, 0.7, 0.7), 83.8386)
if "particle_92 geometry" not in marker_sets:
s=new_marker_set('particle_92 geometry')
marker_sets["particle_92 geometry"]=s
s= marker_sets["particle_92 geometry"]
mark=s.place_marker((2275.16, 1980.7, 1124.07), (0.7, 0.7, 0.7), 124.32)
if "particle_93 geometry" not in marker_sets:
s=new_marker_set('particle_93 geometry')
marker_sets["particle_93 geometry"]=s
s= marker_sets["particle_93 geometry"]
mark=s.place_marker((1886.58, 2189.28, 1091.96), (0.7, 0.7, 0.7), 185.993)
if "particle_94 geometry" not in marker_sets:
s=new_marker_set('particle_94 geometry')
marker_sets["particle_94 geometry"]=s
s= marker_sets["particle_94 geometry"]
mark=s.place_marker((1347.55, 2068.17, 804.467), (0.7, 0.7, 0.7), 238.826)
if "particle_95 geometry" not in marker_sets:
s=new_marker_set('particle_95 geometry')
marker_sets["particle_95 geometry"]=s
s= marker_sets["particle_95 geometry"]
mark=s.place_marker((1097.13, 1654.46, 546.164), (0.7, 0.7, 0.7), 128.465)
if "particle_96 geometry" not in marker_sets:
s=new_marker_set('particle_96 geometry')
marker_sets["particle_96 geometry"]=s
s= marker_sets["particle_96 geometry"]
mark=s.place_marker((1543.72, 1299.97, 817.239), (0.7, 0.7, 0.7), 203.209)
if "particle_97 geometry" not in marker_sets:
s=new_marker_set('particle_97 geometry')
marker_sets["particle_97 geometry"]=s
s= marker_sets["particle_97 geometry"]
mark=s.place_marker((1951.26, 1467.92, 1060.33), (0.7, 0.7, 0.7), 160.486)
if "particle_98 geometry" not in marker_sets:
s=new_marker_set('particle_98 geometry')
marker_sets["particle_98 geometry"]=s
s= marker_sets["particle_98 geometry"]
mark=s.place_marker((1936.61, 1774.11, 904.959), (0.7, 0.7, 0.7), 149.277)
if "particle_99 geometry" not in marker_sets:
s=new_marker_set('particle_99 geometry')
marker_sets["particle_99 geometry"]=s
s= marker_sets["particle_99 geometry"]
mark=s.place_marker((1914.83, 1585.44, 397.057), (0.7, 0.7, 0.7), 35.7435)
if "particle_100 geometry" not in marker_sets:
s=new_marker_set('particle_100 geometry')
marker_sets["particle_100 geometry"]=s
s= marker_sets["particle_100 geometry"]
mark=s.place_marker((2338.65, 1480.77, 1287.08), (0.7, 0.7, 0.7), 98.3898)
if "particle_101 geometry" not in marker_sets:
s=new_marker_set('particle_101 geometry')
marker_sets["particle_101 geometry"]=s
s= marker_sets["particle_101 geometry"]
mark=s.place_marker((2730.91, 1619.64, 2250.85), (0.7, 0.7, 0.7), 188.404)
if "particle_102 geometry" not in marker_sets:
s=new_marker_set('particle_102 geometry')
marker_sets["particle_102 geometry"]=s
s= marker_sets["particle_102 geometry"]
mark=s.place_marker((2841.14, 2013.34, 2560.32), (0.7, 0.7, 0.7), 110.318)
if "particle_103 geometry" not in marker_sets:
s=new_marker_set('particle_103 geometry')
marker_sets["particle_103 geometry"]=s
s= marker_sets["particle_103 geometry"]
mark=s.place_marker((2935.52, 2062.51, 2189.99), (0.7, 0.7, 0.7), 127.534)
if "particle_104 geometry" not in marker_sets:
s=new_marker_set('particle_104 geometry')
marker_sets["particle_104 geometry"]=s
s= marker_sets["particle_104 geometry"]
mark=s.place_marker((2878.99, 1980.31, 1825.8), (0.7, 0.7, 0.7), 91.368)
if "particle_105 geometry" not in marker_sets:
s=new_marker_set('particle_105 geometry')
marker_sets["particle_105 geometry"]=s
s= marker_sets["particle_105 geometry"]
mark=s.place_marker((2703.67, 1850.49, 1494.66), (0.7, 0.7, 0.7), 131.045)
if "particle_106 geometry" not in marker_sets:
s=new_marker_set('particle_106 geometry')
marker_sets["particle_106 geometry"]=s
s= marker_sets["particle_106 geometry"]
mark=s.place_marker((2373.48, 1671.09, 1301.24), (0.7, 0.7, 0.7), 143.608)
if "particle_107 geometry" not in marker_sets:
s=new_marker_set('particle_107 geometry')
marker_sets["particle_107 geometry"]=s
s= marker_sets["particle_107 geometry"]
mark=s.place_marker((2072.96, 1853.4, 1145.38), (0.7, 0.7, 0.7), 135.783)
if "particle_108 geometry" not in marker_sets:
s=new_marker_set('particle_108 geometry')
marker_sets["particle_108 geometry"]=s
s= marker_sets["particle_108 geometry"]
mark=s.place_marker((1840.39, 2050.64, 1021.54), (0.7, 0.7, 0.7), 92.5947)
if "particle_109 geometry" not in marker_sets:
s=new_marker_set('particle_109 geometry')
marker_sets["particle_109 geometry"]=s
s= marker_sets["particle_109 geometry"]
mark=s.place_marker((1968.39, 2275.83, 1128.27), (0.7, 0.7, 0.7), 150.123)
if "particle_110 geometry" not in marker_sets:
s=new_marker_set('particle_110 geometry')
marker_sets["particle_110 geometry"]=s
s= marker_sets["particle_110 geometry"]
mark=s.place_marker((2200.31, 2412.19, 1206.37), (0.7, 0.7, 0.7), 121.57)
if "particle_111 geometry" not in marker_sets:
s=new_marker_set('particle_111 geometry')
marker_sets["particle_111 geometry"]=s
s= marker_sets["particle_111 geometry"]
mark=s.place_marker((2250.14, 2656.98, 988.778), (0.7, 0.7, 0.7), 104.777)
if "particle_112 geometry" not in marker_sets:
s=new_marker_set('particle_112 geometry')
marker_sets["particle_112 geometry"]=s
s= marker_sets["particle_112 geometry"]
mark=s.place_marker((2371.28, 2784.54, 1371.1), (0.7, 0.7, 0.7), 114.844)
if "particle_113 geometry" not in marker_sets:
s=new_marker_set('particle_113 geometry')
marker_sets["particle_113 geometry"]=s
s= marker_sets["particle_113 geometry"]
mark=s.place_marker((2484.59, 2927.78, 1787.69), (0.7, 0.7, 0.7), 150.588)
if "particle_114 geometry" not in marker_sets:
s=new_marker_set('particle_114 geometry')
marker_sets["particle_114 geometry"]=s
s= marker_sets["particle_114 geometry"]
mark=s.place_marker((2689.29, 2652.68, 2043.18), (0.7, 0.7, 0.7), 103.55)
if "particle_115 geometry" not in marker_sets:
s=new_marker_set('particle_115 geometry')
marker_sets["particle_115 geometry"]=s
s= marker_sets["particle_115 geometry"]
mark=s.place_marker((3164.31, 2344.77, 2096.22), (0.7, 0.7, 0.7), 215.392)
if "particle_116 geometry" not in marker_sets:
s=new_marker_set('particle_116 geometry')
marker_sets["particle_116 geometry"]=s
s= marker_sets["particle_116 geometry"]
mark=s.place_marker((3656.91, 2106.21, 2286.17), (0.7, 0.7, 0.7), 99.9126)
if "particle_117 geometry" not in marker_sets:
s=new_marker_set('particle_117 geometry')
marker_sets["particle_117 geometry"]=s
s= marker_sets["particle_117 geometry"]
mark=s.place_marker((4274.26, 2001.44, 2037.26), (0.7, 0.7, 0.7), 99.7857)
if "particle_118 geometry" not in marker_sets:
s=new_marker_set('particle_118 geometry')
marker_sets["particle_118 geometry"]=s
s= marker_sets["particle_118 geometry"]
mark=s.place_marker((4628.01, 1795.62, 1675.95), (0.7, 0.7, 0.7), 109.98)
if "particle_119 geometry" not in marker_sets:
s=new_marker_set('particle_119 geometry')
marker_sets["particle_119 geometry"]=s
s= marker_sets["particle_119 geometry"]
mark=s.place_marker((4241.73, 2121.71, 1709.88), (0.7, 0.7, 0.7), 102.831)
if "particle_120 geometry" not in marker_sets:
s=new_marker_set('particle_120 geometry')
marker_sets["particle_120 geometry"]=s
s= marker_sets["particle_120 geometry"]
mark=s.place_marker((3829.25, 2163.17, 1728.06), (0.7, 0.7, 0.7), 103.593)
if "particle_121 geometry" not in marker_sets:
s=new_marker_set('particle_121 geometry')
marker_sets["particle_121 geometry"]=s
s= marker_sets["particle_121 geometry"]
mark=s.place_marker((3330.38, 2167.4, 1612.8), (0.7, 0.7, 0.7), 173.472)
if "particle_122 geometry" not in marker_sets:
s=new_marker_set('particle_122 geometry')
marker_sets["particle_122 geometry"]=s
s= marker_sets["particle_122 geometry"]
mark=s.place_marker((2970.62, 2015.38, 1180.48), (0.7, 0.7, 0.7), 113.575)
if "particle_123 geometry" not in marker_sets:
s=new_marker_set('particle_123 geometry')
marker_sets["particle_123 geometry"]=s
s= marker_sets["particle_123 geometry"]
mark=s.place_marker((2470.76, 2039.6, 1077.54), (0.7, 0.7, 0.7), 128.296)
if "particle_124 geometry" not in marker_sets:
s=new_marker_set('particle_124 geometry')
marker_sets["particle_124 geometry"]=s
s= marker_sets["particle_124 geometry"]
mark=s.place_marker((2030.98, 2129.13, 941.108), (0.7, 0.7, 0.7), 145.004)
if "particle_125 geometry" not in marker_sets:
s=new_marker_set('particle_125 geometry')
marker_sets["particle_125 geometry"]=s
s= marker_sets["particle_125 geometry"]
mark=s.place_marker((1530.5, 2316.15, 986.034), (0.7, 0.7, 0.7), 148.261)
if "particle_126 geometry" not in marker_sets:
s=new_marker_set('particle_126 geometry')
marker_sets["particle_126 geometry"]=s
s= marker_sets["particle_126 geometry"]
mark=s.place_marker((959.472, 2372.52, 703.337), (0.7, 0.7, 0.7), 127.704)
if "particle_127 geometry" not in marker_sets:
s=new_marker_set('particle_127 geometry')
marker_sets["particle_127 geometry"]=s
s= marker_sets["particle_127 geometry"]
mark=s.place_marker((484.593, 2221.85, 404.69), (0.7, 0.7, 0.7), 129.607)
if "particle_128 geometry" not in marker_sets:
s=new_marker_set('particle_128 geometry')
marker_sets["particle_128 geometry"]=s
s= marker_sets["particle_128 geometry"]
mark=s.place_marker((869.504, 1898.97, 398.123), (0.7, 0.7, 0.7), 139.759)
if "particle_129 geometry" not in marker_sets:
s=new_marker_set('particle_129 geometry')
marker_sets["particle_129 geometry"]=s
s= marker_sets["particle_129 geometry"]
mark=s.place_marker((1431.34, 1611.31, 648.943), (0.7, 0.7, 0.7), 118.567)
if "particle_130 geometry" not in marker_sets:
s=new_marker_set('particle_130 geometry')
marker_sets["particle_130 geometry"]=s
s= marker_sets["particle_130 geometry"]
mark=s.place_marker((1865.45, 1721.78, 614.522), (0.7, 0.7, 0.7), 136.164)
if "particle_131 geometry" not in marker_sets:
s=new_marker_set('particle_131 geometry')
marker_sets["particle_131 geometry"]=s
s= marker_sets["particle_131 geometry"]
mark=s.place_marker((2243.73, 1911.7, 785.869), (0.7, 0.7, 0.7), 121.655)
if "particle_132 geometry" not in marker_sets:
s=new_marker_set('particle_132 geometry')
marker_sets["particle_132 geometry"]=s
s= marker_sets["particle_132 geometry"]
mark=s.place_marker((2505.24, 2220.99, 916.429), (0.7, 0.7, 0.7), 127.492)
if "particle_133 geometry" not in marker_sets:
s=new_marker_set('particle_133 geometry')
marker_sets["particle_133 geometry"]=s
s= marker_sets["particle_133 geometry"]
mark=s.place_marker((2765.25, 2494.54, 715.646), (0.7, 0.7, 0.7), 138.617)
if "particle_134 geometry" not in marker_sets:
s=new_marker_set('particle_134 geometry')
marker_sets["particle_134 geometry"]=s
s= marker_sets["particle_134 geometry"]
mark=s.place_marker((2935.38, 2767.71, 907.946), (0.7, 0.7, 0.7), 120.766)
if "particle_135 geometry" not in marker_sets:
s=new_marker_set('particle_135 geometry')
marker_sets["particle_135 geometry"]=s
s= marker_sets["particle_135 geometry"]
mark=s.place_marker((3140.72, 2754.71, 1166.5), (0.7, 0.7, 0.7), 145.893)
if "particle_136 geometry" not in marker_sets:
s=new_marker_set('particle_136 geometry')
marker_sets["particle_136 geometry"]=s
s= marker_sets["particle_136 geometry"]
mark=s.place_marker((2989.08, 2450.94, 1502.39), (0.7, 0.7, 0.7), 185.02)
if "particle_137 geometry" not in marker_sets:
s=new_marker_set('particle_137 geometry')
marker_sets["particle_137 geometry"]=s
s= marker_sets["particle_137 geometry"]
mark=s.place_marker((2828.41, 2331.42, 1995.43), (0.7, 0.7, 0.7), 221.314)
if "particle_138 geometry" not in marker_sets:
s=new_marker_set('particle_138 geometry')
marker_sets["particle_138 geometry"]=s
s= marker_sets["particle_138 geometry"]
mark=s.place_marker((2582.83, 2437.03, 2406.55), (0.7, 0.7, 0.7), 165.139)
if "particle_139 geometry" not in marker_sets:
s=new_marker_set('particle_139 geometry')
marker_sets["particle_139 geometry"]=s
s= marker_sets["particle_139 geometry"]
mark=s.place_marker((2460.74, 2316.65, 2368.87), (0.7, 0.7, 0.7), 179.437)
if "particle_140 geometry" not in marker_sets:
s=new_marker_set('particle_140 geometry')
marker_sets["particle_140 geometry"]=s
s= marker_sets["particle_140 geometry"]
mark=s.place_marker((2736.36, 2125.85, 2136.17), (0.7, 0.7, 0.7), 137.898)
if "particle_141 geometry" not in marker_sets:
s=new_marker_set('particle_141 geometry')
marker_sets["particle_141 geometry"]=s
s= marker_sets["particle_141 geometry"]
mark=s.place_marker((2848.9, 2003.19, 1842.44), (0.7, 0.7, 0.7), 124.658)
if "particle_142 geometry" not in marker_sets:
s=new_marker_set('particle_142 geometry')
marker_sets["particle_142 geometry"]=s
s= marker_sets["particle_142 geometry"]
mark=s.place_marker((2868.95, 2201.63, 1567.47), (0.7, 0.7, 0.7), 97.7553)
if "particle_143 geometry" not in marker_sets:
s=new_marker_set('particle_143 geometry')
marker_sets["particle_143 geometry"]=s
s= marker_sets["particle_143 geometry"]
mark=s.place_marker((2969.26, 2330.07, 1309.11), (0.7, 0.7, 0.7), 92.9331)
if "particle_144 geometry" not in marker_sets:
s=new_marker_set('particle_144 geometry')
marker_sets["particle_144 geometry"]=s
s= marker_sets["particle_144 geometry"]
mark=s.place_marker((3138.73, 2344.16, 1006.29), (0.7, 0.7, 0.7), 123.135)
if "particle_145 geometry" not in marker_sets:
s=new_marker_set('particle_145 geometry')
marker_sets["particle_145 geometry"]=s
s= marker_sets["particle_145 geometry"]
mark=s.place_marker((3071.48, 2233.32, 1388.04), (0.7, 0.7, 0.7), 125.716)
if "particle_146 geometry" not in marker_sets:
s=new_marker_set('particle_146 geometry')
marker_sets["particle_146 geometry"]=s
s= marker_sets["particle_146 geometry"]
mark=s.place_marker((2876.51, 2256.54, 1632.11), (0.7, 0.7, 0.7), 127.534)
if "particle_147 geometry" not in marker_sets:
s=new_marker_set('particle_147 geometry')
marker_sets["particle_147 geometry"]=s
s= marker_sets["particle_147 geometry"]
mark=s.place_marker((2617.25, 2234.93, 1509.67), (0.7, 0.7, 0.7), 94.9212)
if "particle_148 geometry" not in marker_sets:
s=new_marker_set('particle_148 geometry')
marker_sets["particle_148 geometry"]=s
s= marker_sets["particle_148 geometry"]
mark=s.place_marker((2411.38, 2220.14, 1890.31), (0.7, 0.7, 0.7), 137.644)
if "particle_149 geometry" not in marker_sets:
s=new_marker_set('particle_149 geometry')
marker_sets["particle_149 geometry"]=s
s= marker_sets["particle_149 geometry"]
mark=s.place_marker((2162.48, 2276.29, 2129.47), (0.7, 0.7, 0.7), 149.277)
if "particle_150 geometry" not in marker_sets:
s=new_marker_set('particle_150 geometry')
marker_sets["particle_150 geometry"]=s
s= marker_sets["particle_150 geometry"]
mark=s.place_marker((2239.16, 2602.14, 2027.01), (0.7, 0.7, 0.7), 103.677)
if "particle_151 geometry" not in marker_sets:
s=new_marker_set('particle_151 geometry')
marker_sets["particle_151 geometry"]=s
s= marker_sets["particle_151 geometry"]
mark=s.place_marker((2278.61, 2940.5, 1688.62), (0.7, 0.7, 0.7), 99.6588)
if "particle_152 geometry" not in marker_sets:
s=new_marker_set('particle_152 geometry')
marker_sets["particle_152 geometry"]=s
s= marker_sets["particle_152 geometry"]
mark=s.place_marker((2290.7, 3203.84, 1427.85), (0.7, 0.7, 0.7), 134.133)
if "particle_153 geometry" not in marker_sets:
s=new_marker_set('particle_153 geometry')
marker_sets["particle_153 geometry"]=s
s= marker_sets["particle_153 geometry"]
mark=s.place_marker((2557.27, 3071.05, 1604.05), (0.7, 0.7, 0.7), 173.007)
if "particle_154 geometry" not in marker_sets:
s=new_marker_set('particle_154 geometry')
marker_sets["particle_154 geometry"]=s
s= marker_sets["particle_154 geometry"]
mark=s.place_marker((2531.55, 2757.62, 2081.55), (0.7, 0.7, 0.7), 141.028)
if "particle_155 geometry" not in marker_sets:
s=new_marker_set('particle_155 geometry')
marker_sets["particle_155 geometry"]=s
s= marker_sets["particle_155 geometry"]
mark=s.place_marker((2518.02, 2426.34, 2411.43), (0.7, 0.7, 0.7), 161.121)
if "particle_156 geometry" not in marker_sets:
s=new_marker_set('particle_156 geometry')
marker_sets["particle_156 geometry"]=s
s= marker_sets["particle_156 geometry"]
mark=s.place_marker((2331.62, 2153.77, 2288.76), (0.7, 0.7, 0.7), 119.582)
if "particle_157 geometry" not in marker_sets:
s=new_marker_set('particle_157 geometry')
marker_sets["particle_157 geometry"]=s
s= marker_sets["particle_157 geometry"]
mark=s.place_marker((2353.63, 2135.4, 1879.24), (0.7, 0.7, 0.7), 137.094)
if "particle_158 geometry" not in marker_sets:
s=new_marker_set('particle_158 geometry')
marker_sets["particle_158 geometry"]=s
s= marker_sets["particle_158 geometry"]
mark=s.place_marker((2596.83, 2052.21, 1446.34), (0.7, 0.7, 0.7), 149.234)
if "particle_159 geometry" not in marker_sets:
s=new_marker_set('particle_159 geometry')
marker_sets["particle_159 geometry"]=s
s= marker_sets["particle_159 geometry"]
mark=s.place_marker((2971.71, 1919.7, 1652.94), (0.7, 0.7, 0.7), 151.011)
if "particle_160 geometry" not in marker_sets:
s=new_marker_set('particle_160 geometry')
marker_sets["particle_160 geometry"]=s
s= marker_sets["particle_160 geometry"]
mark=s.place_marker((3185.44, 1939.92, 2144.87), (0.7, 0.7, 0.7), 184.216)
if "particle_161 geometry" not in marker_sets:
s=new_marker_set('particle_161 geometry')
marker_sets["particle_161 geometry"]=s
s= marker_sets["particle_161 geometry"]
mark=s.place_marker((3325.05, 2323.7, 2239.6), (0.7, 0.7, 0.7), 170.596)
if "particle_162 geometry" not in marker_sets:
s=new_marker_set('particle_162 geometry')
marker_sets["particle_162 geometry"]=s
s= marker_sets["particle_162 geometry"]
mark=s.place_marker((3560.62, 2447.83, 1655.44), (0.7, 0.7, 0.7), 215.603)
if "particle_163 geometry" not in marker_sets:
s=new_marker_set('particle_163 geometry')
marker_sets["particle_163 geometry"]=s
s= marker_sets["particle_163 geometry"]
mark=s.place_marker((3880.92, 2532.4, 822.769), (0.7, 0.7, 0.7), 79.0164)
# Marker placements for particles 164-311 (auto-generated Chimera marker
# script), rewritten as a data-driven loop to replace ~740 lines of
# copy-pasted 5-line units with one table of (x, y, z, radius) entries.
# Relies on `marker_sets` and `new_marker_set` defined earlier in this file.
_GREY = (0.7, 0.7, 0.7)
# Particles whose marker color differs from the default grey.
_COLOR_OVERRIDES = {271: (1, 0.7, 0), 292: (1, 0.7, 0)}
# One entry per particle, in particle-number order starting at 164:
# (x, y, z, radius).
_PARTICLES = [
    (3781.38, 2841.09, 716.87, 77.2821),
    (3467.41, 2922.63, 863.638, 188.658),
    (3534.94, 3185.09, 1011.58, 115.437),
    (3272.63, 2876.92, 1461.03, 88.4916),
    (2999.65, 2552.53, 1926.21, 108.88),
    (2936.54, 2240.37, 2089.69, 172.119),
    (3173.33, 2299.54, 1673.49, 139.505),
    (3416.07, 2361.71, 1249.57, 92.7639),
    (3478.6, 2134.1, 1326.2, 89.8452),
    (3538.04, 2346.02, 1506.88, 149.446),
    (3658.73, 2653.38, 1428.39, 126.858),
    (3652.08, 2577.6, 1100.18, 106.046),
    (3485.06, 2134.09, 912.541, 156.298),
    (3196.34, 1666.87, 672.315, 231.212),
    (3014.49, 1253.51, 995.933, 88.4916),
    (2965.1, 1251.67, 1479, 111.334),
    (2928.27, 1611.01, 1970.61, 127.619),
    (2865.6, 1916.29, 2294.36, 230.746),
    (2945.95, 1973.35, 1897.24, 124.573),
    (3218.58, 1936.24, 1328.25, 124.489),
    (3323.56, 2252.37, 1147.3, 196.61),
    (3493.86, 2177.55, 1440.18, 134.049),
    (3734.29, 1980.08, 1528.65, 141.493),
    (4054.94, 1757.43, 1394.34, 172.203),
    (3775.5, 2326.24, 1500.32, 271.354),
    (3354.46, 2599.48, 1402.04, 97.0785),
    (3009.74, 2654.76, 1179.18, 151.857),
    (2533.26, 2958.62, 1030.77, 199.233),
    (2149.57, 3060.81, 1473.31, 118.863),
    (1696.22, 3094.45, 1488.95, 172.415),
    (1282.27, 3165.32, 1158.27, 134.26),
    (781.026, 3386.32, 385.082, 139.548),
    (937.853, 2897.65, 214.221, 196.526),
    (1493.67, 2447.37, 520.682, 136.206),
    (1929.09, 2083.11, 1313.44, 152.322),
    (2295.97, 2004.84, 1868.25, 126.054),
    (2716.32, 2004.06, 1707.89, 164.378),
    (3059.31, 2215.73, 1479.95, 122.205),
    (3297.18, 2562.96, 1344.03, 134.979),
    (3125.43, 2799.64, 1574.44, 136.375),
    (2902.33, 2758.15, 1377.18, 151.688),
    (2919.72, 2674.17, 1203.58, 116.156),
    (2723.98, 2514.94, 1868.63, 122.839),
    (2467, 2219.81, 2182.58, 164.716),
    (2301.48, 2051.31, 1351.86, 303.672),
    (2298.69, 2341.1, 356.216, 220.298),
    (2877.66, 2571.42, 470.118, 175.883),
    (3496.2, 2397.79, 678.151, 233.581),
    (3723.05, 1771.95, 1056.03, 231.127),
    (4216.87, 1680.71, 1398.93, 247.413),
    (4730.53, 1998.71, 1637.63, 200.206),
    (4763.48, 2421.92, 1589.43, 150.419),
    (4332.16, 2246.47, 1989.03, 140.14),
    (4181.52, 1913.14, 2280.24, 132.949),
    (3934.47, 1752.68, 2545.23, 141.113),
    (3801.32, 1507.06, 2343.54, 171.526),
    (3896.29, 1455.11, 1758.06, 326.937),
    (3742.58, 1742.51, 1282.77, 92.0871),
    (3283.6, 1740.46, 1365.67, 210.273),
    (2975.8, 1748.67, 2051.98, 122.628),
    (3006.86, 1754.38, 2310.26, 109.176),
    (3176.68, 1898.47, 2113.09, 142.213),
    (3000.8, 1777.44, 1767.62, 250.078),
    (2971.7, 2236.13, 1792.14, 123.558),
    (2898.36, 2572.17, 2132.41, 235.992),
    (2796.27, 3007.14, 2345.46, 172.373),
    (2826.96, 3373.17, 2061.78, 152.322),
    (2930.8, 3570.36, 1836.33, 196.653),
    (3045.32, 3570.49, 2173.53, 134.091),
    (3199.68, 3424.33, 2427.32, 180.325),
    (2970.51, 3059.75, 2257.22, 218.437),
    (3020.98, 2787.58, 1880.88, 148.008),
    (3364.88, 2711.71, 1346.4, 191.873),
    (3822.66, 2549.99, 1092.24, 138.575),
    (4149.31, 2814.49, 989.033, 161.205),
    (3673.32, 2880.53, 943.355, 288.021),
    (3431.11, 3219.89, 1529.66, 227.405),
    (3088.21, 3257.75, 1921.1, 126.519),
    (2998.73, 3422.34, 1661.72, 117.975),
    (2808.09, 3149.62, 1865.72, 200.883),
    (2975.55, 3019.67, 2192.24, 158.794),
    (3215.02, 2963.49, 2401.9, 115.86),
    (3169.98, 2835.8, 2612.91, 133.034),
    (2722.59, 3003.57, 2648.25, 314.627),
    (2783.62, 3094.02, 2304.27, 115.352),
    (3113.29, 3169.66, 2049.91, 180.621),
    (3391.93, 3038.84, 2233.46, 126.265),
    (3440.49, 2732.24, 2461.44, 133.541),
    (3662.38, 2371.02, 2554.58, 171.019),
    (3938.4, 2082.89, 2496.42, 115.437),
    (3912.7, 2402.46, 2534.68, 158.583),
    (3464.75, 2429.18, 2563.26, 192),
    (3075.16, 2593.33, 2626.81, 150.165),
    (3076.92, 2323.96, 2745.34, 157.567),
    (2918.2, 2377.58, 2678.24, 199.36),
    (3136.11, 2350.89, 2268.12, 105.369),
    (3153.13, 2292.29, 2005, 118.651),
    (2871.74, 2502.6, 2276, 219.664),
    (2571.88, 2639.38, 2767.58, 196.018),
    (2246.86, 2729.87, 3139.59, 218.141),
    (2157.91, 2391.4, 3208.02, 181.636),
    (2300.59, 2225.88, 3003.24, 195.003),
    (2252.08, 2360.42, 3217.88, 139.209),
    (2305.72, 2331.02, 3276.56, 189.885),
    (2620.23, 2442.54, 3219.9, 267.674),
    (3051.83, 2800.47, 3140.89, 196.568),
    (3103.4, 2629.83, 3328.81, 192.423),
    (2863.7, 2433.16, 3602.32, 202.405),
    (3440.86, 2870.87, 3113, 135.529),
    (4208.85, 3249.99, 2597.08, 114.21),
    (4155.16, 3080.58, 2332.58, 159.133),
    (3772.32, 3058.89, 2225.04, 144.412),
    (3472.03, 3095.16, 2156.8, 70.8525),
    (3029.6, 2806.38, 2499.76, 141.874),
    (2646.61, 2540.48, 2900.66, 217.337),
    (2616.56, 2581.3, 2927.63, 237.641),
    (2722.82, 2932.38, 2640.89, 229.393),
    (2303.2, 3188.31, 2991.9, 349.906),
    (2149.55, 3291.66, 3536.9, 162.347),
    (2118.51, 3395.24, 3653.41, 194.072),
    (2078.49, 3557.3, 3599.23, 242.21),
    (2415.93, 3965.49, 3459.09, 320.93),
    (2468.91, 4503.22, 3642.16, 226.432),
    (2145.92, 4353.44, 3738.97, 125.208),
    (1631.72, 4003.97, 3507.32, 197.837),
    (1098.92, 4324.77, 3256.78, 167.804),
    (598.426, 4963.47, 3041.97, 136.84),
    (898.048, 5210.92, 2917.17, 85.7421),
    (1844.45, 4361.64, 3565.87, 256),
    (752.755, 4439.4, 3296.84, 138.702),
    (281.41, 4385.89, 3317.26, 140.732),
    (512.714, 4519.33, 3471.83, 81.3006),
    (577.524, 4931.2, 3609.62, 133.837),
    (1114.85, 4623.51, 3663.55, 98.3475),
    (1577.99, 3943.4, 3732.21, 297.623),
    (1975.5, 3770.56, 3697.65, 212.938),
    (1975.93, 3774.38, 3921.71, 154.183),
    (2289.6, 4063.99, 3982.24, 180.832),
    (2531.83, 4316.2, 3769.61, 122.332),
    (2649.7, 4514.86, 3450.58, 209.047),
    (2935.29, 4524.03, 3750.99, 126.985),
    (2994.63, 4863.42, 4055.02, 122.205),
    (2801.18, 4987.47, 4283.26, 107.95),
    (2608.05, 4431.17, 4172.92, 182.567),
    (2329.55, 3942.93, 3888.66, 185.274),
    (2225.53, 3755.19, 3464.52, 413.567),
    (2128.52, 3531.22, 3540.05, 240.01),
    (2159.75, 3564.91, 3518.14, 238.995),
]
for _num, (_x, _y, _z, _radius) in enumerate(_PARTICLES, 164):
    _name = "particle_%d geometry" % _num
    # Faithful to the generated original: create the set only when the key
    # is absent, then store and re-fetch it before placing the marker.
    # NOTE(review): if a key were already present, the original (and this
    # loop) would store the *previous* iteration's set under it — a latent
    # quirk of the generator, preserved here because keys are unique in
    # practice. `s` and `mark` are left bound to the last particle's set and
    # marker, exactly as the unrolled code left them for the lines that
    # follow this block.
    if _name not in marker_sets:
        s = new_marker_set(_name)
    marker_sets[_name] = s
    s = marker_sets[_name]
    mark = s.place_marker((_x, _y, _z), _COLOR_OVERRIDES.get(_num, _GREY), _radius)
if "particle_312 geometry" not in marker_sets:
s=new_marker_set('particle_312 geometry')
marker_sets["particle_312 geometry"]=s
s= marker_sets["particle_312 geometry"]
mark=s.place_marker((2212.97, 3656.2, 3759.93), (0.7, 0.7, 0.7), 203.674)
if "particle_313 geometry" not in marker_sets:
s=new_marker_set('particle_313 geometry')
marker_sets["particle_313 geometry"]=s
s= marker_sets["particle_313 geometry"]
mark=s.place_marker((2693.45, 3730.82, 4119.61), (0.7, 0.7, 0.7), 266.744)
if "particle_314 geometry" not in marker_sets:
s=new_marker_set('particle_314 geometry')
marker_sets["particle_314 geometry"]=s
s= marker_sets["particle_314 geometry"]
mark=s.place_marker((2454.5, 3769.27, 4394.6), (0.7, 0.7, 0.7), 147.585)
if "particle_315 geometry" not in marker_sets:
s=new_marker_set('particle_315 geometry')
marker_sets["particle_315 geometry"]=s
s= marker_sets["particle_315 geometry"]
mark=s.place_marker((2261.68, 3641.29, 4222.09), (0.7, 0.7, 0.7), 249.485)
if "particle_316 geometry" not in marker_sets:
s=new_marker_set('particle_316 geometry')
marker_sets["particle_316 geometry"]=s
s= marker_sets["particle_316 geometry"]
mark=s.place_marker((2531.93, 3623.31, 3889.17), (0.7, 0.7, 0.7), 119.371)
if "particle_317 geometry" not in marker_sets:
s=new_marker_set('particle_317 geometry')
marker_sets["particle_317 geometry"]=s
s= marker_sets["particle_317 geometry"]
mark=s.place_marker((2864.34, 4048.7, 3443.91), (0.7, 0.7, 0.7), 155.875)
if "particle_318 geometry" not in marker_sets:
s=new_marker_set('particle_318 geometry')
marker_sets["particle_318 geometry"]=s
s= marker_sets["particle_318 geometry"]
mark=s.place_marker((2741.07, 4616.88, 2946.99), (0.7, 0.7, 0.7), 189.419)
if "particle_319 geometry" not in marker_sets:
s=new_marker_set('particle_319 geometry')
marker_sets["particle_319 geometry"]=s
s= marker_sets["particle_319 geometry"]
mark=s.place_marker((2292.06, 4571.08, 2629.29), (0.7, 0.7, 0.7), 137.475)
if "particle_320 geometry" not in marker_sets:
s=new_marker_set('particle_320 geometry')
marker_sets["particle_320 geometry"]=s
s= marker_sets["particle_320 geometry"]
mark=s.place_marker((1945.73, 4276.25, 2447.77), (0.7, 0.7, 0.7), 176.179)
if "particle_321 geometry" not in marker_sets:
s=new_marker_set('particle_321 geometry')
marker_sets["particle_321 geometry"]=s
s= marker_sets["particle_321 geometry"]
mark=s.place_marker((1552.33, 4162.78, 2253.37), (0.7, 0.7, 0.7), 138.829)
if "particle_322 geometry" not in marker_sets:
s=new_marker_set('particle_322 geometry')
marker_sets["particle_322 geometry"]=s
s= marker_sets["particle_322 geometry"]
mark=s.place_marker((1158.51, 4197.33, 2146.8), (0.7, 0.7, 0.7), 148.727)
if "particle_323 geometry" not in marker_sets:
s=new_marker_set('particle_323 geometry')
marker_sets["particle_323 geometry"]=s
s= marker_sets["particle_323 geometry"]
mark=s.place_marker((728.237, 4458.05, 2032.22), (0.7, 0.7, 0.7), 230.323)
if "particle_324 geometry" not in marker_sets:
s=new_marker_set('particle_324 geometry')
marker_sets["particle_324 geometry"]=s
s= marker_sets["particle_324 geometry"]
mark=s.place_marker((1259.09, 4495.03, 2355.26), (0.7, 0.7, 0.7), 175.376)
if "particle_325 geometry" not in marker_sets:
s=new_marker_set('particle_325 geometry')
marker_sets["particle_325 geometry"]=s
s= marker_sets["particle_325 geometry"]
mark=s.place_marker((1631.13, 4380.1, 2598.39), (0.7, 0.7, 0.7), 161.163)
if "particle_326 geometry" not in marker_sets:
s=new_marker_set('particle_326 geometry')
marker_sets["particle_326 geometry"]=s
s= marker_sets["particle_326 geometry"]
mark=s.place_marker((1901.54, 4416.02, 2148.42), (0.7, 0.7, 0.7), 125.885)
if "particle_327 geometry" not in marker_sets:
s=new_marker_set('particle_327 geometry')
marker_sets["particle_327 geometry"]=s
s= marker_sets["particle_327 geometry"]
mark=s.place_marker((2180.69, 4605.78, 1851.72), (0.7, 0.7, 0.7), 206.635)
if "particle_328 geometry" not in marker_sets:
s=new_marker_set('particle_328 geometry')
marker_sets["particle_328 geometry"]=s
s= marker_sets["particle_328 geometry"]
mark=s.place_marker((2015, 4165.67, 1844.76), (0.7, 0.7, 0.7), 151.392)
if "particle_329 geometry" not in marker_sets:
s=new_marker_set('particle_329 geometry')
marker_sets["particle_329 geometry"]=s
s= marker_sets["particle_329 geometry"]
mark=s.place_marker((1786.01, 3858.84, 1896.94), (0.7, 0.7, 0.7), 173.388)
if "particle_330 geometry" not in marker_sets:
s=new_marker_set('particle_330 geometry')
marker_sets["particle_330 geometry"]=s
s= marker_sets["particle_330 geometry"]
mark=s.place_marker((1446.11, 3929.26, 1895.94), (0.7, 0.7, 0.7), 135.825)
if "particle_331 geometry" not in marker_sets:
s=new_marker_set('particle_331 geometry')
marker_sets["particle_331 geometry"]=s
s= marker_sets["particle_331 geometry"]
mark=s.place_marker((1121.39, 4214.59, 1863.99), (0.7, 0.7, 0.7), 186.839)
if "particle_332 geometry" not in marker_sets:
s=new_marker_set('particle_332 geometry')
marker_sets["particle_332 geometry"]=s
s= marker_sets["particle_332 geometry"]
mark=s.place_marker((821.357, 4576.84, 1819.65), (0.7, 0.7, 0.7), 121.189)
if "particle_333 geometry" not in marker_sets:
s=new_marker_set('particle_333 geometry')
marker_sets["particle_333 geometry"]=s
s= marker_sets["particle_333 geometry"]
mark=s.place_marker((1196.45, 4499.97, 2014.37), (0.7, 0.7, 0.7), 102.916)
if "particle_334 geometry" not in marker_sets:
s=new_marker_set('particle_334 geometry')
marker_sets["particle_334 geometry"]=s
s= marker_sets["particle_334 geometry"]
mark=s.place_marker((1670.08, 4337.76, 2398.16), (0.7, 0.7, 0.7), 212.769)
if "particle_335 geometry" not in marker_sets:
s=new_marker_set('particle_335 geometry')
marker_sets["particle_335 geometry"]=s
s= marker_sets["particle_335 geometry"]
mark=s.place_marker((2119.87, 3919.85, 2695.47), (0.7, 0.7, 0.7), 173.092)
if "particle_336 geometry" not in marker_sets:
s=new_marker_set('particle_336 geometry')
marker_sets["particle_336 geometry"]=s
s= marker_sets["particle_336 geometry"]
mark=s.place_marker((2526.68, 3838.02, 2975.67), (0.7, 0.7, 0.7), 264.502)
if "particle_337 geometry" not in marker_sets:
s=new_marker_set('particle_337 geometry')
marker_sets["particle_337 geometry"]=s
s= marker_sets["particle_337 geometry"]
mark=s.place_marker((2894.37, 4173.53, 3203.84), (0.7, 0.7, 0.7), 208.666)
if "particle_338 geometry" not in marker_sets:
s=new_marker_set('particle_338 geometry')
marker_sets["particle_338 geometry"]=s
s= marker_sets["particle_338 geometry"]
mark=s.place_marker((3006.23, 4494.09, 3551.76), (0.7, 0.7, 0.7), 186.797)
if "particle_339 geometry" not in marker_sets:
s=new_marker_set('particle_339 geometry')
marker_sets["particle_339 geometry"]=s
s= marker_sets["particle_339 geometry"]
mark=s.place_marker((3003.06, 4285.97, 4006.02), (0.7, 0.7, 0.7), 255.534)
if "particle_340 geometry" not in marker_sets:
s=new_marker_set('particle_340 geometry')
marker_sets["particle_340 geometry"]=s
s= marker_sets["particle_340 geometry"]
mark=s.place_marker((2727.69, 4419.16, 4302.98), (0.7, 0.7, 0.7), 153.126)
if "particle_341 geometry" not in marker_sets:
s=new_marker_set('particle_341 geometry')
marker_sets["particle_341 geometry"]=s
s= marker_sets["particle_341 geometry"]
mark=s.place_marker((2983.43, 4712.12, 4279.86), (0.7, 0.7, 0.7), 165.816)
if "particle_342 geometry" not in marker_sets:
s=new_marker_set('particle_342 geometry')
marker_sets["particle_342 geometry"]=s
s= marker_sets["particle_342 geometry"]
mark=s.place_marker((3194.44, 4414.74, 4145.4), (0.7, 0.7, 0.7), 134.429)
if "particle_343 geometry" not in marker_sets:
s=new_marker_set('particle_343 geometry')
marker_sets["particle_343 geometry"]=s
s= marker_sets["particle_343 geometry"]
mark=s.place_marker((3166.01, 4282.65, 3780.63), (0.7, 0.7, 0.7), 178.971)
if "particle_344 geometry" not in marker_sets:
s=new_marker_set('particle_344 geometry')
marker_sets["particle_344 geometry"]=s
s= marker_sets["particle_344 geometry"]
mark=s.place_marker((2926.65, 4507.61, 3395), (0.7, 0.7, 0.7), 189.969)
if "particle_345 geometry" not in marker_sets:
s=new_marker_set('particle_345 geometry')
marker_sets["particle_345 geometry"]=s
s= marker_sets["particle_345 geometry"]
mark=s.place_marker((3011.47, 5054.81, 3123.81), (0.7, 0.7, 0.7), 121.359)
if "particle_346 geometry" not in marker_sets:
s=new_marker_set('particle_346 geometry')
marker_sets["particle_346 geometry"]=s
s= marker_sets["particle_346 geometry"]
mark=s.place_marker((2716.68, 5132.84, 2682.56), (0.7, 0.7, 0.7), 187.262)
if "particle_347 geometry" not in marker_sets:
s=new_marker_set('particle_347 geometry')
marker_sets["particle_347 geometry"]=s
s= marker_sets["particle_347 geometry"]
mark=s.place_marker((2359.96, 4739.37, 2311.69), (0.7, 0.7, 0.7), 164.335)
if "particle_348 geometry" not in marker_sets:
s=new_marker_set('particle_348 geometry')
marker_sets["particle_348 geometry"]=s
s= marker_sets["particle_348 geometry"]
mark=s.place_marker((1854.19, 4615.83, 2332.71), (0.7, 0.7, 0.7), 138.363)
if "particle_349 geometry" not in marker_sets:
s=new_marker_set('particle_349 geometry')
marker_sets["particle_349 geometry"]=s
s= marker_sets["particle_349 geometry"]
mark=s.place_marker((1484.97, 4609.36, 2213.84), (0.7, 0.7, 0.7), 138.49)
if "particle_350 geometry" not in marker_sets:
s=new_marker_set('particle_350 geometry')
marker_sets["particle_350 geometry"]=s
s= marker_sets["particle_350 geometry"]
mark=s.place_marker((1578.84, 4436.38, 1926.47), (0.7, 0.7, 0.7), 116.325)
if "particle_351 geometry" not in marker_sets:
s=new_marker_set('particle_351 geometry')
marker_sets["particle_351 geometry"]=s
s= marker_sets["particle_351 geometry"]
mark=s.place_marker((2001.52, 4371.31, 2056.68), (0.7, 0.7, 0.7), 106.511)
if "particle_352 geometry" not in marker_sets:
s=new_marker_set('particle_352 geometry')
marker_sets["particle_352 geometry"]=s
s= marker_sets["particle_352 geometry"]
mark=s.place_marker((2361.33, 4392.28, 2468.04), (0.7, 0.7, 0.7), 151.096)
if "particle_353 geometry" not in marker_sets:
s=new_marker_set('particle_353 geometry')
marker_sets["particle_353 geometry"]=s
s= marker_sets["particle_353 geometry"]
mark=s.place_marker((2722.37, 4616.8, 2989.84), (0.7, 0.7, 0.7), 240.856)
if "particle_354 geometry" not in marker_sets:
s=new_marker_set('particle_354 geometry')
marker_sets["particle_354 geometry"]=s
s= marker_sets["particle_354 geometry"]
mark=s.place_marker((2904.08, 4829.92, 3416.77), (0.7, 0.7, 0.7), 149.7)
if "particle_355 geometry" not in marker_sets:
s=new_marker_set('particle_355 geometry')
marker_sets["particle_355 geometry"]=s
s= marker_sets["particle_355 geometry"]
mark=s.place_marker((3110.2, 4694.09, 3633.37), (0.7, 0.7, 0.7), 165.943)
if "particle_356 geometry" not in marker_sets:
s=new_marker_set('particle_356 geometry')
marker_sets["particle_356 geometry"]=s
s= marker_sets["particle_356 geometry"]
mark=s.place_marker((2940.46, 4118.6, 3738.15), (0.7, 0.7, 0.7), 178.971)
if "particle_357 geometry" not in marker_sets:
s=new_marker_set('particle_357 geometry')
marker_sets["particle_357 geometry"]=s
s= marker_sets["particle_357 geometry"]
mark=s.place_marker((2998.9, 3405.03, 3968.56), (0.7, 0.7, 0.7), 154.945)
# Register every generated surface model with Chimera's open-model list.
for surface_model in surf_sets.values():
    chimera.openModels.add([surface_model])
|
ZenithDK/mopidy | refs/heads/develop | tests/local/test_library.py | 12 | from __future__ import absolute_import, unicode_literals
import os
import shutil
import tempfile
import unittest
import mock
import pykka
from mopidy import core, exceptions
from mopidy.local import actor, json
from mopidy.models import Album, Artist, Image, Track
from tests import path_to_data_dir
# TODO: update tests to only use backend, not core. we need a seperate
# core test that does this integration test.
class LocalLibraryProviderTest(unittest.TestCase):
artists = [
Artist(name='artist1'),
Artist(name='artist2'),
Artist(name='artist3'),
Artist(name='artist4'),
Artist(name='artist5'),
Artist(name='artist6'),
Artist(),
]
albums = [
Album(name='album1', artists=[artists[0]]),
Album(name='album2', artists=[artists[1]]),
Album(name='album3', artists=[artists[2]]),
Album(name='album4'),
Album(artists=[artists[-1]]),
]
tracks = [
Track(
uri='local:track:path1', name='track1',
artists=[artists[0]], album=albums[0],
date='2001-02-03', length=4000, track_no=1),
Track(
uri='local:track:path2', name='track2',
artists=[artists[1]], album=albums[1],
date='2002', length=4000, track_no=2),
Track(
uri='local:track:path3', name='track3',
artists=[artists[3]], album=albums[2],
date='2003', length=4000, track_no=3),
Track(
uri='local:track:path4', name='track4',
artists=[artists[2]], album=albums[3],
date='2004', length=60000, track_no=4,
comment='This is a fantastic track'),
Track(
uri='local:track:path5', name='track5', genre='genre1',
album=albums[3], length=4000, composers=[artists[4]]),
Track(
uri='local:track:path6', name='track6', genre='genre2',
album=albums[3], length=4000, performers=[artists[5]]),
Track(uri='local:track:nameless', album=albums[-1]),
]
config = {
'core': {
'data_dir': path_to_data_dir(''),
},
'local': {
'media_dir': path_to_data_dir(''),
'library': 'json',
},
}
def setUp(self): # noqa: N802
actor.LocalBackend.libraries = [json.JsonLibrary]
self.backend = actor.LocalBackend.start(
config=self.config, audio=None).proxy()
self.core = core.Core(backends=[self.backend])
self.library = self.core.library
def tearDown(self): # noqa: N802
pykka.ActorRegistry.stop_all()
actor.LocalBackend.libraries = []
def find_exact(self, **query):
# TODO: remove this helper?
return self.library.search(query=query, exact=True)
def search(self, **query):
# TODO: remove this helper?
return self.library.search(query=query)
def test_refresh(self):
self.library.refresh()
@unittest.SkipTest
def test_refresh_uri(self):
pass
def test_refresh_missing_uri(self):
# Verifies that https://github.com/mopidy/mopidy/issues/500
# has been fixed.
tmpdir = tempfile.mkdtemp()
try:
tmpdir_local = os.path.join(tmpdir, 'local')
shutil.copytree(path_to_data_dir('local'), tmpdir_local)
config = {
'core': {
'data_dir': tmpdir,
},
'local': self.config['local'],
}
backend = actor.LocalBackend(config=config, audio=None)
# Sanity check that value is in the library
result = backend.library.lookup(self.tracks[0].uri)
self.assertEqual(result, self.tracks[0:1])
# Clear and refresh.
tmplib = os.path.join(tmpdir_local, 'library.json.gz')
open(tmplib, 'w').close()
backend.library.refresh()
# Now it should be gone.
result = backend.library.lookup(self.tracks[0].uri)
self.assertEqual(result, [])
finally:
shutil.rmtree(tmpdir)
@unittest.SkipTest
def test_browse(self):
pass # TODO
def test_lookup(self):
uri = self.tracks[0].uri
result = self.library.lookup(uris=[uri])
self.assertEqual(result[uri], self.tracks[0:1])
def test_lookup_unknown_track(self):
tracks = self.library.lookup(uris=['fake:/uri'])
self.assertEqual(tracks, {'fake:/uri': []})
# test backward compatibility with local libraries returning a
# single Track
@mock.patch.object(json.JsonLibrary, 'lookup')
def test_lookup_return_single_track(self, mock_lookup):
backend = actor.LocalBackend(config=self.config, audio=None)
mock_lookup.return_value = self.tracks[0]
tracks = backend.library.lookup(self.tracks[0].uri)
mock_lookup.assert_called_with(self.tracks[0].uri)
self.assertEqual(tracks, self.tracks[0:1])
mock_lookup.return_value = None
tracks = backend.library.lookup('fake uri')
mock_lookup.assert_called_with('fake uri')
self.assertEqual(tracks, [])
# TODO: move to search_test module
def test_find_exact_no_hits(self):
result = self.find_exact(track_name=['unknown track'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(artist=['unknown artist'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(albumartist=['unknown albumartist'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(composer=['unknown composer'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(performer=['unknown performer'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(album=['unknown album'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(date=['1990'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(genre=['unknown genre'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(track_no=['9'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(track_no=['no_match'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(comment=['fake comment'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(uri=['fake uri'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(any=['unknown any'])
self.assertEqual(list(result[0].tracks), [])
def test_find_exact_uri(self):
track_1_uri = 'local:track:path1'
result = self.find_exact(uri=track_1_uri)
self.assertEqual(list(result[0].tracks), self.tracks[:1])
track_2_uri = 'local:track:path2'
result = self.find_exact(uri=track_2_uri)
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
def test_find_exact_track_name(self):
result = self.find_exact(track_name=['track1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.find_exact(track_name=['track2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
def test_find_exact_artist(self):
result = self.find_exact(artist=['artist1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.find_exact(artist=['artist2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
result = self.find_exact(artist=['artist3'])
self.assertEqual(list(result[0].tracks), self.tracks[3:4])
def test_find_exact_composer(self):
result = self.find_exact(composer=['artist5'])
self.assertEqual(list(result[0].tracks), self.tracks[4:5])
result = self.find_exact(composer=['artist6'])
self.assertEqual(list(result[0].tracks), [])
def test_find_exact_performer(self):
result = self.find_exact(performer=['artist6'])
self.assertEqual(list(result[0].tracks), self.tracks[5:6])
result = self.find_exact(performer=['artist5'])
self.assertEqual(list(result[0].tracks), [])
def test_find_exact_album(self):
result = self.find_exact(album=['album1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.find_exact(album=['album2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
def test_find_exact_albumartist(self):
# Artist is both track artist and album artist
result = self.find_exact(albumartist=['artist1'])
self.assertEqual(list(result[0].tracks), [self.tracks[0]])
# Artist is both track and album artist
result = self.find_exact(albumartist=['artist2'])
self.assertEqual(list(result[0].tracks), [self.tracks[1]])
# Artist is just album artist
result = self.find_exact(albumartist=['artist3'])
self.assertEqual(list(result[0].tracks), [self.tracks[2]])
def test_find_exact_track_no(self):
result = self.find_exact(track_no=['1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.find_exact(track_no=['2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
def test_find_exact_genre(self):
result = self.find_exact(genre=['genre1'])
self.assertEqual(list(result[0].tracks), self.tracks[4:5])
result = self.find_exact(genre=['genre2'])
self.assertEqual(list(result[0].tracks), self.tracks[5:6])
def test_find_exact_date(self):
result = self.find_exact(date=['2001'])
self.assertEqual(list(result[0].tracks), [])
result = self.find_exact(date=['2001-02-03'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.find_exact(date=['2002'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
def test_find_exact_comment(self):
result = self.find_exact(
comment=['This is a fantastic track'])
self.assertEqual(list(result[0].tracks), self.tracks[3:4])
result = self.find_exact(
comment=['This is a fantastic'])
self.assertEqual(list(result[0].tracks), [])
def test_find_exact_any(self):
# Matches on track artist
result = self.find_exact(any=['artist1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.find_exact(any=['artist2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
# Matches on track name
result = self.find_exact(any=['track1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.find_exact(any=['track2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
# Matches on track album
result = self.find_exact(any=['album1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
# Matches on track album artists
result = self.find_exact(any=['artist3'])
self.assertEqual(len(result[0].tracks), 2)
self.assertIn(self.tracks[2], result[0].tracks)
self.assertIn(self.tracks[3], result[0].tracks)
# Matches on track composer
result = self.find_exact(any=['artist5'])
self.assertEqual(list(result[0].tracks), self.tracks[4:5])
# Matches on track performer
result = self.find_exact(any=['artist6'])
self.assertEqual(list(result[0].tracks), self.tracks[5:6])
# Matches on track genre
result = self.find_exact(any=['genre1'])
self.assertEqual(list(result[0].tracks), self.tracks[4:5])
result = self.find_exact(any=['genre2'])
self.assertEqual(list(result[0].tracks), self.tracks[5:6])
# Matches on track date
result = self.find_exact(any=['2002'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
# Matches on track comment
result = self.find_exact(
any=['This is a fantastic track'])
self.assertEqual(list(result[0].tracks), self.tracks[3:4])
# Matches on URI
result = self.find_exact(any=['local:track:path1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
# TODO: This is really just a test of the query validation code now,
# as this code path never even makes it to the local backend.
def test_find_exact_wrong_type(self):
with self.assertRaises(exceptions.ValidationError):
self.find_exact(wrong=['test'])
def test_find_exact_with_empty_query(self):
with self.assertRaises(exceptions.ValidationError):
self.find_exact(artist=[''])
with self.assertRaises(exceptions.ValidationError):
self.find_exact(albumartist=[''])
with self.assertRaises(exceptions.ValidationError):
self.find_exact(track_name=[''])
with self.assertRaises(exceptions.ValidationError):
self.find_exact(composer=[''])
with self.assertRaises(exceptions.ValidationError):
self.find_exact(performer=[''])
with self.assertRaises(exceptions.ValidationError):
self.find_exact(album=[''])
with self.assertRaises(exceptions.ValidationError):
self.find_exact(track_no=[''])
with self.assertRaises(exceptions.ValidationError):
self.find_exact(genre=[''])
with self.assertRaises(exceptions.ValidationError):
self.find_exact(date=[''])
with self.assertRaises(exceptions.ValidationError):
self.find_exact(comment=[''])
with self.assertRaises(exceptions.ValidationError):
self.find_exact(any=[''])
def test_search_no_hits(self):
result = self.search(track_name=['unknown track'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(artist=['unknown artist'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(albumartist=['unknown albumartist'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(composer=['unknown composer'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(performer=['unknown performer'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(album=['unknown album'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(track_no=['9'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(track_no=['no_match'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(genre=['unknown genre'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(date=['unknown date'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(comment=['unknown comment'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(uri=['unknown uri'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(any=['unknown anything'])
self.assertEqual(list(result[0].tracks), [])
def test_search_uri(self):
result = self.search(uri=['TH1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.search(uri=['TH2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
def test_search_track_name(self):
result = self.search(track_name=['Rack1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.search(track_name=['Rack2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
def test_search_artist(self):
result = self.search(artist=['Tist1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.search(artist=['Tist2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
def test_search_albumartist(self):
# Artist is both track artist and album artist
result = self.search(albumartist=['Tist1'])
self.assertEqual(list(result[0].tracks), [self.tracks[0]])
# Artist is both track artist and album artist
result = self.search(albumartist=['Tist2'])
self.assertEqual(list(result[0].tracks), [self.tracks[1]])
# Artist is just album artist
result = self.search(albumartist=['Tist3'])
self.assertEqual(list(result[0].tracks), [self.tracks[2]])
def test_search_composer(self):
result = self.search(composer=['Tist5'])
self.assertEqual(list(result[0].tracks), self.tracks[4:5])
def test_search_performer(self):
result = self.search(performer=['Tist6'])
self.assertEqual(list(result[0].tracks), self.tracks[5:6])
def test_search_album(self):
result = self.search(album=['Bum1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.search(album=['Bum2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
def test_search_genre(self):
result = self.search(genre=['Enre1'])
self.assertEqual(list(result[0].tracks), self.tracks[4:5])
result = self.search(genre=['Enre2'])
self.assertEqual(list(result[0].tracks), self.tracks[5:6])
def test_search_date(self):
result = self.search(date=['2001'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.search(date=['2001-02-03'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.search(date=['2001-02-04'])
self.assertEqual(list(result[0].tracks), [])
result = self.search(date=['2002'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
def test_search_track_no(self):
result = self.search(track_no=['1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.search(track_no=['2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
def test_search_comment(self):
result = self.search(comment=['fantastic'])
self.assertEqual(list(result[0].tracks), self.tracks[3:4])
result = self.search(comment=['antasti'])
self.assertEqual(list(result[0].tracks), self.tracks[3:4])
def test_search_any(self):
# Matches on track artist
result = self.search(any=['Tist1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
# Matches on track composer
result = self.search(any=['Tist5'])
self.assertEqual(list(result[0].tracks), self.tracks[4:5])
# Matches on track performer
result = self.search(any=['Tist6'])
self.assertEqual(list(result[0].tracks), self.tracks[5:6])
# Matches on track
result = self.search(any=['Rack1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
result = self.search(any=['Rack2'])
self.assertEqual(list(result[0].tracks), self.tracks[1:2])
# Matches on track album
result = self.search(any=['Bum1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
# Matches on track album artists
result = self.search(any=['Tist3'])
self.assertEqual(len(result[0].tracks), 2)
self.assertIn(self.tracks[2], result[0].tracks)
self.assertIn(self.tracks[3], result[0].tracks)
# Matches on track genre
result = self.search(any=['Enre1'])
self.assertEqual(list(result[0].tracks), self.tracks[4:5])
result = self.search(any=['Enre2'])
self.assertEqual(list(result[0].tracks), self.tracks[5:6])
# Matches on track comment
result = self.search(any=['fanta'])
self.assertEqual(list(result[0].tracks), self.tracks[3:4])
result = self.search(any=['is a fan'])
self.assertEqual(list(result[0].tracks), self.tracks[3:4])
# Matches on URI
result = self.search(any=['TH1'])
self.assertEqual(list(result[0].tracks), self.tracks[:1])
def test_search_wrong_type(self):
with self.assertRaises(exceptions.ValidationError):
self.search(wrong=['test'])
def test_search_with_empty_query(self):
with self.assertRaises(exceptions.ValidationError):
self.search(artist=[''])
with self.assertRaises(exceptions.ValidationError):
self.search(albumartist=[''])
with self.assertRaises(exceptions.ValidationError):
self.search(composer=[''])
with self.assertRaises(exceptions.ValidationError):
self.search(performer=[''])
with self.assertRaises(exceptions.ValidationError):
self.search(track_name=[''])
with self.assertRaises(exceptions.ValidationError):
self.search(album=[''])
with self.assertRaises(exceptions.ValidationError):
self.search(genre=[''])
with self.assertRaises(exceptions.ValidationError):
self.search(date=[''])
with self.assertRaises(exceptions.ValidationError):
self.search(comment=[''])
with self.assertRaises(exceptions.ValidationError):
self.search(uri=[''])
with self.assertRaises(exceptions.ValidationError):
self.search(any=[''])
def test_default_get_images_impl_no_images(self):
result = self.library.get_images([track.uri for track in self.tracks])
self.assertEqual(result, {track.uri: tuple() for track in self.tracks})
@mock.patch.object(json.JsonLibrary, 'lookup')
def test_default_get_images_impl_album_images(self, mock_lookup):
library = actor.LocalBackend(config=self.config, audio=None).library
image = Image(uri='imageuri')
album = Album(images=[image.uri])
track = Track(uri='trackuri', album=album)
mock_lookup.return_value = [track]
result = library.get_images([track.uri])
self.assertEqual(result, {track.uri: [image]})
@mock.patch.object(json.JsonLibrary, 'lookup')
def test_default_get_images_impl_single_track(self, mock_lookup):
library = actor.LocalBackend(config=self.config, audio=None).library
image = Image(uri='imageuri')
album = Album(images=[image.uri])
track = Track(uri='trackuri', album=album)
mock_lookup.return_value = track
result = library.get_images([track.uri])
self.assertEqual(result, {track.uri: [image]})
@mock.patch.object(json.JsonLibrary, 'get_images')
def test_local_library_get_images(self, mock_get_images):
    """The backend library delegates get_images() to the local library."""
    library = actor.LocalBackend(config=self.config, audio=None).library
    image = Image(uri='imageuri')
    track = Track(uri='trackuri')
    # The mocked local-library result is passed through unchanged.
    mock_get_images.return_value = {track.uri: [image]}
    result = library.get_images([track.uri])
    self.assertEqual(result, {track.uri: [image]})
|
zrzka/blackmamba | refs/heads/master | blackmamba/lib/docutils/languages/zh_cn.py | 52 | # -*- coding: utf-8 -*-
# $Id: zh_cn.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Pan Junyong <panjy@zopechina.com>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Simplified Chinese language mappings for language-dependent features
of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': '作者',
'authors': '作者群',
'organization': '组织',
'address': '地址',
'contact': '联系',
'version': '版本',
'revision': '修订',
'status': '状态',
'date': '日期',
'copyright': '版权',
'dedication': '献辞',
'abstract': '摘要',
'attention': '注意',
'caution': '小心',
'danger': '危险',
'error': '错误',
'hint': '提示',
'important': '重要',
'note': '注解',
'tip': '技巧',
'warning': '警告',
'contents': '目录',
}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
'作者': 'author',
'作者群': 'authors',
'组织': 'organization',
'地址': 'address',
'联系': 'contact',
'版本': 'version',
'修订': 'revision',
'状态': 'status',
'时间': 'date',
'版权': 'copyright',
'献辞': 'dedication',
'摘要': 'abstract'}
"""Simplified Chinese to canonical name mapping for bibliographic fields."""
author_separators = [';', ',',
'\uff1b', # ';'
'\uff0c', # ','
'\u3001', # '、'
]
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
|
caiocsalvador/whats_the_craic | refs/heads/master | lib/python3.4/site-packages/django/contrib/auth/migrations/0007_alter_validators_add_error_messages.py | 369 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """auth 0007: add an explicit 'unique' error message and tighten the
    validator message on User.username."""

    dependencies = [
        ('auth', '0006_require_contenttypes_0002'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.CharField(
                # Explicit error message so the text is captured in
                # migration state rather than left to the framework default.
                error_messages={'unique': 'A user with that username already exists.'},
                help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.',
                max_length=30,
                unique=True,
                validators=[
                    django.core.validators.RegexValidator(
                        '^[\\w.@+-]+$', 'Enter a valid username. '
                        'This value may contain only letters, numbers and @/./+/-/_ characters.'
                    ),
                ],
                verbose_name='username',
            ),
        ),
    ]
|
tysonclugg/django | refs/heads/master | tests/sitemaps_tests/test_http.py | 34 | import os
from datetime import date
from unittest import skipUnless
from django.apps import apps
from django.conf import settings
from django.contrib.sitemaps import Sitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import modify_settings, override_settings
from django.utils.formats import localize
from django.utils.translation import activate, deactivate
from .base import SitemapTestsBase
from .models import TestModel
class HTTPSitemapTests(SitemapTestsBase):
use_sitemap_err_msg = (
'To use sitemaps, either enable the sites framework or pass a '
'Site/RequestSite object in your view.'
)
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
response = self.client.get('/simple/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_not_callable(self):
"""A sitemap may not be callable."""
response = self.client.get('/simple-not-callable/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_paged_sitemap(self):
"""A sitemap may have multiple pages."""
response = self.client.get('/simple-paged/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>{0}/simple/sitemap-simple.xml</loc></sitemap><sitemap><loc>{0}/simple/sitemap-simple.xml?p=2</loc></sitemap>
</sitemapindex>
""".format(self.base_url)
self.assertXMLEqual(response.content.decode(), expected_content)
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
}])
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
response = self.client.get('/simple/custom-index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_simple_sitemap_section(self):
"A simple sitemap section can be rendered"
response = self.client.get('/simple/sitemap-simple.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
def test_no_section(self):
response = self.client.get('/simple/sitemap-simple2.xml')
self.assertEqual(str(response.context['exception']), "No sitemap available for section: 'simple2'")
self.assertEqual(response.status_code, 404)
def test_empty_page(self):
response = self.client.get('/simple/sitemap-simple.xml?p=0')
self.assertEqual(str(response.context['exception']), 'Page 0 empty')
self.assertEqual(response.status_code, 404)
def test_page_not_int(self):
response = self.client.get('/simple/sitemap-simple.xml?p=test')
self.assertEqual(str(response.context['exception']), "No page 'test'")
self.assertEqual(response.status_code, 404)
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
}])
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
response = self.client.get('/simple/custom-sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_last_modified(self):
"Last-Modified header is set correctly"
response = self.client.get('/lastmod/sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 10:00:00 GMT')
def test_sitemap_last_modified_date(self):
"""
The Last-Modified header should be support dates (without time).
"""
response = self.client.get('/lastmod/date-sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 00:00:00 GMT')
def test_sitemap_last_modified_tz(self):
"""
The Last-Modified header should be converted from timezone aware dates
to GMT.
"""
response = self.client.get('/lastmod/tz-sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 15:00:00 GMT')
def test_sitemap_last_modified_missing(self):
"Last-Modified header is missing when sitemap has no lastmod"
response = self.client.get('/generic/sitemap.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemap_last_modified_mixed(self):
"Last-Modified header is omitted when lastmod not on all items"
response = self.client.get('/lastmod-mixed/sitemap.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_mixed_ascending_last_modified_missing(self):
"""
The Last-Modified header is omitted when lastmod isn't found in all
sitemaps. Test sitemaps are sorted by lastmod in ascending order.
"""
response = self.client.get('/lastmod-sitemaps/mixed-ascending.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_mixed_descending_last_modified_missing(self):
"""
The Last-Modified header is omitted when lastmod isn't found in all
sitemaps. Test sitemaps are sorted by lastmod in descending order.
"""
response = self.client.get('/lastmod-sitemaps/mixed-descending.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_ascending(self):
"""
The Last-Modified header is set to the most recent sitemap lastmod.
Test sitemaps are sorted by lastmod in ascending order.
"""
response = self.client.get('/lastmod-sitemaps/ascending.xml')
self.assertEqual(response['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT')
def test_sitemaps_lastmod_descending(self):
"""
The Last-Modified header is set to the most recent sitemap lastmod.
Test sitemaps are sorted by lastmod in descending order.
"""
response = self.client.get('/lastmod-sitemaps/descending.xml')
self.assertEqual(response['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT')
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
@override_settings(USE_L10N=True)
def test_localized_priority(self):
    "The priority value should not be localized (Refs #14164)"
    activate('fr')
    # Sanity check: the French locale renders 0.3 as '0,3'.
    self.assertEqual('0,3', localize(0.3))
    # Priorities haven't been rendered in localized format.
    response = self.client.get('/simple/sitemap.xml')
    self.assertContains(response, '<priority>0.5</priority>')
    self.assertContains(response, '<lastmod>%s</lastmod>' % date.today())
    # NOTE(review): deactivate() is skipped if an assertion above fails,
    # leaking the 'fr' locale into later tests — consider try/finally.
    deactivate()
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_requestsite_sitemap(self):
# Hitting the flatpages sitemap without the sites framework installed
# doesn't raise an exception.
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today()
self.assertXMLEqual(response.content.decode(), expected_content)
@skipUnless(apps.is_installed('django.contrib.sites'),
"django.contrib.sites app not installed.")
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg):
Sitemap().get_urls()
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
Sitemap.get_urls if Site objects exists, but the sites framework is not
actually installed.
"""
with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg):
Sitemap().get_urls()
def test_sitemap_item(self):
    """
    Check to make sure that the raw item is included with each
    Sitemap.get_url() url result.
    """
    test_sitemap = Sitemap()
    # `items` must be a callable returning the queryset, hence `.all`
    # without parentheses.
    test_sitemap.items = TestModel.objects.order_by('pk').all

    def is_testmodel(url):
        # Each url dict carries the originating object under 'item'.
        return isinstance(url['item'], TestModel)
    item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls()))
    self.assertTrue(item_in_url_info)
def test_cached_sitemap_index(self):
"""
A cached sitemap index can be rendered (#2713).
"""
response = self.client.get('/cached/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_x_robots_sitemap(self):
response = self.client.get('/simple/index.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
response = self.client.get('/simple/sitemap.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
def test_empty_sitemap(self):
response = self.client.get('/empty/sitemap.xml')
self.assertEqual(response.status_code, 200)
@override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese')))
def test_simple_i18nsitemap_index(self):
"A simple i18n sitemap index can be rendered"
response = self.client.get('/simple/i18n.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>{0}/en/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url><url><loc>{0}/pt/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""".format(self.base_url, self.i18n_model.pk)
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_without_entries(self):
response = self.client.get('/sitemap-without-entries/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
</urlset>"""
self.assertXMLEqual(response.content.decode(), expected_content)
|
jniznan/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | refs/heads/master | Chapter2_MorePyMC/separation_plot.py | 86 | # separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot(p, y, **kwargs):
    """
    Create a separation plot for logistic/probit classification results.

    See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf

    Parameters
    ----------
    p : array-like, shape (n,) or (n, M)
        Predicted proportions/probabilities; each of the M columns is
        treated as a separate model and drawn on its own axis.
    y : array-like, shape (n,)
        The 0-1 response variables.
    **kwargs : accepted for API compatibility; currently unused.
    """
    # Accept plain lists as well as arrays.
    p = np.asarray(p, dtype=float)
    y = np.asarray(y)
    assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
    n = p.shape[0]

    # A 1-D probability vector means a single model: promote it to (n, 1)
    # explicitly instead of relying on the original bare try/except, which
    # also swallowed unrelated errors.
    if p.ndim == 1:
        p = p.reshape(n, 1)
    M = p.shape[1]

    colors_bmh = np.array(["#eeeeee", "#348ABD"])

    fig = plt.figure()

    for i in range(M):
        ax = fig.add_subplot(M, 1, i + 1)
        ix = np.argsort(p[:, i])

        # Background bars colored by the *observed* outcome, ordered by
        # increasing predicted probability.
        ax.bar(np.arange(n), np.ones(n), width=1.,
               color=colors_bmh[y[ix].astype(int)],
               edgecolor='none')
        # Step curve of the sorted predicted probabilities.
        ax.plot(np.arange(n + 1), np.append(p[ix, i], p[ix, i][-1]), "k",
                linewidth=1., drawstyle="steps-post")
        # Vertical marker at the expected number of negative events.
        ax.vlines([(1 - p[ix, i]).sum()], [0], [1])

        plt.xlim(0, n)

    plt.tight_layout()
    return
|
moxon6/chemlab | refs/heads/master | build/lib.win32-3.4/chemlab/contrib/gromacs.py | 5 | '''A set of utilities to interact with gromacs'''
# Need to add a parser to insert this contrib script
# $ chemlab gromacs energy
# it should show a little interface to view the energy
# Let's launch the program and determine what happens
from chemlab.io import datafile
from pylab import *
from chemlab.molsim.analysis import rdf
import difflib
import sys, re
import numpy as np
def setup_commands(subparsers):
    """Register the `chemlab gromacs` sub-commands (energy, rdf).

    subparsers -- the argparse sub-parser collection of the chemlab CLI;
    each sub-command attaches its handler via set_defaults(func=...).
    """
    groparser = subparsers.add_parser("gromacs")
    subparsers2 = groparser.add_subparsers()

    # `chemlab gromacs energy`: plot quantities from energy files.
    eparser = subparsers2.add_parser("energy")
    eparser.add_argument('filenames', metavar='filenames', type=str, nargs='+')
    eparser.add_argument('-e', metavar='energies', type=str, nargs='+',
                         help='Properties to display in the energy viewer.')
    eparser.add_argument('-o', help='Do not display GUI and save the plot')
    eparser.set_defaults(func=lambda args: energy(args, args.o))

    # `chemlab gromacs rdf`: radial distribution function plots.
    rdfparser = subparsers2.add_parser("rdf")
    rdfparser.add_argument('selection', metavar='selection', type=str)
    rdfparser.add_argument('filename', metavar='filename', type=str)
    rdfparser.add_argument('trajectory', metavar='trajectory', type=str)
    rdfparser.add_argument('-t', metavar='t', type=str)
    rdfparser.set_defaults(func=rdffunc)
def energy(args, output=None):
    """Plot energy-file quantities for one or more GROMACS energy files.

    Parameters:
    args -- parsed argparse namespace with `e` (quantity names) and
            `filenames` (energy files).
    output -- optional path; when given the figure is saved there
              instead of being shown interactively (the -o flag).
    """
    ens = args.e
    fns = args.filenames
    datafiles = [datafile(fn) for fn in fns]

    # Resolve possibly misspelled quantity names against the ones that
    # are actually present in the first file.
    quants = datafiles[0].read('avail quantities')
    for i, e in enumerate(ens):
        if e not in quants:
            matches = difflib.get_close_matches(e, quants)
            if not matches:
                # Previously an empty match list crashed with IndexError;
                # fail with a readable message instead.
                raise ValueError('Quantity %s not present and no close '
                                 'match found. Available: %s'
                                 % (e, ', '.join(quants)))
            print('Quantity %s not present, taking close match: %s'
                  % (e, matches[0]))
            ens[i] = matches[0]

    # Collect one (time, value) series per (file, quantity) pair.
    toplot = []
    for df in datafiles:
        for e in ens:
            toplot.append({
                'points': df.read('quantity', e),
                'filename': df.fd.name,
                'quantity': e,
            })

    plots = []
    legends = []
    for arg in toplot:
        p, = plot(arg['points'][0], arg['points'][1])
        plots.append(p)
        legends.append(arg['filename'])

    xlabel('Time(ps)')
    # NOTE: only the first quantity's name is used for the y axis, as in
    # the original behavior.
    ylabel(ens[0])
    ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    grid()
    figlegend(plots, legends, 'upper right')

    if output:
        # Honor the previously-ignored `output` parameter: save the plot
        # to file without opening a GUI window.
        savefig(output)
    else:
        show()
def get_rdf(arguments):
    """Compute the RDF for one frame packed as a single tuple.

    Takes one (coords_a, coords_b, box) tuple so the function can be used
    directly with map() (or a multiprocessing pool) over frame tuples.
    """
    coords_a, coords_b, box = arguments
    return rdf(coords_a, coords_b, periodic=box)
def rdffunc(args):
    """Compute and plot radial distribution functions for selected frames.

    Parameters:
    args -- parsed argparse namespace with `selection` ("typeA-typeB"),
            `filename` (system file), `trajectory` (trajectory file) and
            `t` (comma-separated times in ps).
    """
    # The unused `import multiprocessing` was removed; frames are
    # processed serially via get_rdf.
    type_a, type_b = args.selection.split('-')

    syst = datafile(args.filename).read("system")
    sel_a = syst.type_array == type_a
    sel_b = syst.type_array == type_b

    df = datafile(args.trajectory)
    t, coords = df.read("trajectory")
    boxes = df.read("boxes")

    # Map each requested time to the index of the nearest-following frame.
    times = [int(tim) for tim in args.t.split(',')]
    ind = np.searchsorted(t, times)

    rds = [get_rdf((coords[i][sel_a], coords[i][sel_b], boxes[i]))
           for i in ind]

    for rd in rds:
        plot(rd[0], rd[1])

    ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    xlabel('Time(ps)')
    ylabel(args.selection)
    grid()
    show()
if __name__ == '__main__':
    # NOTE(review): `main` is not defined anywhere in this module, so
    # running the file directly raises NameError — confirm the intended
    # entry point (presumably the chemlab CLI dispatch) before relying
    # on this block.
    main(['pressure'])
|
msebire/intellij-community | refs/heads/master | python/testData/intentions/addMissingParamsInGoogleDocStringNoParamSection.py | 53 | def <caret>f(x, y, z):
"""
""" |
mcltn/ansible-modules-extras | refs/heads/devel | cloud/centurylink/clc_firewall_policy.py | 40 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
DOCUMENTATION = '''
module: clc_firewall_policy
short_description: Create/delete/update firewall policies
description:
- Create or delete or update firewall polices on Centurylink Cloud
version_added: "2.0"
options:
location:
description:
- Target datacenter for the firewall policy
required: True
state:
description:
- Whether to create or delete the firewall policy
default: present
required: False
choices: ['present', 'absent']
source:
description:
- The list of source addresses for traffic on the originating firewall.
This is required when state is 'present'
default: None
required: False
destination:
description:
- The list of destination addresses for traffic on the terminating firewall.
This is required when state is 'present'
default: None
required: False
ports:
description:
- The list of ports associated with the policy.
TCP and UDP can take in single ports or port ranges.
default: None
required: False
choices: ['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456']
firewall_policy_id:
description:
- Id of the firewall policy. This is required to update or delete an existing firewall policy
default: None
required: False
source_account_alias:
description:
- CLC alias for the source account
required: True
destination_account_alias:
description:
- CLC alias for the destination account
default: None
required: False
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [True, False]
enabled:
description:
- Whether the firewall policy is enabled or disabled
default: True
required: False
choices: [True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
---
- name: Create Firewall Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create / Verify a Firewall Policy at CenturyLink Cloud
clc_firewall:
source_account_alias: WFAD
location: VA1
state: present
source: 10.128.216.0/24
destination: 10.128.216.0/24
ports: Any
destination_account_alias: WFAD
---
- name: Delete Firewall Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete a Firewall Policy at CenturyLink Cloud
clc_firewall:
source_account_alias: WFAD
location: VA1
state: absent
firewall_policy_id: 'c62105233d7a4231bd2e91b9c791e43e1'
'''
RETURN = '''
firewall_policy_id:
description: The firewall policy id
returned: success
type: string
sample: fc36f1bfd47242e488a9c44346438c05
firewall_policy:
description: The firewall policy information
returned: success
type: dict
sample:
{
"destination":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"destinationAccount":"wfad",
"enabled":true,
"id":"fc36f1bfd47242e488a9c44346438c05",
"links":[
{
"href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
"rel":"self",
"verbs":[
"GET",
"PUT",
"DELETE"
]
}
],
"ports":[
"any"
],
"source":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"status":"active"
}
'''
__version__ = '${version}'
import urlparse
from time import sleep
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
try:
import clc as clc_sdk
from clc import CLCException
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
class ClcFirewallPolicy:
clc = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.firewall_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
@staticmethod
def _define_module_argument_spec():
    """
    Define the argument spec for the ansible module
    :return: argument spec dictionary
    """
    argument_spec = dict(
        location=dict(required=True),
        # required=True with default=None is contradictory; the stray
        # default was dropped.
        source_account_alias=dict(required=True),
        destination_account_alias=dict(default=None),
        firewall_policy_id=dict(default=None),
        ports=dict(default=None, type='list'),
        # The three options below were spelled 'defualt', so AnsibleModule
        # silently ignored their defaults; fixed to the real keyword.
        source=dict(default=None, type='list'),
        destination=dict(default=None, type='list'),
        wait=dict(default=True),
        state=dict(default='present', choices=['present', 'absent']),
        enabled=dict(default=True, choices=[True, False])
    )
    return argument_spec
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
changed = False
firewall_policy = None
location = self.module.params.get('location')
source_account_alias = self.module.params.get('source_account_alias')
destination_account_alias = self.module.params.get(
'destination_account_alias')
firewall_policy_id = self.module.params.get('firewall_policy_id')
ports = self.module.params.get('ports')
source = self.module.params.get('source')
destination = self.module.params.get('destination')
wait = self.module.params.get('wait')
state = self.module.params.get('state')
enabled = self.module.params.get('enabled')
self.firewall_dict = {
'location': location,
'source_account_alias': source_account_alias,
'destination_account_alias': destination_account_alias,
'firewall_policy_id': firewall_policy_id,
'ports': ports,
'source': source,
'destination': destination,
'wait': wait,
'state': state,
'enabled': enabled}
self._set_clc_credentials_from_env()
if state == 'absent':
changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
source_account_alias, location, self.firewall_dict)
elif state == 'present':
changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
source_account_alias, location, self.firewall_dict)
return self.module.exit_json(
changed=changed,
firewall_policy_id=firewall_policy_id,
firewall_policy=firewall_policy)
@staticmethod
def _get_policy_id_from_response(response):
    """
    Method to parse out the policy id from creation response
    :param response: response from firewall creation API call
    :return: policy_id: firewall policy id from creation call
    """
    # The id is the last path segment of the policy's self link, e.g.
    # .../firewallPolicies/<alias>/<location>/<policy_id>
    url = response.get('links')[0]['href']
    path = urlparse.urlparse(url).path
    # NOTE(review): `os` is not imported in the visible part of this file;
    # presumably supplied by the Ansible module boilerplate appended at
    # the bottom — confirm.
    path_list = os.path.split(path)
    policy_id = path_list[-1]
    return policy_id
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_firewall_policy_is_present(
self,
source_account_alias,
location,
firewall_dict):
"""
Ensures that a given firewall policy is present
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: dictionary of request parameters for firewall policy
:return: (changed, firewall_policy_id, firewall_policy)
changed: flag for if a change occurred
firewall_policy_id: the firewall policy id that was created/updated
firewall_policy: The firewall_policy object
"""
firewall_policy = None
firewall_policy_id = firewall_dict.get('firewall_policy_id')
if firewall_policy_id is None:
if not self.module.check_mode:
response = self._create_firewall_policy(
source_account_alias,
location,
firewall_dict)
firewall_policy_id = self._get_policy_id_from_response(
response)
changed = True
else:
firewall_policy = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
if not firewall_policy:
return self.module.fail_json(
msg='Unable to find the firewall policy id : {0}'.format(
firewall_policy_id))
changed = self._compare_get_request_with_dict(
firewall_policy,
firewall_dict)
if not self.module.check_mode and changed:
self._update_firewall_policy(
source_account_alias,
location,
firewall_policy_id,
firewall_dict)
if changed and firewall_policy_id:
firewall_policy = self._wait_for_requests_to_complete(
source_account_alias,
location,
firewall_policy_id)
return changed, firewall_policy_id, firewall_policy
def _ensure_firewall_policy_is_absent(
self,
source_account_alias,
location,
firewall_dict):
"""
Ensures that a given firewall policy is removed if present
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: firewall policy to delete
:return: (changed, firewall_policy_id, response)
changed: flag for if a change occurred
firewall_policy_id: the firewall policy id that was deleted
response: response from CLC API call
"""
changed = False
response = []
firewall_policy_id = firewall_dict.get('firewall_policy_id')
result = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
if result:
if not self.module.check_mode:
response = self._delete_firewall_policy(
source_account_alias,
location,
firewall_policy_id)
changed = True
return changed, firewall_policy_id, response
def _create_firewall_policy(
self,
source_account_alias,
location,
firewall_dict):
"""
Creates the firewall policy for the given account alias
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: dictionary of request parameters for firewall policy
:return: response from CLC API call
"""
payload = {
'destinationAccount': firewall_dict.get('destination_account_alias'),
'source': firewall_dict.get('source'),
'destination': firewall_dict.get('destination'),
'ports': firewall_dict.get('ports')}
try:
response = self.clc.v2.API.Call(
'POST', '/v2-experimental/firewallPolicies/%s/%s' %
(source_account_alias, location), payload)
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to create firewall policy. %s" %
str(e.response_text))
return response
def _delete_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id):
"""
Deletes a given firewall policy for an account alias in a datacenter
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: firewall policy id to delete
:return: response: response from CLC API call
"""
try:
response = self.clc.v2.API.Call(
'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias, location, firewall_policy_id))
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to delete the firewall policy id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
def _update_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id,
firewall_dict):
"""
Updates a firewall policy for a given datacenter and account alias
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: firewall policy id to update
:param firewall_dict: dictionary of request parameters for firewall policy
:return: response: response from CLC API call
"""
try:
response = self.clc.v2.API.Call(
'PUT',
'/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias,
location,
firewall_policy_id),
firewall_dict)
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to update the firewall policy id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
@staticmethod
def _compare_get_request_with_dict(response, firewall_dict):
"""
Helper method to compare the json response for getting the firewall policy with the request parameters
:param response: response from the get method
:param firewall_dict: dictionary of request parameters for firewall policy
:return: changed: Boolean that returns true if there are differences between
the response parameters and the playbook parameters
"""
changed = False
response_dest_account_alias = response.get('destinationAccount')
response_enabled = response.get('enabled')
response_source = response.get('source')
response_dest = response.get('destination')
response_ports = response.get('ports')
request_dest_account_alias = firewall_dict.get(
'destination_account_alias')
request_enabled = firewall_dict.get('enabled')
if request_enabled is None:
request_enabled = True
request_source = firewall_dict.get('source')
request_dest = firewall_dict.get('destination')
request_ports = firewall_dict.get('ports')
if (
response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
response_enabled != request_enabled) or (
response_source and response_source != request_source) or (
response_dest and response_dest != request_dest) or (
response_ports and response_ports != request_ports):
changed = True
return changed
def _get_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id):
"""
Get back details for a particular firewall policy
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: id of the firewall policy to get
:return: response - The response from CLC API call
"""
response = None
try:
response = self.clc.v2.API.Call(
'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias, location, firewall_policy_id))
except APIFailedResponse as e:
if e.response_status_code != 404:
self.module.fail_json(
msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
def _wait_for_requests_to_complete(
self,
source_account_alias,
location,
firewall_policy_id,
wait_limit=50):
"""
Waits until the CLC requests are complete if the wait argument is True
:param source_account_alias: The source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: The firewall policy id
:param wait_limit: The number of times to check the status for completion
:return: the firewall_policy object
"""
wait = self.module.params.get('wait')
count = 0
firewall_policy = None
while wait:
count += 1
firewall_policy = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
status = firewall_policy.get('status')
if status == 'active' or count > wait_limit:
wait = False
else:
# wait for 2 seconds
sleep(2)
return firewall_policy
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
    """
    The main function. Instantiates the module and calls process_request.
    :return: none
    """
    argument_spec = ClcFirewallPolicy._define_module_argument_spec()
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)
    ClcFirewallPolicy(module).process_request()
from ansible.module_utils.basic import * # pylint: disable=W0614
# Run the module entry point only when executed directly (not on import).
if __name__ == '__main__':
    main()
|
bencomp/dataverse.org | refs/heads/master | dataverse_org/apps/dataverse_stats/views.py | 6445 | from django.shortcuts import render
# Create your views here.
|
schieb/angr | refs/heads/master | angr/analyses/identifier/runner.py | 2 |
import random
import logging
import os
import claripy
from ...sim_type import SimTypeFunction, SimTypeInt
from ... import sim_options as so
from ... import SIM_LIBRARIES
from ... import BP_BEFORE, BP_AFTER
from ...storage.file import SimFile, SimFileDescriptor
from ...state_plugins import SimSystemPosix
from ...errors import AngrCallableMultistateError, AngrCallableError, AngrError, SimError
from .custom_callable import IdentifierCallable
l = logging.getLogger(name=__name__)
# Path to the sample CGC flag page shipped alongside this package.
flag_loc = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../example_flag_page'))
try:
    with open(flag_loc, "rb") as f:
        FLAG_DATA = f.read()
except IOError:
    # Fall back to a dummy page if the sample file is missing.
    FLAG_DATA = b"A"*0x1000
# A CGC flag page is exactly one 4KiB page.
assert len(FLAG_DATA) == 0x1000
class Runner(object):
    """
    Concretely executes candidate functions in a CGC binary (via
    IdentifierCallable) so that their observed behavior — output
    arguments, return value, and stdout — can be compared against the
    expectations recorded in a test_data object.
    """
    def __init__(self, project, cfg):
        # Swap in the tracer variant of the CGC ABI syscall library.
        # NOTE(review): this mutates the shared project object in place.
        project.simos.syscall_library.update(SIM_LIBRARIES['cgcabi_tracer'])
        self.project = project
        self.cfg = cfg
        # State snapshot taken at the first receive syscall; built lazily
        # by _get_recv_state() on the first setup_state() call.
        self.base_state = None
    def _get_recv_state(self):
        """
        Execute from the program entry point until the first receive
        syscall (CGC syscall number 2) and return that state; on any
        error, fall back to a fresh entry state.
        """
        try:
            options = set()
            options.add(so.CGC_ZERO_FILL_UNCONSTRAINED_MEMORY)
            options.add(so.CGC_NO_SYMBOLIC_RECEIVE_LENGTH)
            options.add(so.TRACK_MEMORY_MAPPING)
            options.add(so.AVOID_MULTIVALUED_READS)
            options.add(so.AVOID_MULTIVALUED_WRITES)
            # try to enable unicorn, continue if it doesn't exist
            options.add(so.UNICORN)
            l.info("unicorn tracing enabled")
            remove_options = so.simplification | { so.LAZY_SOLVES } | so.resilience | { so.SUPPORT_FLOATING_POINT }
            add_options = options
            entry_state = self.project.factory.entry_state(
                add_options=add_options,
                remove_options=remove_options)
            # map the CGC flag page
            fake_flag_data = entry_state.solver.BVV(FLAG_DATA)
            entry_state.memory.store(0x4347c000, fake_flag_data)
            # map the place where I put arguments
            entry_state.memory.mem.map_region(0x2000, 0x10000, 7)
            # Inflate the warm-up counters so unicorn engages immediately.
            entry_state.unicorn._register_check_count = 100
            entry_state.unicorn._runs_since_symbolic_data = 100
            entry_state.unicorn._runs_since_unicorn = 100
            # cooldowns
            entry_state.unicorn.cooldown_symbolic_registers = 0
            entry_state.unicorn.cooldown_symbolic_memory = 0
            entry_state.unicorn.cooldown_nonunicorn_blocks = 1
            entry_state.unicorn.max_steps = 10000
            pg = self.project.factory.simulation_manager(entry_state)
            stop_addr = self.project.simos.syscall_from_number(2).addr
            num_steps = 0
            while len(pg.active) > 0:
                if pg.one_active.addr == stop_addr:
                    # execute until receive
                    break
                if len(pg.active) > 1:
                    # On a state split, keep only one path to stay concrete.
                    pp = pg.one_active
                    pg = self.project.factory.simulation_manager(pp)
                pg.step()
                num_steps += 1
                # Give up after a bounded number of steps.
                if num_steps > 50:
                    break
            if len(pg.active) > 0:
                out_state = pg.one_active
            elif len(pg.deadended) > 0:
                out_state = pg.deadended[0]
            else:
                return self.project.factory.entry_state()
            out_state.scratch.clear()
            out_state.history.jumpkind = "Ijk_Boring"
            return out_state
        except SimError as e:
            l.warning("SimError in get recv state %s", e)
            return self.project.factory.entry_state()
        except AngrError as e:
            l.warning("AngrError in get recv state %s", e)
            return self.project.factory.entry_state()
    def setup_state(self, function, test_data, initial_state=None, concrete_rand=False):
        """
        Build a state ready to call a function under test: fresh stdio
        backed by test_data.preloaded_stdin, symbolic registers zeroed,
        unicorn tuned to run immediately, and syscall hooks installed.
        Starts from *initial_state* if given, otherwise from the cached
        receive state.
        """
        # FIXME fdwait should do something concrete...
        if initial_state is None:
            if self.base_state is None:
                self.base_state = self._get_recv_state()
            entry_state = self.base_state.copy()
        else:
            entry_state = initial_state.copy()
        stdin = SimFile('stdin', content=test_data.preloaded_stdin)
        stdout = SimFile('stdout')
        stderr = SimFile('stderr')
        fd = {0: SimFileDescriptor(stdin, 0), 1: SimFileDescriptor(stdout, 0), 2: SimFileDescriptor(stderr, 0)}
        entry_state.register_plugin('posix', SimSystemPosix(stdin=stdin, stdout=stdout, stderr=stderr, fd=fd))
        entry_state.options.add(so.STRICT_PAGE_ACCESS)
        # make sure unicorn will run: concretize any symbolic register to 0
        for k in dir(entry_state.regs):
            r = getattr(entry_state.regs, k)
            if r.symbolic:
                setattr(entry_state.regs, k, 0)
        # Inflate the warm-up counters so unicorn engages immediately.
        entry_state.unicorn._register_check_count = 100
        entry_state.unicorn._runs_since_symbolic_data = 100
        entry_state.unicorn._runs_since_unicorn = 100
        # cooldowns
        entry_state.unicorn.cooldown_symbolic_registers = 0
        entry_state.unicorn.cooldown_symbolic_memory = 0
        entry_state.unicorn.cooldown_nonunicorn_blocks = 1
        entry_state.unicorn.max_steps = 10000
        # syscall hook: prune paths doing huge transmit/receive/random
        entry_state.inspect.b(
            'syscall',
            BP_BEFORE,
            action=self.syscall_hook
        )
        if concrete_rand:
            entry_state.inspect.b(
                'syscall',
                BP_AFTER,
                action=self.syscall_hook_concrete_rand
            )
        # solver timeout (milliseconds)
        entry_state.solver._solver.timeout = 500
        return entry_state
    @staticmethod
    def syscall_hook(state):
        """
        Pre-syscall breakpoint: kill any path that tries to transmit,
        receive, or request random data beyond a sane size, by zeroing
        the length register and adding an unsatisfiable constraint.
        """
        # FIXME maybe we need to fix transmit/receive to handle huge vals properly
        # kill path that try to read/write large amounts
        syscall_name = state.inspect.syscall_name
        if syscall_name == "transmit":
            count = state.solver.eval(state.regs.edx)
            if count > 0x10000:
                state.regs.edx = 0
                # False constraint makes the path unsatisfiable (pruned).
                state.add_constraints(claripy.BoolV(False))
        if syscall_name == "receive":
            count = state.solver.eval(state.regs.edx)
            if count > 0x10000:
                state.regs.edx = 0
                state.add_constraints(claripy.BoolV(False))
        if syscall_name == "random":
            count = state.solver.eval(state.regs.ecx)
            if count > 0x1000:
                state.regs.ecx = 0
                state.add_constraints(claripy.BoolV(False))
    @staticmethod
    def syscall_hook_concrete_rand(state):
        """
        Post-syscall breakpoint: after a small random() call, overwrite
        the destination buffer with host-generated concrete random bytes
        instead of leaving it symbolic.
        """
        # FIXME maybe we need to fix transmit/receive to handle huge vals properly
        # kill path that try to read/write large amounts
        syscall_name = state.inspect.syscall_name
        if syscall_name == "random":
            count = state.solver.eval(state.regs.ecx)
            if count > 100:
                # Leave large requests alone.
                return
            buf = state.solver.eval(state.regs.ebx)
            for i in range(count):
                a = random.randint(0, 255)
                state.memory.store(buf+i, state.solver.BVV(a, 8))
    def get_base_call_state(self, function, test_data, initial_state=None, concrete_rand=False):
        """
        Map the test inputs into memory starting at 0x2000 and return the
        state an IdentifierCallable would start from (without running the
        function itself).
        """
        curr_buf_loc = 0x2000
        mapped_input = []
        s = self.setup_state(function, test_data, initial_state, concrete_rand=concrete_rand)
        for i in test_data.input_args:
            if isinstance(i, (bytes, claripy.ast.BV)):
                # Buffer-like args are stored in memory; the pointer is passed.
                s.memory.store(curr_buf_loc, i)
                mapped_input.append(curr_buf_loc)
                curr_buf_loc += max(len(i), 0x1000)
            else:
                if not isinstance(i, int):
                    raise Exception("Expected int/bytes got %s" % type(i))
                mapped_input.append(i)
        inttype = SimTypeInt(self.project.arch.bits, False)
        func_ty = SimTypeFunction([inttype] * len(mapped_input), inttype)
        cc = self.project.factory.cc(func_ty=func_ty)
        call = IdentifierCallable(self.project, function.startpoint.addr, concrete_only=True,
                                  cc=cc, base_state=s, max_steps=test_data.max_steps)
        return call.get_base_state(*mapped_input)
    def test(self, function, test_data, concrete_rand=False, custom_offs=None):
        """
        Concretely run *function* on test_data's inputs and return True
        iff the observed output arguments, return value, and stdout all
        match the expectations; False on any mismatch or execution error.
        """
        curr_buf_loc = 0x2000
        mapped_input = []
        s = self.setup_state(function, test_data, concrete_rand=concrete_rand)
        if custom_offs is None:
            for i in test_data.input_args:
                if isinstance(i, bytes):
                    # NUL-terminate string-like buffers before mapping them.
                    s.memory.store(curr_buf_loc, i + b"\x00")
                    mapped_input.append(curr_buf_loc)
                    curr_buf_loc += max(len(i), 0x1000)
                else:
                    if not isinstance(i, int):
                        raise Exception("Expected int/str got %s" % type(i))
                    mapped_input.append(i)
        else:
            # Same mapping, but each pointer is shifted by its custom offset.
            for i, off in zip(test_data.input_args, custom_offs):
                if isinstance(i, bytes):
                    s.memory.store(curr_buf_loc, i + b"\x00")
                    mapped_input.append(curr_buf_loc+off)
                    curr_buf_loc += max(len(i), 0x1000)
                else:
                    if not isinstance(i, int):
                        raise Exception("Expected int/str got %s" % type(i))
                    mapped_input.append(i)
        inttype = SimTypeInt(self.project.arch.bits, False)
        func_ty = SimTypeFunction([inttype] * len(mapped_input), inttype)
        cc = self.project.factory.cc(func_ty=func_ty)
        try:
            call = IdentifierCallable(self.project, function.startpoint.addr, concrete_only=True,
                                      cc=cc, base_state=s, max_steps=test_data.max_steps)
            result = call(*mapped_input)
            result_state = call.result_state
        except AngrCallableMultistateError as e:
            l.info("multistate error: %s", e)
            return False
        except AngrCallableError as e:
            l.info("other callable error: %s", e)
            return False
        # check matches: read back each expected output buffer
        outputs = []
        for i, out in enumerate(test_data.expected_output_args):
            if isinstance(out, bytes):
                if len(out) == 0:
                    raise Exception("len 0 out")
                outputs.append(result_state.memory.load(mapped_input[i], len(out)))
            else:
                outputs.append(None)
        # Concretize the loaded buffers; bail out if any is symbolic.
        tmp_outputs = outputs
        outputs = []
        for out in tmp_outputs:
            if out is None:
                outputs.append(None)
            elif result_state.solver.symbolic(out):
                l.info("symbolic memory output")
                return False
            else:
                outputs.append(result_state.solver.eval(out, cast_to=bytes))
        if outputs != test_data.expected_output_args:
            l.info("mismatch output")
            return False
        if result_state.solver.symbolic(result):
            l.info("result value sybolic")
            return False
        # Normalize a negative expected return value to unsigned machine width.
        if test_data.expected_return_val is not None and test_data.expected_return_val < 0:
            test_data.expected_return_val &= (2**self.project.arch.bits - 1)
        if test_data.expected_return_val is not None and \
                result_state.solver.eval(result) != test_data.expected_return_val:
            l.info("return val mismatch got %#x, expected %#x", result_state.solver.eval(result), test_data.expected_return_val)
            return False
        # Finally compare everything the function wrote to stdout.
        if result_state.solver.symbolic(result_state.posix.stdout.size):
            l.info("symbolic stdout pos")
            return False
        if result_state.solver.eval(result_state.posix.stdout.size) == 0:
            stdout = ""
        else:
            stdout = result_state.posix.stdout.load(0, result_state.posix.stdout.size)
            if stdout.symbolic:
                l.info("symbolic stdout")
                return False
            stdout = result_state.solver.eval(stdout, cast_to=bytes)
        if stdout != test_data.expected_stdout:
            l.info("mismatch stdout")
            return False
        return True
    def get_out_state(self, function, test_data, initial_state=None, concrete_rand=False, custom_offs=None):
        """
        Concretely run *function* on test_data's inputs and return the
        resulting state, or None if execution failed.
        """
        curr_buf_loc = 0x2000
        mapped_input = []
        s = self.setup_state(function, test_data, initial_state, concrete_rand=concrete_rand)
        if custom_offs is None:
            for i in test_data.input_args:
                if isinstance(i, bytes):
                    # NUL-terminate string-like buffers before mapping them.
                    s.memory.store(curr_buf_loc, i + b"\x00")
                    mapped_input.append(curr_buf_loc)
                    curr_buf_loc += max(len(i), 0x1000)
                else:
                    if not isinstance(i, int):
                        raise Exception("Expected int/bytes got %s" % type(i))
                    mapped_input.append(i)
        else:
            # Same mapping, but each pointer is shifted by its custom offset.
            for i, off in zip(test_data.input_args, custom_offs):
                if isinstance(i, bytes):
                    s.memory.store(curr_buf_loc, i + b"\x00")
                    mapped_input.append(curr_buf_loc+off)
                    curr_buf_loc += max(len(i), 0x1000)
                else:
                    if not isinstance(i, int):
                        raise Exception("Expected int/bytes got %s" % type(i))
                    mapped_input.append(i)
        inttype = SimTypeInt(self.project.arch.bits, False)
        func_ty = SimTypeFunction([inttype] * len(mapped_input), inttype)
        cc = self.project.factory.cc(func_ty=func_ty)
        try:
            call = IdentifierCallable(self.project, function.startpoint.addr, concrete_only=True,
                                      cc=cc, base_state=s, max_steps=test_data.max_steps)
            _ = call(*mapped_input)
            result_state = call.result_state
        except AngrCallableMultistateError as e:
            l.info("multistate error: %s", e)
            return None
        except AngrCallableError as e:
            l.info("other callable error: %s", e)
            return None
        return result_state
|
askeing/servo | refs/heads/master | tests/wpt/web-platform-tests/mixed-content/generic/expect.py | 26 | import json, os, urllib, urlparse
def redirect(url, response):
    """Write a 301 redirect to *url* directly on the response writer."""
    # Headers are written manually, so disable the automatic ones.
    response.add_required_headers = False
    response.writer.write_status(301)
    # CORS header so cross-origin fetches can follow the redirect.
    response.writer.write_header("access-control-allow-origin", "*")
    response.writer.write_header("location", url)
    response.writer.end_headers()
    response.writer.write("")
def create_redirect_url(request, swap_scheme = False):
    """
    Rebuild the request's own URL as a redirect target, optionally
    swapping http<->https (and the port to match), and always dropping
    the "redirection" query parameter to avoid redirect loops.
    """
    parts = urlparse.urlsplit(request.url)
    scheme = parts.scheme
    netloc = parts.netloc
    if swap_scheme:
        scheme = "http" if parts.scheme == "https" else "https"
        # Pick the server's configured port for the new scheme.
        host = parts.netloc.split(':')[0]
        port = request.server.config["ports"][scheme][0]
        netloc = "%s:%s" % (host, port)
    query = dict(urlparse.parse_qsl(parts.query))
    # Remove "redirection" from query to avoid redirect loops.
    assert "redirection" in query
    del query["redirection"]
    return urlparse.urlunsplit(urlparse.SplitResult(
        scheme = scheme,
        netloc = netloc,
        path = parts.path,
        query = urllib.urlencode(query),
        fragment = None))
def main(request, response):
    """
    WPT mixed-content helper endpoint (Python 2 wptserve handler).

    Optionally redirects (same- or cross-scheme) first, then performs a
    stash "put"/"purge"/"take" action and answers with either a JSON
    status object or a canned resource of the requested content type.
    """
    if "redirection" in request.GET:
        redirection = request.GET["redirection"]
        if redirection == "no-redirect":
            pass
        elif redirection == "keep-scheme-redirect":
            redirect(create_redirect_url(request, swap_scheme=False), response)
            return
        elif redirection == "swap-scheme-redirect":
            redirect(create_redirect_url(request, swap_scheme=True), response)
            return
        else:
            raise ValueError ("Invalid redirect type: %s" % redirection)
    content_type = "text/plain"
    response_data = ""
    if "action" in request.GET:
        action = request.GET["action"]
        if "content_type" in request.GET:
            content_type = request.GET["content_type"]
        key = request.GET["key"]
        stash = request.server.stash
        # NOTE(review): when "path" is present this takes its first
        # character ([0] applies to the .get() result, not just the
        # default) — looks suspicious; confirm against callers.
        path = request.GET.get("path", request.url.split('?'))[0]
        if action == "put":
            # Record *value* under *key*; "take" later reports it as blocked.
            value = request.GET["value"]
            stash.take(key=key, path=path)
            stash.put(key=key, value=value, path=path)
            response_data = json.dumps({"status": "success", "result": key})
        elif action == "purge":
            # Consume the stash entry and serve a canned resource of the
            # requested content type.
            value = stash.take(key=key, path=path)
            if content_type == "image/png":
                response_data = open(os.path.join(request.doc_root,
                                                  "images",
                                                  "smiley.png"), "rb").read()
            elif content_type == "audio/wav":
                response_data = open(os.path.join(request.doc_root,
                                                  "webaudio", "resources", "sin_440Hz_-6dBFS_1s.wav"), "rb").read()
            elif content_type == "video/ogg":
                response_data = open(os.path.join(request.doc_root,
                                                  "media",
                                                  "movie_5.ogv"), "rb").read()
            elif content_type == "application/javascript":
                response_data = open(os.path.join(request.doc_root,
                                                  "mixed-content",
                                                  "generic",
                                                  "worker.js"), "rb").read()
            else:
                response_data = "/* purged */"
        elif action == "take":
            # A surviving stash entry means the resource fetch was blocked.
            value = stash.take(key=key, path=path)
            if value is None:
                status = "allowed"
            else:
                status = "blocked"
            response_data = json.dumps({"status": status, "result": value})
    # Write the response manually, bypassing the default headers.
    response.add_required_headers = False
    response.writer.write_status(200)
    response.writer.write_header("content-type", content_type)
    response.writer.write_header("cache-control", "no-cache; must-revalidate")
    response.writer.end_headers()
    response.writer.write(response_data)
|
agry/NGECore2 | refs/heads/master | scripts/object/draft_schematic/dance_prop/prop_ribbon_r_s08.py | 85615 | import sys
def setup(core, object):
return |
agry/NGECore2 | refs/heads/master | scripts/object/tangible/ship/components/engine/engine_kuat_f31.py | 85615 | import sys
def setup(core, object):
return |
ProjectSWGCore/NGECore2 | refs/heads/master | scripts/object/tangible/loot/creature_loot/collections/publish_gift_datapad_component_08.py | 85615 | import sys
def setup(core, object):
return |
ProjectSWGCore/NGECore2 | refs/heads/master | scripts/object/draft_schematic/clothing/clothing_backpack_field_05.py | 85615 | import sys
def setup(core, object):
return |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.